
Installing OpenStack Juno on Ubuntu 14.04 Server

The operating system is Ubuntu 14.04 Server.

Before installing, configure the OpenStack Juno package repository on every server:

apt-get install python-software-properties

apt-get install software-properties-common

add-apt-repository cloud-archive:juno

apt-get update && apt-get dist-upgrade

Install the NTP time synchronization service

apt-get install -y ntp

vim /etc/ntp.conf

Comment out the other server entries so that only the following remains:

server 10.0.0.11

Restart the NTP service

service ntp restart
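Optionally, confirm that the configured time source is actually being used:

ntpq -p    # the server line(s) from ntp.conf should appear with a non-zero reach value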

IP address plan

controller
192.168.2.11 (external)
10.0.0.11 (management)

network
192.168.2.22 (external)
10.0.0.22 (management)
10.0.1.22 (tunnel)

compute
192.168.2.33 (external; can be disconnected from the outside network once installation and configuration are complete)
10.0.0.33 (management)
10.0.1.33 (tunnel)

Detailed installation and configuration steps

Network configuration

controller node

vim /etc/hostname and set the contents to:

controller  

vim /etc/hosts and add:

10.0.0.11 controller
10.0.0.22 network
10.0.0.33 compute

vim /etc/network/interfaces

auto lo

iface lo inet loopback

# The primary network interface

auto eth0

iface eth0 inet static

        address 192.168.2.11

        netmask 255.255.0.0

        network 192.168.0.0

        broadcast 192.168.255.255

        gateway 192.168.1.1

        # dns-* options are implemented by the resolvconf package, if installed

        dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

        address 10.0.0.11

        netmask 255.255.255.0

        gateway 10.0.0.1
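After saving the file I bring the new interfaces up; a minimal sketch (doing this over SSH on eth0 can drop the session, so the console or a reboot is the safer path):

ifup eth1
ip addr show eth1     # should now show 10.0.0.11/24
ping -c 3 10.0.0.22   # management-network reachability, once the network node is configured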

network node

The layout mirrors the controller's file: eth0 carries the external address, eth1 the management address, and eth2 an extra tunnel interface.

root@network:~# vim /etc/network/interfaces

# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

auto eth0
iface eth0 inet static
        address 192.168.2.22
        netmask 255.255.0.0
        gateway 192.168.1.1
        dns-nameservers 192.168.1.1

auto eth1
iface eth1 inet static
        address 10.0.0.22
        netmask 255.255.255.0

auto eth2
iface eth2 inet static
        address 10.0.1.22
        netmask 255.255.255.0

compute node

The same pattern again (vim /etc/network/interfaces):

auto eth0
iface eth0 inet static
        address 192.168.2.33
        netmask 255.255.0.0
        gateway 192.168.1.1
        dns-nameservers 192.168.1.1

auto eth1
iface eth1 inet static
        address 10.0.0.33
        netmask 255.255.255.0

auto eth2
iface eth2 inet static
        address 10.0.1.33
        netmask 255.255.255.0

Note: the DNS server for all of the above nodes is 192.168.1.1.

Installing the OpenStack components on the controller node

Install the MySQL database

apt-get install -y mysql-server-5.6 python-mysqldb

Edit the MySQL configuration file

vi /etc/mysql/my.cnf   

[mysqld]  

default-storage-engine = innodb

innodb_file_per_table

collation-server = utf8_general_ci

init-connect = 'SET NAMES utf8'

character-set-server = utf8

#bind-address = 127.0.0.1

bind-address = 0.0.0.0

Restart the database

service mysql restart

Remove the anonymous database users. Run the following in a terminal:

mysql_install_db

mysql_secure_installation
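An optional sanity check that the my.cnf changes above took effect after the restart:

mysql -u root -p -e "SHOW VARIABLES LIKE 'character_set_server'; SHOW VARIABLES LIKE 'default_storage_engine';"
# expect utf8 and InnoDB respectively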

Install the RabbitMQ message queue service:

apt-get install -y rabbitmq-server 
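Note that the service configuration files later in this guide do not set rabbit_userid or rabbit_password, so they rely on RabbitMQ's default guest account. If you prefer to change that password, something like the following works (RABBIT_PASS is a placeholder of your choosing), but you would then also have to add a matching rabbit_password to each service configuration:

rabbitmqctl change_password guest RABBIT_PASS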

Install Keystone

apt-get install -y keystone

Create the keystone database; all of the SQL below is entered via mysql -u root -p:

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';

exit;

Delete the SQLite database

rm /var/lib/keystone/keystone.db

Configure Keystone

Edit /etc/keystone/keystone.conf:

[DEFAULT]

admin_token=ADMIN

log_dir=/var/log/keystone

[database]

#connection=sqlite:////var/lib/keystone/keystone.db

connection = mysql://keystone:KEYSTONE_DBPASS@10.0.0.11/keystone

Restart Keystone

service keystone restart

Sync the keystone database

keystone-manage db_sync
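Optionally, verify that db_sync created the tables, using the keystone account from the GRANT statements above:

mysql -u keystone -pKEYSTONE_DBPASS keystone -e "SHOW TABLES;"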

Set the environment variables

export OS_SERVICE_TOKEN=ADMIN

export OS_SERVICE_ENDPOINT=http://10.0.0.11:35357/v2.0

Create a user with administrator privileges

root@controller:~# keystone user-create --name=admin --pass=admin_pass [email protected]

+----------+----------------------------------+

| Property |              Value               |

|  email   |         [email protected]         |

| enabled  |               True               |

|    id    | 61991b4c9abe46968b08c6d3268e8b25 |

|   name   |              admin               |

| username |              admin               |

root@controller:~# keystone role-create --name=admin

|    id    | 14d9aa53cfd7404ea5ecdc8c6ff96bb3 |

root@controller:~# keystone role-create --name=_member_

|    id    | 69d86b6c21d54fc3848b30d8a7afa6d6 |

|   name   |             _member_             |

root@controller:~# keystone tenant-create --name=admin --description="Admin Tenant"

+-------------+----------------------------------+

|   Property  |              Value               |

| description |           Admin Tenant           |

|   enabled   |               True               |

|      id     | 9474847b08264433b623233c85b7b6de |

|     name    |              admin               |

root@controller:~# keystone user-role-add --user=admin --tenant=admin --role=admin

root@controller:~# keystone user-role-add --user=admin --role=_member_ --tenant=admin

Create a regular user

root@controller:~# keystone user-create --name=demo --pass=demo_pass [email protected]

|  email   |         [email protected]          |

|    id    | f40209d709564e5fbe04dc4659f4ee72 |

|   name   |               demo               |

| username |               demo               |

root@controller:~# keystone tenant-create --name=demo --description="Demo Tenant"

| description |           Demo Tenant            |

|      id     | 5e3aa75b5bce4723a755e356ef22ad26 |

|     name    |               demo               |

root@controller:~# keystone user-role-add --user=demo --role=_member_ --tenant=demo

Create the service tenant

root@controller:~# keystone tenant-create --name=service --description="Service Tenant"

| description |          Service Tenant          |

|      id     | 4fd53777c8f84c72b09ef025ab45977d |

|     name    |             service              |

Define the service API endpoint

root@controller:~# keystone service-create --name=keystone --type=identity --description="OpenStack Identity"

| description |        OpenStack Identity        |

|      id     | 6b6023376cc040e8be26a57815f17b87 |

|     name    |             keystone             |

|     type    |             identity             |

Create the endpoint

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ identity / {print $2}') \

> --publicurl=http://192.168.2.11:5000/v2.0 \

> --internalurl=http://10.0.0.11:5000/v2.0 \

> --adminurl=http://10.0.0.11:35357/v2.0

|   adminurl  |   http://10.0.0.11:35357/v2.0    |

|      id     | 0dcae7b8deb9437996c7c7e0ed0b4086 |

| internalurl |    http://10.0.0.11:5000/v2.0    |

|  publicurl  |  http://192.168.2.11:5000/v2.0   |

|    region   |            regionOne             |

|  service_id | 6b6023376cc040e8be26a57815f17b87 |
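With OS_SERVICE_TOKEN and OS_SERVICE_ENDPOINT still exported, the registered service and its endpoint can be listed to double-check the URLs before continuing:

keystone service-list
keystone endpoint-list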

Verify Keystone

Check that Keystone has been initialized correctly with the commands below.

Set up the environment variables by creating two files, creds and admin_creds:

cat <<EOF >>/root/creds

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=admin_pass

export OS_AUTH_URL="http://192.168.2.11:5000/v2.0/"

EOF

cat <<EOF >>/root/admin_creds
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_AUTH_URL=http://10.0.0.11:35357/v2.0
EOF

These environment variables must be set before running the commands below.

Unset OS_SERVICE_TOKEN and OS_SERVICE_ENDPOINT first; leaving them set produces a warning:

unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT

Load the environment variables

source creds

Now the following commands work:

root@controller:~# keystone user-list

+----------------------------------+-------+---------+------------------+

|                id                |  name | enabled |      email       |

| 61991b4c9abe46968b08c6d3268e8b25 | admin |   True  | [email protected] |

| f40209d709564e5fbe04dc4659f4ee72 |  demo |   True  | [email protected]  |

root@controller:~# keystone role-list

+----------------------------------+----------+

|                id                |   name   |

| 69d86b6c21d54fc3848b30d8a7afa6d6 | _member_ |

| 14d9aa53cfd7404ea5ecdc8c6ff96bb3 |  admin   |

root@controller:~# keystone tenant-list

+----------------------------------+---------+---------+

|                id                |   name  | enabled |

| 9474847b08264433b623233c85b7b6de |  admin  |   True  |

| 5e3aa75b5bce4723a755e356ef22ad26 |   demo  |   True  |

| 4fd53777c8f84c72b09ef025ab45977d | service |   True  |

Installing and configuring Glance

apt-get install -y glance python-glanceclient

Create the database (mysql -u root -p):

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';

Create the glance user and service in Keystone

root@controller:~# keystone user-create --name=glance --pass=service_pass [email protected]

|  email   |        [email protected]         |

|    id    | 9fa6993da7944a59b342a73a6f18728a |

|   name   |              glance              |

| username |              glance              |

root@controller:~# keystone user-role-add --user=glance --tenant=service --role=admin

Set up the endpoint

root@controller:~# keystone service-create --name=glance --type=image --description="OpenStack Image Service"

| description |     OpenStack Image Service      |

|      id     | d3d6fb3384db4ce9ad3423817b52bac9 |

|     name    |              glance              |

|     type    |              image               |

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ image / {print $2}') \

> --publicurl=http://192.168.2.11:9292 \

> --internalurl=http://10.0.0.11:9292 \

> --adminurl=http://10.0.0.11:9292

|   adminurl  |      http://10.0.0.11:9292       |

|      id     | 0859727be85d473391c935c3f52ddddf |

| internalurl |      http://10.0.0.11:9292       |

|  publicurl  |     http://192.168.2.11:9292     |

|  service_id | d3d6fb3384db4ce9ad3423817b52bac9 |

Edit the Glance configuration files

vim /etc/glance/glance-api.conf

connection = mysql://glance:GLANCE_DBPASS@10.0.0.11/glance

rpc_backend = rabbit

rabbit_host = 10.0.0.11

[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = glance

admin_password = service_pass

[paste_deploy]

flavor = keystone

vim /etc/glance/glance-registry.conf

# The file name to use with SQLite (string value)
#sqlite_db = /var/lib/glance/glance.sqlite
connection = mysql://glance:GLANCE_DBPASS@10.0.0.11/glance

Apply the same [keystone_authtoken] and [paste_deploy] settings here as in glance-api.conf.

Restart the services

service glance-api restart; service glance-registry restart

Initialize the glance database

glance-manage db_sync

Upload a test image

root@controller:~# glance image-create --name "cirros-0.3.2-x86_64" --is-public true \

> --container-format bare --disk-format qcow2 \

> --location http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img

+------------------+--------------------------------------+

| Property         | Value                                |

| checksum         | None                                 |

| container_format | bare                                 |

| created_at       | 2015-03-20T08:02:56                  |

| deleted          | False                                |

| deleted_at       | None                                 |

| disk_format      | qcow2                                |

| id               | 5dbfecab-9828-4492-88bb-c0dd6aa6d75c |

| is_public        | True                                 |

| min_disk         | 0                                    |

| min_ram          | 0                                    |

| name             | cirros-0.3.2-x86_64                  |

| owner            | 9474847b08264433b623233c85b7b6de     |

| protected        | False                                |

| size             | 13200896                             |

| status           | active                               |

| updated_at       | 2015-03-20T08:02:57                  |

| virtual_size     | None                                 |

List the images

root@controller:~# glance image-list

+--------------------------------------+---------------------+-------------+------------------+----------+--------+

| ID                                   | Name                | Disk Format | Container Format | Size     | Status |

| 5dbfecab-9828-4492-88bb-c0dd6aa6d75c | cirros-0.3.2-x86_64 | qcow2       | bare             | 13200896 | active |
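The --location form above requires the controller to reach cdn.download.cirros-cloud.net directly. If it cannot, a roughly equivalent alternative is to download the image first and upload it with --file (the file name here simply matches the URL used above):

wget http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
glance image-create --name "cirros-0.3.3-x86_64" --is-public true \
  --container-format bare --disk-format qcow2 \
  --file cirros-0.3.3-x86_64-disk.img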

Installing and configuring the Nova components

apt-get install -y nova-api nova-cert nova-conductor nova-consoleauth \

nova-novncproxy nova-scheduler python-novaclient

Create the nova database

mysql -u root -p

CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

Create the nova user and role in Keystone

root@controller:~# keystone user-create --name=nova --pass=service_pass [email protected]

|  email   |         [email protected]          |

|    id    | cc25a28979b0467cac7a33426b8180f7 |

|   name   |               nova               |

| username |               nova               |

root@controller:~# keystone user-role-add --user=nova --tenant=service --role=admin

Register the service and set up the endpoint

root@controller:~# keystone service-create --name=nova --type=compute --description="OpenStack Compute"

| description |        OpenStack Compute         |

|      id     | 7bb1f0e64e3b4ef8b0408902261b2b37 |

|     name    |               nova               |

|     type    |             compute              |

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ compute / {print $2}') \

> --publicurl=http://192.168.2.11:8774/v2/%\(tenant_id\)s \

> --internalurl=http://10.0.0.11:8774/v2/%\(tenant_id\)s \

> --adminurl=http://10.0.0.11:8774/v2/%\(tenant_id\)s

+-------------+-------------------------------------------+

|   Property  |                   Value                   |

|   adminurl  |   http://10.0.0.11:8774/v2/%(tenant_id)s  |

|      id     |      24fc3bf020084040ba6a58d60c0b1719     |

| internalurl |   http://10.0.0.11:8774/v2/%(tenant_id)s  |

|  publicurl  | http://192.168.2.11:8774/v2/%(tenant_id)s |

|    region   |                 regionOne                 |

|  service_id |      7bb1f0e64e3b4ef8b0408902261b2b37     |

Configure nova

vim /etc/nova/nova.conf

Here is my nova.conf:

[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
rpc_backend = rabbit
rabbit_host = 10.0.0.11
my_ip = 10.0.0.11
vncserver_listen = 10.0.0.11
vncserver_proxyclient_address = 10.0.0.11
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://10.0.0.11:5000
auth_host = 10.0.0.11
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = service_pass

[database]
connection = mysql://nova:NOVA_DBPASS@10.0.0.11/nova

Delete the SQLite database

rm /var/lib/nova/nova.sqlite

Initialize the nova database

nova-manage db sync

Restart the nova services

service nova-api restart

service nova-cert restart

service nova-conductor restart

service nova-consoleauth restart

service nova-novncproxy restart

service nova-scheduler restart

Check the status of the nova services

root@controller:~# nova-manage service list

Binary           Host                                 Zone             Status     State Updated_At

nova-cert        controller                           internal         enabled    :-)   2015-03-20 08:24:17

nova-consoleauth controller                           internal         enabled    :-)   2015-03-20 08:24:17

nova-conductor   controller                           internal         enabled    :-)   2015-03-20 08:24:17

nova-scheduler   controller                           internal         enabled    :-)   2015-03-20 08:24:17

The smiley faces indicate that all services are running.

Installing and configuring the Neutron components

apt-get install -y neutron-server neutron-plugin-ml2

Create the neutron database

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO neutron@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';

GRANT ALL PRIVILEGES ON neutron.* TO neutron@'%' IDENTIFIED BY 'NEUTRON_DBPASS';

Create the neutron user and role in Keystone

root@controller:~# keystone user-create --name=neutron --pass=service_pass [email protected]

|  email   |        [email protected]        |

|    id    | 322f0a1d2c7e416abf0e118e50625443 |

|   name   |             neutron              |

| username |             neutron              |

root@controller:~# keystone user-role-add --user=neutron --tenant=service --role=admin

Register the service and endpoint

root@controller:~# keystone service-create --name=neutron --type=network --description="OpenStack Networking"

| description |       OpenStack Networking       |

|      id     | e3d179a7b9be42ba982c79cd652a7be8 |

|     name    |             neutron              |

|     type    |             network              |

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ network / {print $2}') \

> --publicurl=http://192.168.2.11:9696 \

> --internalurl=http://10.0.0.11:9696 \

> --adminurl=http://10.0.0.11:9696

|   adminurl  |      http://10.0.0.11:9696       |

|      id     | 8b968c25d8324bb28125604a21c64f54 |

| internalurl |      http://10.0.0.11:9696       |

|  publicurl  |     http://192.168.2.11:9696     |

|  service_id | e3d179a7b9be42ba982c79cd652a7be8 |

Get the nova_admin_tenant_id

root@controller:~# keystone tenant-list | awk '/ service / { print $2 }'

4fd53777c8f84c72b09ef025ab45977d
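Since this id is needed again in neutron.conf below, it is handy to keep it in a shell variable (SERVICE_TENANT_ID is just a local name used here):

SERVICE_TENANT_ID=$(keystone tenant-list | awk '/ service / { print $2 }')
echo $SERVICE_TENANT_ID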

Edit the neutron configuration file

vim /etc/neutron/neutron.conf

# Example: service_plugins = router,firewall,lbaas,vpnaas,metering

service_plugins = router,lbaas

# auth_strategy = keystone

# allow_overlapping_ips = False

allow_overlapping_ips = True

rpc_backend = neutron.openstack.common.rpc.impl_kombu

notification_driver = neutron.openstack.common.notifier.rpc_notifier

# ======== neutron nova interactions ==========

# Send notification to nova when port status is active.

notify_nova_on_port_status_changes = True

# Send notifications to nova when port data (fixed_ips/floatingips) change

# so nova can update it's cache.

notify_nova_on_port_data_changes = True

# URL for connection to nova (Only supports one nova region currently).

nova_url = http://10.0.0.11:8774/v2

# Name of nova region to use. Useful if keystone manages more than one region

# nova_region_name =

# Username for connection to nova in admin context

nova_admin_username = nova

# The uuid of the admin nova tenant

nova_admin_tenant_id = 4fd53777c8f84c72b09ef025ab45977d

# Password for connection to nova in admin context.

nova_admin_password = service_pass

# Authorization URL for connection to nova in admin context.

nova_admin_auth_url = http://10.0.0.11:35357/v2.0

[keystone_authtoken]

#auth_host = 127.0.0.1
#auth_port = 35357
#auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
#signing_dir = $state_path/keystone-signing
auth_uri = http://10.0.0.11:5000
auth_host = 10.0.0.11
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = neutron
admin_password = service_pass

[database]

#connection = sqlite:////var/lib/neutron/neutron.sqlite
connection = mysql://neutron:NEUTRON_DBPASS@10.0.0.11/neutron

Configure the ML2 (layer-2) networking plugin

vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = gre

tenant_network_types = gre

mechanism_drivers = openvswitch

[ml2_type_gre]

tunnel_id_ranges = 1:1000

[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

enable_security_group = True

Configure nova to use neutron

Add the following to /etc/nova/nova.conf on the controller under [DEFAULT]:

network_api_class=nova.network.neutronv2.api.API

neutron_url=http://10.0.0.11:9696

neutron_auth_strategy=keystone

neutron_admin_tenant_name=service

neutron_admin_username=neutron

neutron_admin_password=service_pass

neutron_admin_auth_url=http://10.0.0.11:35357/v2.0

libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver=nova.virt.firewall.NoopFirewallDriver

security_group_api=neutron

Restart the nova services (the same nova-api, nova-cert, nova-conductor, nova-consoleauth, nova-novncproxy and nova-scheduler restarts as above).

Restart the neutron service

service neutron-server restart

Install the OpenStack dashboard (Horizon)

apt-get install -y apache2 memcached libapache2-mod-wsgi openstack-dashboard

Edit /etc/openstack-dashboard/local_settings.py

#ALLOWED_HOSTS = ['horizon.example.com', ]

ALLOWED_HOSTS = ['localhost','192.168.2.11']

#OPENSTACK_HOST = "127.0.0.1"

OPENSTACK_HOST = "10.0.0.11"

vi /etc/apache2/apache2.conf 

Add the following line at the end of the file: ServerName localhost

Restart the Apache and memcached services

service apache2 restart; service memcached restart

This completes the installation on the controller node.

Network node (network)

Install the base components

apt-get install -y vlan bridge-utils

Edit /etc/sysctl.conf:

vim /etc/sysctl.conf

Add the following at the end of the file:

net.ipv4.ip_forward=1

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

Apply the changes

sysctl -p

Install the Neutron components

apt-get install -y neutron-plugin-ml2 neutron-plugin-openvswitch-agent haproxy neutron-lbaas-agent \

dnsmasq neutron-l3-agent neutron-dhcp-agent

Edit the Neutron configuration file

vim /etc/neutron/neutron.conf

# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone

Also apply the same [DEFAULT] core_plugin, service_plugins, allow_overlapping_ips and rpc settings, and the same [keystone_authtoken] values, as in the controller's neutron.conf.

Edit /etc/neutron/l3_agent.ini

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

use_namespaces = True

Edit /etc/neutron/dhcp_agent.ini

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = True

Edit /etc/neutron/metadata_agent.ini

auth_url = http://10.0.0.11:5000/v2.0

auth_region = regionOne

nova_metadata_ip = 10.0.0.11

metadata_proxy_shared_secret = helloOpenStack

Log in to the controller node and add the following to /etc/nova/nova.conf under [DEFAULT]:

service_neutron_metadata_proxy = true

Restart the nova-api service:

service nova-api restart

Back on the network node, edit /etc/neutron/plugins/ml2/ml2_conf.ini. Apply the same [ml2], [ml2_type_gre] and [securitygroup] settings as on the controller, and add:

[ovs]

local_ip = 10.0.1.22

tunnel_type = gre

enable_tunneling = True

Edit /etc/neutron/lbaas_agent.ini

device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver

[haproxy]

user_group = nogroup

Restart Open vSwitch

service openvswitch-switch restart

Create the br-ex bridge

ovs-vsctl add-br br-ex

ovs-vsctl add-port br-ex eth0
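Adding the physical NIC to br-ex interrupts any connection currently running over eth0, so it is best done from the console. A quick check that the bridge and port exist:

ovs-vsctl show    # br-ex should be listed with eth0 as one of its ports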

Edit /etc/network/interfaces

#auto eth0

#iface eth0 inet static

#       address 192.168.2.22

#       netmask 255.255.0.0

#       network 192.168.0.0

#       broadcast 192.168.255.255

#       gateway 192.168.1.1

#       dns-nameservers 192.168.1.1

iface eth0 inet manual

        up ifconfig $IFACE 0.0.0.0 up

        up ip link set $IFACE promisc on

        down ip link set $IFACE promisc off

        down ifconfig $IFACE down

auto br-ex

iface br-ex inet static

        address 192.168.2.22

        netmask 255.255.0.0

        gateway 192.168.1.1

        dns-nameservers 192.168.1.1
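At this point I restart the Neutron agents on the network node so they pick up the new configuration; these are the Ubuntu service names for the packages installed above:

service neutron-plugin-openvswitch-agent restart
service neutron-l3-agent restart
service neutron-dhcp-agent restart
service neutron-metadata-agent restart
service neutron-lbaas-agent restart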

After restarting the agents, check them from the controller node:

root@controller:~# neutron agent-list

Unable to establish connection to http://192.168.2.11:9696/v2.0/agents.json

Cause: the neutron database had not been synced yet. Syncing it on the controller resolves the problem:

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade juno" neutron

Running neutron agent-list again now returns:

+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+

| id                                   | agent_type         | host    | alive | admin_state_up | binary                    |

| 08bedacf-5eb4-445e-ba91-ea0d481a5772 | DHCP agent         | network | :-)   | True           | neutron-dhcp-agent        |

| 263fa30f-0af9-4534-9153-ea01ffa71874 | Loadbalancer agent | network | :-)   | True           | neutron-lbaas-agent       |

| 32a17ac6-50c6-4cfa-8032-8c6f67984251 | L3 agent           | network | :-)   | True           | neutron-l3-agent          |

| 3e0d5e0c-41c1-4fe0-9642-05862c0d65ed | Open vSwitch agent | network | :-)   | True           | neutron-openvswitch-agent |

| c02625d3-d3df-4bd8-bdfa-a75fff5f2f66 | Metadata agent     | network | :-)   | True           | neutron-metadata-agent    |

The network node configuration is complete.

Compute node (compute)

Install the KVM packages

apt-get install -y kvm libvirt-bin pm-utils

Install the compute node components

apt-get install -y nova-compute-kvm python-guestfs

Make the current kernel image readable by non-root users (python-guestfs needs to read it):

dpkg-statoverride  --update --add root root 0644 /boot/vmlinuz-$(uname -r)

Create the script /etc/kernel/postinst.d/statoverride with the following contents:

#!/bin/sh

version="$1"

# passing the kernel version is required

[ -z "${version}" ] && exit 0

dpkg-statoverride --update --add root root 0644 /boot/vmlinuz-${version}

Make it executable

chmod +x /etc/kernel/postinst.d/statoverride

Edit /etc/nova/nova.conf and add the following (plus the same rpc_backend/rabbit_host and [keystone_authtoken] settings as on the controller):

my_ip = 10.0.0.33

vnc_enabled = True

vncserver_listen = 0.0.0.0

vncserver_proxyclient_address = 10.0.0.33

novncproxy_base_url = http://192.168.2.11:6080/vnc_auto.html

glance_host = 10.0.0.11

vif_plugging_is_fatal=false

vif_plugging_timeout=0

Delete the SQLite database

rm /var/lib/nova/nova.sqlite

Restart the compute service

service nova-compute restart

Edit /etc/sysctl.conf and add:

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

Apply immediately:

sysctl -p

Install the networking components

apt-get install -y neutron-common neutron-plugin-ml2 neutron-plugin-openvswitch-agent

Edit /etc/neutron/neutron.conf; apply the same [DEFAULT] auth/rpc and [keystone_authtoken] settings as on the controller, and set:

#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin

core_plugin = ml2

# service_plugins =

service_plugins = router

Edit /etc/neutron/plugins/ml2/ml2_conf.ini. Use the same settings as on the network node, with only the local tunnel IP changed:

[ovs]

local_ip = 10.0.1.33

tunnel_type = gre

enable_tunneling = True

Restart Open vSwitch

service openvswitch-switch restart

Then edit /etc/nova/nova.conf again and add the following under [DEFAULT]:

network_api_class = nova.network.neutronv2.api.API

neutron_url = http://10.0.0.11:9696

neutron_auth_strategy = keystone

neutron_admin_tenant_name = service

neutron_admin_username = neutron

neutron_admin_password = service_pass

neutron_admin_auth_url = http://10.0.0.11:35357/v2.0

linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver = nova.virt.firewall.NoopFirewallDriver

security_group_api = neutron

Edit /etc/nova/nova-compute.conf and switch it to qemu:

compute_driver=libvirt.LibvirtDriver

[libvirt]

virt_type=qemu
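virt_type=qemu forces pure software emulation, which always works (for example inside a nested VM). If the compute node has hardware virtualization, kvm is much faster; a quick check (a non-zero count means VT-x/AMD-V is present and virt_type can be left as kvm):

egrep -c '(vmx|svm)' /proc/cpuinfo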

Restart the related services

service nova-compute restart

service neutron-plugin-openvswitch-agent restart

Verify from the controller node

root@controller:~# nova-manage service list

Binary           Host                                 Zone             Status     State Updated_At

nova-cert        controller                           internal         enabled    :-)   2015-03-20 10:29:32

nova-consoleauth controller                           internal         enabled    :-)   2015-03-20 10:29:31

nova-conductor   controller                           internal         enabled    :-)   2015-03-20 10:29:36

nova-scheduler   controller                           internal         enabled    :-)   2015-03-20 10:29:35

nova-compute     compute                              nova             enabled    :-)   2015-03-20 10:29:31
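As a final check from the controller, the main services can be queried in one pass using the creds file created earlier:

source creds
nova service-list     # every service enabled and up
neutron agent-list    # every agent alive
glance image-list     # the cirros test image is visible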

At this point all three OpenStack nodes are installed.

You can now log in and start using the dashboard at:

http://192.168.2.11/horizon/
