Environment Preparation
Create three virtual machines: a controller node (10.1.101.24), a compute1 node (10.1.101.25), and a compute2 node (10.1.101.26), each with 2 vCPUs and 2048 MB of RAM.
Add the Grizzly source on all three nodes and update the system:
echo "deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main" \
    > /etc/apt/sources.list.d/grizzly.list
apt-get update && apt-get install -y ubuntu-cloud-keyring
apt-get update && apt-get upgrade -y
Controller Node
Network interface configuration:
cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
# External Network
auto eth0
iface eth0 inet static
address 10.1.101.24
netmask 255.255.255.0
gateway 10.1.101.254
#dns-nameservers 192.168.1.3
dns-nameservers 10.1.101.51
auto eth1
iface eth1 inet static
address 192.168.1.24
netmask 255.255.255.0
auto eth2
iface eth2 inet static
address 192.168.2.24
netmask 255.255.255.0
Hosts file configuration:
cat /etc/hosts
127.0.0.1       localhost
192.168.1.24    controller
192.168.1.25    compute1
192.168.1.26    compute2
# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
Set the local hostname:
cat /etc/hostname
controller
Adjust kernel network parameters (disable reverse-path filtering, needed for the multi-host network setup):
vim /etc/sysctl.conf
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
Restart networking:
/etc/init.d/networking restart
Apply and verify the sysctl settings:
sysctl -e -p /etc/sysctl.conf
Install and configure the NTP service:
apt-get install -y ntp
sed -i 's/server ntp.ubuntu.com/ \
server ntp.ubuntu.com \
server 127.127.1.0 \
fudge 127.127.1.0 stratum 10/g' /etc/ntp.conf
Restart the NTP service:
service ntp restart
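To confirm the daemon is serving time (an optional check; ntpq ships with the ntp package):
ntpq -p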
Set environment variables:
cat > /root/novarc << EOF
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=password
export MYSQL_PASS=password
export SERVICE_PASSWORD=password
export RABBIT_PASSWORD=password
export OS_AUTH_URL="http://localhost:5000/v2.0/"
export SERVICE_ENDPOINT="http://localhost:35357/v2.0"
export SERVICE_TOKEN=mutil_host
export MASTER="$(/sbin/ifconfig eth0 | awk '/inet addr/ {print $2}' | cut -f2 -d ":")"
export LOCAL_IP="$(/sbin/ifconfig eth1 | awk '/inet addr/ {print $2}' | cut -f2 -d ":")"
EOF
Apply the environment variables:
cat /root/novarc >> /etc/profile
source /etc/profile
Preseed the MySQL root password:
cat <<MYSQL_PRESEED | debconf-set-selections
mysql-server-5.5 mysql-server/root_password password $MYSQL_PASS
mysql-server-5.5 mysql-server/root_password_again password $MYSQL_PASS
mysql-server-5.5 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
Install MySQL:
apt-get -y install mysql-server python-mysqldb curl
Allow connections from hosts other than localhost:
sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
(optional: sed -i '44 i skip-name-resolve' /etc/mysql/my.cnf)
Restart the MySQL service:
service mysql restart
Create the OpenStack databases:
mysql -uroot -p$MYSQL_PASS <<EOF
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '$MYSQL_PASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '$MYSQL_PASS';
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '$MYSQL_PASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '$MYSQL_PASS';
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '$MYSQL_PASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '$MYSQL_PASS';
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '$MYSQL_PASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '$MYSQL_PASS';
CREATE DATABASE quantum;
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'%' IDENTIFIED BY '$MYSQL_PASS';
GRANT ALL PRIVILEGES ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY '$MYSQL_PASS';
FLUSH PRIVILEGES;
EOF
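A quick check that the databases were created, using the root password set above:
mysql -uroot -p$MYSQL_PASS -e "SHOW DATABASES;"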
Install the RabbitMQ service:
apt-get -y install rabbitmq-server
Change the guest password to password and restart the service:
rabbitmqctl change_password guest $RABBIT_PASSWORD
service rabbitmq-server restart
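Optionally verify that the broker is up and the guest user is present:
rabbitmqctl status
rabbitmqctl list_users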
Install the Keystone service:
apt-get install -y keystone python-keystone python-keystoneclient
Configure:
sed -i -e "s/# admin_token = ADMIN/admin_token = $SERVICE_TOKEN/g; \
    s/# bind_host = 0.0.0.0/bind_host = 0.0.0.0/g; \
    s/# public_port = 5000/public_port = 5000/g; \
    s/# admin_port = 35357/admin_port = 35357/g; \
    s/# compute_port = 8774/compute_port = 8774/g; \
    s/# verbose = True/verbose = True/g; \
    s/# idle_timeout/idle_timeout/g" /etc/keystone/keystone.conf
sed -i '/connection = .*/{s|sqlite:///.*|mysql://'"keystone"':'"$MYSQL_PASS"'@'"$MASTER"'/keystone|g}' \
    /etc/keystone/keystone.conf
Restart the Keystone service and sync the database:
service keystone restart
keystone-manage db_sync
Create the Keystone data import script (see appendix):
vim keystone-data.sh
#!/bin/bash
# Modify these variables as needed
ADMIN_PASSWORD=${ADMIN_PASSWORD:-password}
SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD}
DEMO_PASSWORD=${DEMO_PASSWORD:-$ADMIN_PASSWORD}
export OS_SERVICE_TOKEN="mutil_host"
export OS_SERVICE_ENDPOINT="http://localhost:35357/v2.0"
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
#
MYSQL_USER=keystone
MYSQL_DATABASE=keystone
MYSQL_HOST=localhost
MYSQL_PASSWORD=${MYSQL_PASS}
#
KEYSTONE_REGION=RegionOne
KEYSTONE_HOST=$(/sbin/ifconfig eth1 | awk '/inet addr/ {print $2}' | cut -f2 -d ":")
# Shortcut function to get a newly generated ID
# (body restored from the standard helper used in OpenStack sample scripts)
function get_field() {
    while read data; do
        if [ "$1" -lt 0 ]; then
            field="(\$(NF$1))"
        else
            field="\$$(($1 + 1))"
        fi
        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
    done
}
# Tenants
ADMIN_TENANT=$(keystone tenant-create --name=admin | grep " id " | get_field 2)
DEMO_TENANT=$(keystone tenant-create --name=demo | grep " id " | get_field 2)
SERVICE_TENANT=$(keystone tenant-create --name=$SERVICE_TENANT_NAME | grep " id " | get_field 2)
# Users
ADMIN_USER=$(keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com | grep " id " | get_field 2)
DEMO_USER=$(keystone user-create --name=demo --pass="$DEMO_PASSWORD" --email=demo@domain.com --tenant-id=$DEMO_TENANT | grep " id " | get_field 2)
NOVA_USER=$(keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com | grep " id " | get_field 2)
GLANCE_USER=$(keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com | grep " id " | get_field 2)
QUANTUM_USER=$(keystone user-create --name=quantum --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com | grep " id " | get_field 2)
CINDER_USER=$(keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com | grep " id " | get_field 2)
# Roles
ADMIN_ROLE=$(keystone role-create --name=admin | grep " id " | get_field 2)
MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2)
# Add Roles to Users in Tenants
keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $DEMO_TENANT --user-id $DEMO_USER --role-id $MEMBER_ROLE
# Create services
COMPUTE_SERVICE=$(keystone service-create --name nova --type compute --description 'OpenStack Compute Service' | grep " id " | get_field 2)
VOLUME_SERVICE=$(keystone service-create --name cinder --type volume --description 'OpenStack Volume Service' | grep " id " | get_field 2)
IMAGE_SERVICE=$(keystone service-create --name glance --type image --description 'OpenStack Image Service' | grep " id " | get_field 2)
IDENTITY_SERVICE=$(keystone service-create --name keystone --type identity --description 'OpenStack Identity' | grep " id " | get_field 2)
EC2_SERVICE=$(keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service' | grep " id " | get_field 2)
NETWORK_SERVICE=$(keystone service-create --name quantum --type network --description 'OpenStack Networking service' | grep " id " | get_field 2)
# Create endpoints
keystone endpoint-create --region $KEYSTONE_REGION --service-id $COMPUTE_SERVICE \
    --publicurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s' \
    --adminurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s' \
    --internalurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $VOLUME_SERVICE \
    --publicurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s' \
    --adminurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s' \
    --internalurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $IMAGE_SERVICE \
    --publicurl 'http://'"$KEYSTONE_HOST"':9292' \
    --adminurl 'http://'"$KEYSTONE_HOST"':9292' \
    --internalurl 'http://'"$KEYSTONE_HOST"':9292'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $IDENTITY_SERVICE \
    --publicurl 'http://'"$KEYSTONE_HOST"':5000/v2.0' \
    --adminurl 'http://'"$KEYSTONE_HOST"':35357/v2.0' \
    --internalurl 'http://'"$KEYSTONE_HOST"':5000/v2.0'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $EC2_SERVICE \
    --publicurl 'http://'"$KEYSTONE_HOST"':8773/services/Cloud' \
    --adminurl 'http://'"$KEYSTONE_HOST"':8773/services/Admin' \
    --internalurl 'http://'"$KEYSTONE_HOST"':8773/services/Cloud'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $NETWORK_SERVICE \
    --publicurl 'http://'"$KEYSTONE_HOST"':9696/' \
    --adminurl 'http://'"$KEYSTONE_HOST"':9696/' \
    --internalurl 'http://'"$KEYSTONE_HOST"':9696/'
Run the keystone-data.sh script:
bash keystone-data.sh
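Optionally confirm the data load, using the admin credentials exported from /root/novarc:
keystone tenant-list
keystone user-list
keystone endpoint-list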
Install the Glance component:
apt-get -y install glance
Configure:
sed -i -e "s/%SERVICE_TENANT_NAME%/service/g; \
    s/%SERVICE_USER%/glance/g; \
    s/%SERVICE_PASSWORD%/$SERVICE_PASSWORD/g" \
    /etc/glance/glance-api.conf /etc/glance/glance-registry.conf
sed -i '/sql_connection =.*/{s|sqlite:///.*|mysql://'"glance"':'"$MYSQL_PASS"'@'"$MASTER"'/glance|g}' \
    /etc/glance/glance-api.conf /etc/glance/glance-registry.conf
sed -i "s/notifier_strategy = noop/notifier_strategy = rabbit/g; \
    s/rabbit_password = guest/rabbit_password = $RABBIT_PASSWORD/g" /etc/glance/glance-api.conf
cat >> /etc/glance/glance-api.conf <<EOF
flavor = keystone+cachemanagement
EOF
cat >> /etc/glance/glance-registry.conf <<EOF
flavor = keystone
EOF
Restart the Glance services and sync the database:
service glance-api restart
service glance-registry restart
glance-manage db_sync
Download an image to test the Glance service:
wget https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
Add the cirros image:
glance add name=cirros is_public=true container_format=bare \
    disk_format=qcow2 < /root/cirros-0.3.0-x86_64-disk.img
List the images:
glance index
ID                                   Name                 Disk Format  Container Format  Size
------------------------------------ -------------------- ------------ ----------------- --------------
932fad9e-3f93-4e76-ab58-2fad4c4acd5f cirros-0.3.0-x86_64  qcow2        bare              9761280
Install the Cinder component:
apt-get install -y cinder-api cinder-scheduler cinder-volume \
    iscsitarget open-iscsi iscsitarget-dkms
Modify the iscsitarget configuration file and start the services:
sed -i 's/false/true/g' /etc/default/iscsitarget
service iscsitarget start
service open-iscsi start
Configure Cinder:
cat > /etc/cinder/cinder.conf <<EOF
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
sql_connection = mysql://cinder:$MYSQL_PASS@$MASTER:3306/cinder
iscsi_helper = ietadm
volume_group = cinder-volumes
rabbit_password = $RABBIT_PASSWORD
logdir = /var/log/cinder
verbose = true
auth_strategy = keystone
EOF
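cinder.conf points at an LVM volume group named cinder-volumes, which must exist before cinder-volume can serve requests. A minimal sketch, assuming a spare disk /dev/sdb (the device name is an assumption; adjust it to your VM):
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb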
sed -i -e "s/%SERVICE_TENANT_NAME%/service/g; \
    s/%SERVICE_USER%/cinder/g; \
    s/%SERVICE_PASSWORD%/$SERVICE_PASSWORD/g" /etc/cinder/api-paste.ini
Sync the Cinder database and restart the related services:
cinder-manage db sync
service cinder-api restart
service cinder-scheduler restart
service cinder-volume restart
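An optional smoke test: create and list a 1 GB volume.
cinder create --display-name test 1
cinder list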
Install the Quantum component:
apt-get install quantum-server quantum-plugin-openvswitch
Edit the quantum.conf configuration file as follows:
vim /etc/quantum/quantum.conf
[DEFAULT]
verbose = True
allow_overlapping_ips = True
rabbit_host = 192.168.1.24
rabbit_password = password
[keystone_authtoken]
admin_tenant_name = service
admin_user = quantum
admin_password = password
signing_dir = /var/lib/quantum/keystone-signing
Edit the ovs_quantum_plugin.ini configuration file:
vim /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DEFAULT]
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2
rabbit_host = 192.168.1.24
rabbit_password = password
host = 192.168.1.24
[DATABASE]
sql_connection = mysql://quantum:password@192.168.1.24/quantum
reconnect_interval = 2
[OVS]
tenant_network_type = gre
enable_tunneling = True
tunnel_id_ranges = 1:1000
[AGENT]
polling_interval = 2
[SECURITYGROUP]
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
Restart the quantum server:
/etc/init.d/quantum-server restart
Install the Nova components:
apt-get install -y nova-api nova-cert nova-common nova-conductor \
    nova-consoleauth nova-scheduler nova-novncproxy
Edit the api-paste.ini configuration file:
sed -i -e "s/127.0.0.1/$MASTER/g; \
    s/%SERVICE_TENANT_NAME%/service/g; \
    s/%SERVICE_USER%/nova/g; \
    s/%SERVICE_PASSWORD%/$SERVICE_PASSWORD/g" /etc/nova/api-paste.ini
Edit nova.conf as follows:
cat /etc/nova/nova.conf
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
sql_connection=mysql://nova:password@localhost/nova
rabbit_password=password
auth_strategy=keystone
# Cinder
volume_api_class=nova.volume.cinder.API
osapi_volume_listen_port=5900
#scheduler
scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter
# Networking
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://192.168.1.24:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=password
quantum_admin_auth_url=http://192.168.1.24:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
host=192.168.1.24
# Security Groups
#firewall_driver=nova.virt.firewall.NoopFirewallDriver
#security_group_api=quantum
# Metadata
quantum_metadata_proxy_shared_secret=password
service_quantum_metadata_proxy=true
metadata_listen = 192.168.1.24
metadata_listen_port = 8775
# Glance
glance_api_servers=192.168.1.24:9292
image_service=nova.image.glance.GlanceImageService
# novnc
novnc_enable=true
novncproxy_base_url=http://10.1.101.24:6080/vnc_auto.html
vncserver_proxyclient_address=10.1.101.24
vncserver_listen=0.0.0.0
Sync the database and restart the related services:
nova-manage db sync
service nova-api restart
service nova-cert restart
service nova-consoleauth restart
service nova-scheduler restart
service nova-novncproxy restart
service nova-conductor restart
Check the services (make sure nova-cert, nova-consoleauth, nova-scheduler, and nova-conductor are all up):
nova-manage service list
Binary            Host        Zone      Status   State
nova-cert         controller  internal  enabled  :-)
nova-consoleauth  controller  internal  enabled  :-)
nova-scheduler    controller  internal  enabled  :-)
nova-conductor    controller  internal  enabled  :-)
Install the Dashboard component:
apt-get -y install apache2 libapache2-mod-wsgi openstack-dashboard memcached python-memcache
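A quick check that Apache is serving the dashboard (the /horizon path is the usual default for these packages, so treat it as an assumption); log in with the admin credentials from /root/novarc:
curl -I http://10.1.101.24/horizon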
Compute Node
Make sure the Grizzly source has been added and the system updated (as in Environment Preparation).
Network interface configuration:
cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 10.1.101.25
netmask 255.255.255.0
gateway 10.1.101.254
#dns-nameservers 192.168.1.3
dns-nameservers 10.1.101.51
auto eth1
iface eth1 inet static
address 192.168.1.25
netmask 255.255.255.0
auto eth2
iface eth2 inet static
address 192.168.2.25
netmask 255.255.255.0
Edit the hosts file:
vim /etc/hosts
127.0.0.1       localhost
192.168.1.24    controller
192.168.1.25    compute1
192.168.1.26    compute2
# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
Set the hostname:
vim /etc/hostname
compute1
Enable IP forwarding:
vim /etc/sysctl.conf
net.ipv4.ip_forward = 1
net.ipv4.conf.all.forwarding = 1
Restart networking and verify:
/etc/init.d/networking restart
sysctl -e -p /etc/sysctl.conf
Set environment variables:
cat > /root/novarc << EOF
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=password
export MYSQL_PASS=password
export SERVICE_PASSWORD=password
export RABBIT_PASSWORD=password
export SERVICE_TOKEN=mutil_host
export CONTROLLER_IP=192.168.1.24
export MASTER="$(/sbin/ifconfig eth0 | awk '/inet addr/
{print $2}' | cut -f2 -d ":")"
export LOCAL_IP="$(/sbin/ifconfig eth1 | awk '/inet addr/
{print $2}' | cut -f2 -d ":")"
export OS_AUTH_URL="http://192.168.1.24:5000/v2.0/"
export SERVICE_ENDPOINT="http://192.168.1.24:35357/v2.0"
EOF
Apply the environment variables:
cat /root/novarc >> /etc/profile
source /etc/profile
Install the NTP service, point it at the controller, and restart it:
apt-get install -y ntp
sed -i -e "s/server ntp.ubuntu.com/server $CONTROLLER_IP/g" /etc/ntp.conf
service ntp restart
Install Open vSwitch:
apt-get install openvswitch-datapath-source
module-assistant auto-install openvswitch-datapath
apt-get install openvswitch-switch openvswitch-brcompat
Modify the openvswitch and brcompat configuration:
sed -i 's/# BRCOMPAT=no/BRCOMPAT=yes/g' /etc/default/openvswitch-switch
echo 'brcompat' >> /etc/modules
Restart openvswitch:
/etc/init.d/openvswitch-switch restart
(Start it again if necessary, and make sure the ovs-brcompatd, ovs-vswitchd, and ovsdb-server services are all running.
Check that the brcompat module is loaded:
lsmod | grep brcompat
If the services still fail to start, force-reload the kernel module:
/etc/init.d/openvswitch-switch force-reload-kmod
Restart the openvswitch service:
service openvswitch-switch restart
Add the br-int and br-ex bridges:
ovs-vsctl add-br br-int
ovs-vsctl add-br br-ex
ifconfig br-ex promisc up
ovs-vsctl add-port br-ex eth2
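An optional check that both bridges exist and eth2 is attached to br-ex:
ovs-vsctl show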
Update the network interface configuration file:
cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 10.1.101.25
netmask 255.255.255.0
gateway 10.1.101.254
#dns-nameservers 192.168.1.3
dns-nameservers 10.1.101.51
auto eth1
iface eth1 inet static
address 192.168.1.25
netmask 255.255.255.0
auto br-ex
iface br-ex inet static
address 192.168.2.25
netmask 255.255.255.0
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
auto eth2
iface eth2 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down
Restart networking:
/etc/init.d/networking restart
Install the Quantum agents:
apt-get install -y --force-yes quantum-plugin-openvswitch-agent \
    quantum-dhcp-agent quantum-l3-agent quantum-metadata-agent
Edit the quantum.conf configuration file:
vim /etc/quantum/quantum.conf
[DEFAULT]
verbose = False
state_path = /var/lib/quantum
lock_path = $state_path/lock
bind_host = 0.0.0.0
bind_port = 9696
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2
api_paste_config = /etc/quantum/api-paste.ini
control_exchange = quantum
allow_overlapping_ips = True
rabbit_host = 192.168.1.24
rabbit_password = password
rabbit_port = 5672
notification_driver = quantum.openstack.common.notifier.rpc_notifier
[AGENT]
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
[keystone_authtoken]
auth_host = 192.168.1.24
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = password
signing_dir = /var/lib/quantum/keystone-signing
Edit the ovs_quantum_plugin.ini configuration file:
vim /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:password@192.168.1.24/quantum
reconnect_interval = 2
[OVS]
tenant_network_type = gre
enable_tunneling = True
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 192.168.1.25
[AGENT]
polling_interval = 2
[SECURITYGROUP]
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
Edit the l3_agent.ini configuration file:
vim /etc/quantum/l3_agent.ini
[DEFAULT]
debug = True
use_namespaces = True
external_network_bridge = br-ex
signing_dir = /var/cache/quantum
admin_tenant_name = service
admin_user = quantum
admin_password = password
auth_url = http://192.168.1.24:35357/v2.0
l3_agent_manager = quantum.agent.l3_agent.L3NATAgentWithStateReport
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
enable_multi_host = True
Edit the dhcp_agent.ini configuration file:
vim /etc/quantum/dhcp_agent.ini
[DEFAULT]
debug = true
use_namespaces = True
signing_dir = /var/cache/quantum
admin_tenant_name = service
admin_user = quantum
admin_password = password
auth_url = http://192.168.1.24:35357/v2.0
dhcp_agent_manager = quantum.agent.dhcp_agent.DhcpAgentWithStateReport
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
state_path = /var/lib/quantum
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq
enable_multi_host = True
enable_isolated_metadata = False
Edit the metadata_agent.ini configuration file:
vim /etc/quantum/metadata_agent.ini
[DEFAULT]
debug = True
auth_url = http://192.168.1.24:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = quantum
admin_password = password
nova_metadata_ip = 192.168.1.24
nova_metadata_port = 8775
Restart the Quantum agent services:
service quantum-plugin-openvswitch-agent restart
service quantum-dhcp-agent restart
service quantum-l3-agent restart
service quantum-metadata-agent restart
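The agents should now register with the quantum server; an optional check, run on the controller with the admin credentials:
quantum agent-list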
Install the nova-compute component:
apt-get install -y nova-compute --force-yes
Edit the api-paste.ini file:
vim /etc/nova/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 192.168.1.24
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = password
signing_dir = /var/lib/nova/keystone-signing
auth_version = v2.0
Edit the nova.conf configuration file:
vim /etc/nova/nova.conf
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
#cinder
volume_api_class = nova.volume.cinder.API
osapi_volume_listen_port=5900
#scheduler
scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter
# General
rabbit_host=192.168.1.24
rabbit_password=password
auth_strategy=keystone
ec2_host=192.168.1.24
ec2_url=http://192.168.1.24:8773/services/Cloud
# Networking
# libvirt_use_virtio_for_bridges=True
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://192.168.1.24:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=password
quantum_admin_auth_url=http://192.168.1.24:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
host=192.168.1.25
service_quantum_metadata_proxy = True
# Security Groups
#firewall_driver=nova.virt.firewall.NoopFirewallDriver
#security_group_api=quantum
# Compute #
compute_driver=libvirt.LibvirtDriver
connection_type=libvirt
# Glance
glance_api_servers=192.168.1.24:9292
image_service=nova.image.glance.GlanceImageService
# novnc
vnc_enabled=true
vncserver_proxyclient_address=10.1.101.25
novncproxy_base_url=http://10.1.101.24:6080/vnc_auto.html
vncserver_listen=0.0.0.0
Restart the nova-compute service:
service nova-compute restart
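Back on the controller, the new compute host should now show up in the service list (an optional check):
nova-manage service list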
Deploy the compute2 node by repeating the steps above.
(1) On the controller node, set up ICMP and TCP rules for the default security group:
# Obtain default security group ID
quantum security-group-list
# Enable ICMP and TCP ports
quantum security-group-rule-create --protocol icmp --direction ingress {security group ID}
quantum security-group-rule-create --protocol icmp --direction egress {security group ID}
quantum security-group-rule-create --protocol tcp --direction egress --port-range-min 1 --port-range-max 65535 {security group ID}
quantum security-group-rule-create --protocol tcp --direction ingress --port-range-min 1 --port-range-max 65535 {security group ID}
(2) Prepare the Quantum networks (script pre_network.sh, see appendix):
#!/bin/bash
# Create Tenant and User #
tenant=TenantA
user=UserA
usermail=usera@mutil_host.com
role=Member
# (if/else bodies restored along the usual pattern; they were elided in the source)
if keystone tenant-list | grep -q $tenant; then
    echo "Tenant $tenant already exists"
else
    keystone tenant-create --name $tenant
fi
if keystone user-list | grep -q $user; then
    echo "User $user already exists"
else
    keystone user-create --name $user --pass password --email $usermail
fi
keystone user-role-add --tenant $tenant --user $user --role $role
# Create virtual router and sub-network #
quantum net-create Ext-Net --provider:network_type local --router:external true
# (external subnet restored as an assumption; the floating IPs used later, e.g. 192.168.2.17, fall in this range)
quantum subnet-create Ext-Net 192.168.2.0/24 -- --enable_dhcp=False
quantum --os-tenant-name $tenant --os-username $user --os-password password net-create $tenant-Net
# (tenant subnet CIDR is an assumption; each tenant gets its own 10.0.x.0/24)
subnet_id=`quantum --os-tenant-name $tenant --os-username $user --os-password password \
    subnet-create $tenant-Net 10.0.1.0/24 | grep " id " | awk '{print $4}'`
quantum --os-tenant-name $tenant --os-username $user --os-password password router-create $tenant-R1
quantum --os-tenant-name $tenant --os-username $user --os-password password \
    router-interface-add $tenant-R1 $subnet_id
quantum router-gateway-set $tenant-R1 Ext-Net
Run the script: bash pre_network.sh
(3) For fuller testing, create several more tenants with the script tenant.sh (see appendix).
(Edit the tenant, user, and usermail variables before each run.)
root@controller:~# cat tenant.sh
#!/bin/bash
# Create Tenant and User #
tenant=TenantB
user=UserB
usermail=userb@mutil_host.com
role=Member
# (if/else bodies restored along the usual pattern; they were elided in the source)
if keystone tenant-list | grep -q $tenant; then
    echo "Tenant $tenant already exists"
else
    keystone tenant-create --name $tenant
fi
if keystone user-list | grep -q $user; then
    echo "User $user already exists"
else
    keystone user-create --name $user --pass password --email $usermail
fi
keystone user-role-add --tenant $tenant --user $user --role $role
# Create virtual router and sub-network #
quantum --os-tenant-name $tenant --os-username $user --os-password password net-create $tenant-Net
# (tenant subnet CIDR is an assumption; pick a distinct 10.0.x.0/24 per tenant)
subnet_id=`quantum --os-tenant-name $tenant --os-username $user --os-password password \
    subnet-create $tenant-Net 10.0.2.0/24 | grep " id " | awk '{print $4}'`
quantum --os-tenant-name $tenant --os-username $user --os-password password router-create $tenant-R1
quantum --os-tenant-name $tenant --os-username $user --os-password password \
    router-interface-add $tenant-R1 $subnet_id
quantum router-gateway-set $tenant-R1 Ext-Net
Run the script: bash tenant.sh
(4) Prepare resources: create a new flavor (the boot step below uses flavor ID 6; the name and sizes here are placeholders):
nova flavor-create <name> 6 <ram_mb> <disk_gb> <vcpus>
View the tenants and users:
keystone tenant-list
keystone user-list
(5) View the network namespaces, which are distributed across the compute nodes (run ip netns on each compute node).
(6) Create a VM for a tenant:
nova --os-tenant-name TenantH --os-username UserH --os-password password \
    --os-auth-url="http://192.168.1.24:5000/v2.0/" list
nova --os-tenant-name TenantH --os-username UserH --os-password password \
    --os-auth-url="http://192.168.1.24:5000/v2.0/" boot --flavor 6 --image cirros vm005
(7) Ping the tenant's VM through the network namespace (see the sketch below).
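A sketch of the namespace ping, run on the compute node hosting the DHCP namespace (qdhcp-<net-id> is a placeholder; take the real value from ip netns):
ip netns
ip netns exec qdhcp-<net-id> ping -c 3 10.0.8.6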
(8) SSH into the VM and ping the other VMs.
The password for the cirros VM is cubswin:)
(9) Prepare a floating IP for TenantH's VM vm005.
The newly created floating IP is 192.168.2.17.
Key commands:
quantum floatingip-list
quantum --os-tenant-name TenantH --os-username UserH --os-password password \
    --os-auth-url="http://192.168.1.24:5000/v2.0/" net-list
quantum --os-tenant-name TenantH --os-username UserH --os-password password \
    --os-auth-url="http://192.168.1.24:5000/v2.0/" floatingip-create Ext-Net
(10) Associate floating IP [192.168.2.17] with TenantH's VM vm005 [10.0.8.6].
Look up vm005's private IP address [vm005 → 10.0.8.6]:
nova --os-tenant-name TenantH --os-username UserH --os-password password \
    --os-auth-url="http://192.168.1.24:5000/v2.0/" list
Look up the port of the floating IP [192.168.2.17]:
quantum floatingip-list
Look up the port ID of the fixed IP [10.0.8.6]:
quantum --os-tenant-name TenantH --os-username UserH --os-password password \
    --os-auth-url="http://192.168.1.24:5000/v2.0/" port-list
Associate the floating IP [192.168.2.17] with vm005 [10.0.8.6], using the floating IP ID and port ID found above (placeholders here):
quantum --os-tenant-name TenantH --os-username UserH --os-password password \
    --os-auth-url="http://192.168.1.24:5000/v2.0/" floatingip-associate <floatingip-id> <port-id>
(11) Verify that the floating IP works, then SSH into the VM through it.
The password for the cirros VM is cubswin:)
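A sketch of the verification, from a host on the 192.168.2.0/24 external network:
ping -c 3 192.168.2.17
ssh cirros@192.168.2.17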
Further tests of the multi-host behavior:
After a compute node is rebooted, VMs fail to start through nova, with the error:
libvirtError: Unable to add bridge qbrxxxxxxx port tapxxxxxxx: Invalid argument
The workaround is to edit the libvirt VIF driver:
vim /usr/share/pyshared/nova/virt/libvirt/vif.py
(The fix modifies lines 335-378 of vif.py; the code listing is omitted here.)
Both compute1 and compute2 run a DHCP agent, an L3 agent, and an Open vSwitch agent. You can manually stop one set of L3 and DHCP agents (for example, stop compute1's L3 agent and compute2's DHCP agent; or compute2's L3 agent and compute1's DHCP agent; or any single L3 or DHCP agent on either node). As long as at least one L3 agent and one DHCP agent are up somewhere in the cluster, the system keeps working: new tenants and VMs can still be created. This is the benefit of multi-host; the key commands are sketched below.
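A sketch of the failover exercise (service names as installed by the Ubuntu packages above):
# on compute1, for example: stop its L3 agent
service quantum-l3-agent stop
# on compute2: stop its DHCP agent
service quantum-dhcp-agent stop
# on the controller: watch agent state, then create a tenant and VM again to confirm
quantum agent-list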