一、简介
本次实验环境搭建需要两台物理主机
操作系统 节点 主机名 IP地址 网卡 centos7.6 控制节点/网络节点 controller 192.168.80.11 ens33 centos7.6 计算节点/存储节点 compute 192.168.80.12 ens33 本次环境涉及到的密码全部为 opensource
二、基础环境搭建
以下配置在controller节点/compute节点执行
-
更改主机名
[root@controller ~]# hostnamectl set-hostname controller [root@compute ~]# hostnamectl set-hostname compute
-
修改/etc/hosts文件
[root@controller ~]# vim /etc/hosts 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 192.168.80.11 controller 192.168.80.12 compute [root@compute ~]# vim /etc/hosts 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 192.168.80.11 controller 192.168.80.12 compute
-
关闭防火墙和SELINUX
[root@controller ~]# systemctl stop firewalld && systemctl disable firewalld [root@controller ~]# setenforce 0 [root@controller ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config [root@compute ~]# systemctl stop firewalld && systemctl disable firewalld [root@compute ~]# setenforce 0 [root@compute ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
-
配置网络YUM源
替换阿里源。更多源下载请访问:https://developer.aliyun.com/mirror/
compute节点操作和controller节点操作一样
[root@controller ~]# mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup [root@controller ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo [root@controller ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
-
配置NTP
为了简化操作,我们直接配置网络源。
[root@controller ~]# yum -y install ntpdate [root@controller ~]# ntpdate time1.aliyun.com [root@compute ~]# yum -y install ntpdate [root@compute ~]# ntpdate time1.aliyun.com
-
安装OpenStack包
[root@controller ~]# yum install centos-release-openstack-rocky -y [root@controller ~]# yum install python-openstackclient -y [root@controller ~]# yum install openstack-selinux -y [root@compute ~]# yum install centos-release-openstack-rocky -y [root@compute ~]# yum install python-openstackclient -y [root@compute ~]# yum install openstack-selinux -y
以下配置在controller节点执行
-
安装Mariadb
[root@controller ~]# yum install mariadb mariadb-server python2-PyMySQL -y [root@controller ~]# vim /etc/my.cnf.d/openstack.cnf [mysqld] bind-address = 192.168.80.11 default-storage-engine = innodb innodb_file_per_table = on max_connections = 4096 collation-server = utf8_general_ci character-set-server = utf8 [root@controller ~]# systemctl start mariadb && systemctl enable mariadb [root@controller ~]# mysql_secure_installation Enter current password for root (enter for none): Set root password? [Y/n] y Remove anonymous users? [Y/n] y Disallow root login remotely? [Y/n] n Remove test database and access to it? [Y/n] y Reload privilege tables now? [Y/n] y
Remove anonymous users? [Y/n] y # 是否移除anonymous用户
Disallow root login remotely? [Y/n] n # 是否禁止远程登录
Remove test database and access to it? [Y/n] y # 是否移除默认的演示数据库
Reload privilege tables now? [Y/n] y # 是否重新加载权限表?
-
安装Rabbitmq
[root@controller ~]# yum install rabbitmq-server -y [root@controller ~]# systemctl enable rabbitmq-server.service && systemctl start rabbitmq-server.service [root@controller ~]# rabbitmqctl add_user openstack opensource Creating user "openstack" ... [root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*" Setting permissions for user "openstack" in vhost "/" ...
client端通信端口:5672
管理端口:15672
server间内部通信端口:25672
erlang发现端口:4369
-
安装Memcached
[root@controller ~]# yum install memcached python-memcached [root@controller ~]# vim /etc/sysconfig/memcached PORT="11211" USER="memcached" MAXCONN="1024" CACHESIZE="64" OPTIONS="-l 127.0.0.1,::1,192.168.80.11" [root@controller ~]# systemctl enable memcached.service && systemctl start memcached.service
三、安装配置Keystone
以下配置全部在controller节点
-
创建Keystone数据库
[root@controller ~]# mysql -uroot -p MariaDB [(none)]> CREATE DATABASE keystone; MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'opensource';
-
配置Keystone文件
[root@controller ~]# yum install openstack-keystone httpd mod_wsgi [root@controller ~]# cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak [root@controller ~]# > /etc/keystone/keystone.conf [root@controller ~]# vim !$ [database] connection = mysql+pymysql://keystone:opensource@controller/keystone [token] provider = fernet
-
将配置填充数据库
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone [root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone [root@controller ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
-
创建Keystone endpoint
[root@controller ~]# keystone-manage bootstrap --bootstrap-password opensource \ --bootstrap-admin-url http://controller:5000/v3/ \ --bootstrap-internal-url http://controller:5000/v3/ \ --bootstrap-public-url http://controller:5000/v3/ \ --bootstrap-region-id RegionOne
-
配置httpd
[root@controller ~]# vim /etc/httpd/conf/httpd.conf ServerName controller [root@controller ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/ [root@controller ~]# systemctl enable httpd.service && systemctl start httpd.service
-
创建openstack全局配置文件
[root@controller ~]# vim admin.sh export OS_USERNAME=admin export OS_PASSWORD=opensource export OS_PROJECT_NAME=admin export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_DOMAIN_NAME=Default export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3
-
创建域、项目、用户和角色
[root@controller ~]# source admin.sh [root@controller ~]# openstack domain create --description "An Example Domain" example [root@controller ~]# openstack project create --domain default --description "Service Project" service [root@controller ~]# openstack project create --domain default --description "Demo Project" myproject [root@controller ~]# openstack user create --domain default --password opensource myuser [root@controller ~]# openstack role create myrole [root@controller ~]# openstack role add --project myproject --user myuser myrole
openstack role add --project myproject --user myuser myrole 命令没有输出
四、安装配置Glance
以下配置全部在controller节点
-
创建Glance数据库
[root@controller ~]# mysql -u root -p MariaDB [(none)]> CREATE DATABASE glance; MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'opensource';
-
配置Glance服务、用户
[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image [root@controller ~]# openstack user create --domain default --password opensource glance [root@controller ~]# openstack role add --project service --user glance admin
-
配置Glance endpoint
[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292 [root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292 [root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
-
配置Glance文件
[root@controller ~]# yum install openstack-glance [root@controller ~]# cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak [root@controller ~]# > /etc/glance/glance-api.conf [root@controller ~]# vim !$ [database] connection = mysql+pymysql://glance:opensource@controller/glance [keystone_authtoken] www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = glance password = opensource [paste_deploy] flavor = keystone [glance_store] stores = file,http default_store = file filesystem_store_datadir = /var/lib/glance/images/ [root@controller ~]# cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak [root@controller ~]# > /etc/glance/glance-registry.conf [root@controller ~]# vim !$ [database] connection = mysql+pymysql://glance:opensource@controller/glance [keystone_authtoken] www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = glance password = opensource [paste_deploy] flavor = keystone
-
将配置填充数据库,启动服务
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance [root@controller ~]# systemctl enable openstack-glance-api.service openstack-glance-registry.service [root@controller ~]# systemctl start openstack-glance-api.service openstack-glance-registry.service
-
验证
[root@controller ~]# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img [root@controller ~]# openstack image create "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare [root@controller ~]# openstack image list +--------------------------------------+--------+--------+ | ID | Name | Status | +--------------------------------------+--------+--------+ | 40669d38-2c0d-4964-b060-f9f933c4aa64 | cirros | active | +--------------------------------------+--------+--------+
五、安装配置Nova
以下配置在controller节点
-
创建Nova数据库
[root@controller ~]# mysql -u root -p MariaDB [(none)]> CREATE DATABASE nova_api; MariaDB [(none)]> CREATE DATABASE nova; MariaDB [(none)]> CREATE DATABASE nova_cell0; MariaDB [(none)]> CREATE DATABASE placement; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'opensource';
-
创建Nova服务、用户
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute [root@controller ~]# openstack user create --domain default --password opensource nova [root@controller ~]# openstack role add --project service --user nova admin [root@controller ~]# openstack service create --name placement --description "Placement API" placement [root@controller ~]# openstack user create --domain default --password opensource placement [root@controller ~]# openstack role add --project service --user placement admin
-
创建Nova endpoint
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1 [root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1 [root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1 [root@controller ~]# openstack endpoint create --region RegionOne placement public http://controller:8778 [root@controller ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778 [root@controller ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
-
配置nova文件
[root@controller ~]# yum install openstack-nova-api openstack-nova-conductor \ openstack-nova-console openstack-nova-novncproxy \ openstack-nova-scheduler openstack-nova-placement-api [root@controller ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.bak [root@controller ~]# > /etc/nova/nova.conf [root@controller ~]# vim !$ [DEFAULT] enabled_apis = osapi_compute,metadata transport_url = rabbit://openstack:opensource@controller my_ip = 192.168.80.11 use_neutron = true firewall_driver = nova.virt.firewall.NoopFirewallDriver [api_database] connection = mysql+pymysql://nova:opensource@controller/nova_api [database] connection = mysql+pymysql://nova:opensource@controller/nova [placement_database] connection = mysql+pymysql://placement:opensource@controller/placement [api] auth_strategy = keystone [keystone_authtoken] auth_url = http://controller:5000/v3 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = nova password = opensource [vnc] enabled = true server_listen = $my_ip server_proxyclient_address = $my_ip [glance] api_servers = http://controller:9292 [oslo_concurrency] lock_path = /var/lib/nova/tmp [placement] region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = opensource [neutron] url = http://controller:9696 auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = opensource service_metadata_proxy = true metadata_proxy_shared_secret = opensource [scheduler] discover_hosts_in_cells_interval = 300 [root@controller ~]# vim /etc/httpd/conf.d/00-nova-placement-api.conf <Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> 
</Directory> [root@controller ~]# systemctl restart httpd
此处[neutron]内容,应在网络服务时配置,我们提前配置。
因为包bug,所以我们需要配置httpd配置文件。
-
将配置填充数据库,启动服务
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova [root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova [root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova [root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova [root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova +-------+--------------------------------------+ | Name | UUID | +-------+--------------------------------------+ | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 | | cell0 | 00000000-0000-0000-0000-000000000000 | +-------+--------------------------------------+ [root@controller ~]# systemctl enable openstack-nova-api.service \ openstack-nova-consoleauth openstack-nova-scheduler.service \ openstack-nova-conductor.service openstack-nova-novncproxy.service [root@controller ~]# systemctl start openstack-nova-api.service \ openstack-nova-consoleauth openstack-nova-scheduler.service \ openstack-nova-conductor.service openstack-nova-novncproxy.service
-
验证
[root@controller ~]# openstack compute service list +----+------------------+------------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+------------------+------------+----------+---------+-------+----------------------------+ | 1 | nova-scheduler | controller | internal | enabled | up | 2020-08-14T03:16:58.000000 | | 3 | nova-consoleauth | controller | internal | enabled | up | 2020-08-14T03:16:59.000000 | | 4 | nova-conductor | controller | internal | enabled | up | 2020-08-14T03:16:58.000000 | +----+------------------+------------+----------+---------+-------+----------------------------+
以下配置在compute节点
-
配置nova文件
[root@compute ~]# yum install openstack-nova-compute [root@compute ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.bak [root@compute ~]# > /etc/nova/nova.conf [root@compute ~]# vim !$ [DEFAULT] enabled_apis = osapi_compute,metadata transport_url = rabbit://openstack:opensource@controller my_ip = 192.168.80.12 use_neutron = true firewall_driver = nova.virt.firewall.NoopFirewallDriver [api] auth_strategy = keystone [keystone_authtoken] auth_url = http://controller:5000/v3 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = nova password = opensource [vnc] enabled = true server_listen = 0.0.0.0 server_proxyclient_address = $my_ip novncproxy_base_url = http://192.168.80.11:6080/vnc_auto.html [glance] api_servers = http://controller:9292 [oslo_concurrency] lock_path = /var/lib/nova/tmp [placement] region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = opensource [libvirt] virt_type = qemu [neutron] url = http://controller:9696 auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = opensource
配置[libvirt]内容之后,不需要在CPU中开启虚拟化,建议按配置文件走。
-
启动服务
[root@compute ~]# systemctl enable libvirtd.service openstack-nova-compute.service [root@compute ~]# systemctl start libvirtd.service openstack-nova-compute.service
-
验证
[root@controller ~]# openstack compute service list +----+------------------+------------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+------------------+------------+----------+---------+-------+----------------------------+ | 1 | nova-scheduler | controller | internal | enabled | up | 2020-08-14T03:16:58.000000 | | 3 | nova-consoleauth | controller | internal | enabled | up | 2020-08-14T03:16:59.000000 | | 4 | nova-conductor | controller | internal | enabled | up | 2020-08-14T03:16:58.000000 | | 8 | nova-compute | compute | nova | enabled | up | 2020-08-14T03:16:52.000000 | +----+------------------+------------+----------+---------+-------+----------------------------+
六、安装配置Neutron
以下配置在controller节点
-
创建Neutron数据库
[root@controller ~]# mysql -uroot -p MariaDB [(none)] CREATE DATABASE neutron; MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'opensource'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'opensource';
-
创建Neutron服务,用户
[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network [root@controller ~]# openstack user create --domain default --password opensource neutron [root@controller ~]# openstack role add --project service --user neutron admin
-
创建Neutron endpoint
[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696 [root@controller ~]# openstack endpoint create --region RegionOne network internal http://controller:9696 [root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
-
配置Neutron文件
[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 \ openstack-neutron-linuxbridge ebtables [root@controller ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak [root@controller ~]# > /etc/neutron/neutron.conf [root@controller ~]# vim !$ [DEFAULT] core_plugin = ml2 service_plugins = router allow_overlapping_ips = true transport_url = rabbit://openstack:opensource@controller auth_strategy = keystone notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [database] connection = mysql+pymysql://neutron:opensource@controller/neutron [keystone_authtoken] www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = opensource [nova] auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = opensource [oslo_concurrency] lock_path = /var/lib/neutron/tmp [root@controller ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak [root@controller ~]# > /etc/neutron/plugins/ml2/ml2_conf.ini [root@controller ~]# vim !$ [ml2] type_drivers = flat,vlan,vxlan tenant_network_types = vxlan mechanism_drivers = linuxbridge,l2population extension_drivers = port_security [ml2_type_flat] flat_networks = provider [securitygroup] enable_ipset = true [ml2_type_vxlan] vni_ranges = 1:1000 [root@controller ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak [root@controller ~]# > /etc/neutron/plugins/ml2/linuxbridge_agent.ini [root@controller ~]# vim [linux_bridge] physical_interface_mappings = provider:ens33 [vxlan] enable_vxlan = true local_ip = 192.168.80.11 l2_population = true [securitygroup] enable_security_group = true firewall_driver = 
neutron.agent.linux.iptables_firewall.IptablesFirewallDriver [root@controller ~]# cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak [root@controller ~]# > /etc/neutron/l3_agent.ini [root@controller ~]# vim !$ [root@controller ~]# cat /etc/neutron/l3_agent.ini [DEFAULT] interface_driver = linuxbridge [root@controller ~]# cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak [root@controller ~]# > /etc/neutron/dhcp_agent.ini [root@controller ~]# vim !$ [DEFAULT] interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true [root@controller ~]# cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak [root@controller ~]# > /etc/neutron/metadata_agent.ini [root@controller ~]# vim !$ [DEFAULT] nova_metadata_host = controller metadata_proxy_shared_secret = opensource
-
将配置填充数据库,启动服务
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini [root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron [root@controller ~]# systemctl restart openstack-nova-api.service [root@controller ~]# systemctl enable neutron-server.service \ neutron-linuxbridge-agent.service neutron-dhcp-agent.service \ neutron-metadata-agent.service [root@controller ~]# systemctl start neutron-server.service \ neutron-linuxbridge-agent.service neutron-dhcp-agent.service \ neutron-metadata-agent.service [root@controller ~]# systemctl enable neutron-l3-agent.service [root@controller ~]# systemctl start neutron-l3-agent.service
以下配置在compute节点
-
配置Neutron文件
[root@compute ~]# yum install openstack-neutron-linuxbridge ebtables ipset [root@compute ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak [root@compute ~]# > /etc/neutron/neutron.conf [root@compute ~]# vim !$ [DEFAULT] transport_url = rabbit://openstack:opensource@controller auth_strategy = keystone [keystone_authtoken] www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = opensource [oslo_concurrency] lock_path = /var/lib/neutron/tmp [root@compute ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak [root@compute ~]# > /etc/neutron/plugins/ml2/linuxbridge_agent.ini [root@compute ~]# vim !$ [root@compute ~]# cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini [linux_bridge] physical_interface_mappings = provider:ens33 [vxlan] enable_vxlan = true local_ip = 192.168.80.12 l2_population = true [securitygroup] enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
-
启动服务
[root@compute ~]# systemctl restart openstack-nova-compute.service [root@compute ~]# systemctl enable neutron-linuxbridge-agent.service [root@compute ~]# systemctl start neutron-linuxbridge-agent.service
-
验证
[root@controller ~]# openstack network agent list +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+ | 2499ce2f-02a3-4701-9e53-dfda82461ed4 | DHCP agent | controller | nova | :-) | UP | neutron-dhcp-agent | | 95186cd1-da1d-4c2f-94ff-fec095157eda | Linux bridge agent | controller | None | :-) | UP | neutron-linuxbridge-agent | | aeba51e2-0c94-4f1a-9d1f-708ebbcb13d2 | Metadata agent | controller | None | :-) | UP | neutron-metadata-agent | | d29d5e7b-b9d2-4303-afd9-d5f6310a9fdd | L3 agent | controller | nova | :-) | UP | neutron-l3-agent | | f3e158a0-ea39-4e9b-8326-7f3071c9c0dd | Linux bridge agent | compute | None | :-) | UP | neutron-linuxbridge-agent | +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
七、安装配置Horizon
-
配置Horizon文件
[root@controller ~]# yum install openstack-dashboard -y # 配置仪表板以在controller节点上使用OpenStack服务 [root@controller ~]# vim /etc/openstack-dashboard/local_settings OPENSTACK_HOST = "192.168.80.11" # 配置允许访问的主机列表 ALLOWED_HOSTS = ['*', 'controller'] # 配置memcached会话存储服务 SESSION_ENGINE = 'django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '192.168.80.11:11211', } } # 启用Identity API版本3 OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST # 启用对域的支持 OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True # 配置API版本 OPENSTACK_API_VERSIONS = { "identity": 3, "image": 2, "volume": 2, } # 配置Default为通过仪表板创建的用户的默认域 OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default" # 配置user为您通过仪表板创建的用户的默认角色 OPENSTACK_KEYSTONE_DEFAULT_ROLE = "myrole" # 配置时区 TIME_ZONE = "Asia/Shanghai" [root@controller ~]# vim /etc/httpd/conf.d/openstack-dashboard.conf WSGIApplicationGroup %{GLOBAL}
-
启动
[root@controller ~]# systemctl restart httpd.service memcached.service
-
验证
在本机关闭防火墙,输入192.168.80.11/dashboard 进行访问登陆。