标签:openstack
安装openstack k版本
OS:CentOS Linux release 7.0.1406 (Core)
host:
node1 (管理):10.10.0.10 10.20.0.10 10.30.0.10
node2(网络):10.10.0.20 10.20.0.20 10.30.0.20 192.168.74.211
node3(计算):10.10.0.30 10.20.0.30 10.30.0.30
1:环境准备
1):关闭防火墙
systemctl stop firewalld
2):关闭SELinux
vim /etc/selinux/config
...
SELINUX=disabled
setenforce 0
3):时间同步,管理节点做时间服务器,其它节点从管理节点同步时间
管理节点
yum install ntp
vim /etc/ntp.conf
...
server 127.127.1.0 # local clock
fudge 127.127.1.0 stratum 10
systemctl enable ntpd.service
systemctl start ntpd.service
[root@node1 ~]# ntpq -c peers
remote refid st t when poll reach delay offset jitter
==============================================================================
*LOCAL(0) .LOCL. 10 l 57 64 177 0.000 0.000 0.000
其它节点安装 ntp 服务后,将时间服务器直接指向管理节点即可
4):yum 源
yum install http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
yum install http://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm
示例
[root@node1 yum.repos.d]# cat media.repo
[media]
name=media
baseurl=http://vault.centos.org/7.0.1406/os/x86_64
enabled=1
gpgcheck=0
[root@node1 yum.repos.d]# cat rdo-release.repo
[openstack-kilo]
name=OpenStack Kilo Repository
baseurl=http://repos.fedorapeople.org/repos/openstack/openstack-kilo/el7/
skip_if_unavailable=0
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-kilo
[root@node1 yum.repos.d]# cat epel.repo
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
#baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
5):互信
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.0.10 node1
10.10.0.20 node2
10.10.0.30 node3
ssh-keygen
cat id_rsa.pub >> authorized_keys #进入authorized_keys复制并修改
cat authorized_keys
ssh-rsa ----- root@node1
ssh-rsa ----- root@node2
ssh-rsa ----- root@node3
scp -r /root/.ssh node2:/root/
scp -r /root/.ssh node3:/root/
2:安装数据库
管理节点
yum install mariadb mariadb-server MySQL-python
vim /etc/my.cnf.d/mariadb_openstack.cnf
[mysqld]
bind-address = 10.10.0.10
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
systemctl enable mariadb.service
systemctl start mariadb.service
mysql_secure_installation
#设置密码,后面全部输入Y
3:安装消息服务
yum install rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack openstack
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
4:安装keystone
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE keystone;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';
MariaDB [(none)]> flush PRIVILEGES ;
yum install openstack-keystone httpd mod_wsgi python-openstackclient memcached python-memcached
systemctl enable memcached.service
systemctl start memcached.service
vim /etc/keystone/keystone.conf
[DEFAULT]
verbose = True
admin_token = openstack
[database]
connection = mysql://keystone:keystone@10.10.0.10/keystone
[revoke]
driver = keystone.contrib.revoke.backends.sql.Revoke
[token]
provider = keystone.token.providers.uuid.Provider
driver = keystone.token.persistence.backends.memcache.Token
su -s /bin/sh -c "keystone-manage db_sync" keystone
vim /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /var/www/cgi-bin/keystone/main
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
LogLevel info
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /var/www/cgi-bin/keystone/admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
LogLevel info
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
</VirtualHost>
mkdir -p /var/www/cgi-bin/keystone
curl http://git.openstack.org/cgit/openstack/keystone/plain/httpd/keystone.py?h=stable/kilo| tee /var/www/cgi-bin/keystone/main /var/www/cgi-bin/keystone/admin
chown -R keystone:keystone /var/www/cgi-bin/keystone
chmod 755 /var/www/cgi-bin/keystone/*
systemctl enable httpd.service
systemctl start httpd.service
export OS_TOKEN=openstack
export OS_URL=http://10.10.0.10:35357/v2.0
keystone user-list
openstack service create --name keystone --description "OpenStack Identity" identity
openstack endpoint create --publicurl http://10.10.0.10:5000/v2.0 --internalurl http://10.10.0.10:5000/v2.0 --adminurl http://10.10.0.10:35357/v2.0 --region RegionOne identity
openstack project create --description "Admin Project" admin
openstack user create --password-prompt admin
openstack role create admin
openstack role add --project admin --user admin admin
openstack project create --description "Service Project" service
openstack project create --description "Demo Project" demo
openstack user create --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
unset OS_TOKEN OS_URL
openstack --os-auth-url http://10.10.0.10:35357 --os-project-name admin --os-username admin --os-auth-type password token issue
openstack --os-auth-url http://10.10.0.10:35357 --os-project-name admin --os-username admin --os-auth-type password project list
vim openrc.sh
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://10.10.0.10:35357/v3
source openrc.sh
openstack token issue
keystone user-list
5:安装glance
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE glance;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';
MariaDB [(none)]> flush PRIVILEGES ;
source openrc.sh
openstack user create --password-prompt glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image service" image
openstack endpoint create --publicurl http://10.10.0.10:9292 --internalurl http://10.10.0.10:9292 --adminurl http://10.10.0.10:9292 --region RegionOne image
yum install openstack-glance python-glance python-glanceclient
vim /etc/glance/glance-api.conf
[DEFAULT]
notification_driver = noop
verbose = True
[oslo_policy]
[database]
connection = mysql://glance:glance@node1/glance
[oslo_concurrency]
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[store_type_location_strategy]
[profiler]
[task]
[taskflow_executor]
[glance_store]
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
vim /etc/glance/glance-registry.conf
[DEFAULT]
notification_driver = noop
verbose = True
[oslo_policy]
[database]
connection = mysql://glance:glance@10.10.0.10/glance
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
wget -P /tmp/images http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
glance image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /tmp/images/cirros-0.3.4-x86_64-disk.img
glance image-update --property architecture=x86_64 --property os_distro=ubuntu --property os_version=0.3.3 --property vol_size=1 4aaaebae-4c34-45c9-9b7d-fb8911de7c6e
glance image-list
6:安装nova
管理节点
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';
MariaDB [(none)]> flush PRIVILEGES ;
openstack user create --password-prompt nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --publicurl http://10.10.0.10:8774/v2/%\(tenant_id\)s --internalurl http://10.10.0.10:8774/v2/%\(tenant_id\)s --adminurl http://10.10.0.10:8774/v2/%\(tenant_id
\)s --region RegionOne compute
yum install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
vim /etc/nova/nova.conf
[DEFAULT]
my_ip = 10.10.0.10
rpc_backend = rabbit
auth_strategy = keystone
vncserver_listen = 10.10.0.10
vncserver_proxyclient_address = 10.10.0.10
verbose = True
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
# 下面两行是 nova-network 的设置,会覆盖上面的 neutron 设置(本文使用 neutron),故注释掉
#network_api_class = nova.network.api.API
#security_group_api = nova
[api_database]
[barbican]
[cells]
[cinder]
[conductor]
[database]
connection = mysql://nova:nova@10.10.0.10/nova
[ephemeral_storage_encryption]
[glance]
host = node1
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = nova
[libvirt]
[metrics]
[neutron]
url = http://10.10.0.10:9696
auth_strategy = keystone
admin_auth_url = http://10.10.0.10:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = neutron
service_metadata_proxy = True
metadata_proxy_shared_secret = 10.10.0.10
[osapi_v3]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[workarounds]
[xenserver]
[zookeeper]
[matchmaker_redis]
[matchmaker_ring]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
su -s /bin/sh -c "nova-manage db sync" nova
systemctl enable openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-
nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-
nova-novncproxy.service
计算节点
yum install openstack-nova-compute sysfsutils
[root@node3 ~]# grep -Ev '^#|^$' /etc/nova/nova.conf
[DEFAULT]
verbose = True
my_ip = 10.10.0.30
rpc_backend = rabbit
auth_strategy = keystone
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.10.0.30
novncproxy_base_url = http://10.10.0.10:6080/vnc_auto.html
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
[barbican]
[cells]
[cinder]
[conductor]
[database]
[ephemeral_storage_encryption]
[glance]
host = 10.10.0.10
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = nova
[libvirt]
virt_type = qemu
[metrics]
[neutron]
url = http://10.10.0.10:9696
auth_strategy = keystone
admin_auth_url = http://10.10.0.10:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = neutron
[osapi_v3]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[workarounds]
[xenserver]
[zookeeper]
[matchmaker_redis]
[matchmaker_ring]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
grep -E -c '(vmx|svm)' /proc/cpuinfo
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
管理节点
[root@node1 ~]# nova service-list
+----+------------------+-------+----------+---------+-------+----------------------------+-----------------+
| Id | Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
+----+------------------+-------+----------+---------+-------+----------------------------+-----------------+
| 1 | nova-conductor | node1 | internal | enabled | up | 2015-12-28T22:43:53.000000 | - |
| 2 | nova-consoleauth | node1 | internal | enabled | up | 2015-12-28T22:43:53.000000 | - |
| 3 | nova-cert | node1 | internal | enabled | up | 2015-12-28T22:43:52.000000 | - |
| 4 | nova-scheduler | node1 | internal | enabled | up | 2015-12-28T22:43:54.000000 | - |
| 5 | nova-compute | node3 | nova | enabled | up | 2015-12-28T22:43:56.000000 | - |
+----+------------------+-------+----------+---------+-------+----------------------------+-----------------+
7:neutron
管理节点
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE neutron;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';
openstack user create --password-prompt neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --publicurl http://10.10.0.10:9696 --adminurl http://10.10.0.10:9696 --internalurl http://10.10.0.10:9696 --region RegionOne network
yum install openstack-neutron openstack-neutron-ml2 python-neutronclient
vim /etc/neutron/neutron.conf
[DEFAULT]
verbose = True
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://10.10.0.10:8774/v2
rpc_backend = rabbit
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://10.10.0.10:8774/v2
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
[database]
connection = mysql://neutron:neutron@10.10.0.10/neutron
rpc_backend = rabbit
[nova]
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service
systemctl enable neutron-server.service
systemctl start neutron-server.service
网络节点
[root@node2 ~]# cat /etc/sysctl.conf
...
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
sysctl -p
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
[database]
[nova]
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
flat_networks = external
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 10.10.0.20
bridge_mappings = external:br-ex
[agent]
tunnel_types = gre
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge =
router_delete_namespaces = True
verbose = True
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
vim /etc/neutron/dnsmasq-neutron.conf
dhcp-option-force=26,1454
pkill dnsmasq
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_region = RegionOne
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
nova_metadata_ip = 10.10.0.10
metadata_proxy_shared_secret = 10.10.0.10
verbose = True
systemctl enable openvswitch.service
systemctl start openvswitch.service
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eno33554992
ethtool -K eno33554992 gro off
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-ovs-cleanup.service
systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
计算节点
vim /etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
sysctl -p
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
[database]
[nova]
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
systemctl restart neutron-openvswitch-agent.service
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
[database]
[nova]
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
# 此处应为计算节点(node3)自身的隧道接口 IP;原文的 10.20.0.20 是网络节点地址,且与网络节点的 local_ip(10.10.0.20)不在同一网段
local_ip = 10.10.0.30
[agent]
tunnel_types = gre
systemctl enable openvswitch.service
systemctl start openvswitch.service
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl restart openstack-nova-compute.service
systemctl enable neutron-openvswitch-agent.service
systemctl start neutron-openvswitch-agent.service
管理节点
neutron ext-list
neutron net-create ext-net --router:external --provider:physical_network external --provider:network_type flat
neutron subnet-create ext-net 192.168.124.0/24 --name ext-subnet --allocation-pool start=192.168.124.110,end=192.168.124.200 --disable-dhcp --gateway 192.168.124.1
neutron net-create demo-net
neutron subnet-create demo-net 192.168.1.0/24 --name demo-subnet --gateway 192.168.1.1
neutron router-create demo-router
neutron router-interface-add demo-router demo-subnet
neutron router-gateway-set demo-router ext-net
8:安装dashboard
yum install openstack-dashboard httpd mod_wsgi memcached python-memcached
vim /etc/openstack-dashboard/local_settings
...
OPENSTACK_HOST = "10.10.0.10"
ALLOWED_HOSTS = ['*']
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
...
setsebool -P httpd_can_network_connect on
chown -R apache:apache /usr/share/openstack-dashboard/static
systemctl enable httpd.service memcached.service
systemctl start httpd.service memcached.service
本文出自 “刘福” 博客,请务必保留此出处http://liufu1103.blog.51cto.com/9120722/1729367
标签:openstack
原文地址:http://liufu1103.blog.51cto.com/9120722/1729367