码迷,mamicode.com
首页 > 其他好文 > 详细

openstack K版本和ceph对接

时间:2016-06-30 23:37:56      阅读:677      评论:0      收藏:0      [点我收藏+]

标签:openstack

本次环境:

openstack(K版本):  控制和计算各一台,并且安装到dashboard,可以正常创建虚拟机(搭建过程见官方文档 http://docs.openstack.org/kilo/install-guide/install/yum/content/)

ceph: 共3台,两台节点一台deploy部署机(搭建过程见官方文档 http://ceph.com/)

下面在控制节点安装cinder,在控制节点上操作:

##创建数据库并且授权

[root@contorller ~]# mysql

Welcome to the MariaDB monitor.  Commands end with ; or \g.

Your MariaDB connection id is 2439

Server version: 5.5.47-MariaDB MariaDB Server


Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.


Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.


MariaDB [(none)]> CREATE DATABASE cinder;

Query OK, 1 row affected (0.00 sec)


MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \

    ->   IDENTIFIED BY 'awcloud';

Query OK, 0 rows affected (0.15 sec)


MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \

    ->   IDENTIFIED BY 'awcloud';

Query OK, 0 rows affected (0.01 sec)

##创建用户、端点等信息

[root@contorller ~]# source admin-openrc.sh

[root@contorller ~]# openstack user create --password-prompt cinder

[root@contorller ~]# openstack role add --project service --user cinder admin

[root@contorller ~]# openstack service create --name cinder \

>   --description "OpenStack Block Storage" volume

[root@contorller ~]# openstack service create --name cinderv2 \

>   --description "OpenStack Block Storage" volumev2

[root@contorller ~]# openstack endpoint create \

>   --publicurl http://controller:8776/v2/%\(tenant_id\)s \

>   --internalurl http://controller:8776/v2/%\(tenant_id\)s \

>   --adminurl http://controller:8776/v2/%\(tenant_id\)s \

>   --region RegionOne \

>   volume

[root@contorller ~]# openstack endpoint create \

>   --publicurl http://controller:8776/v2/%\(tenant_id\)s \

>   --internalurl http://controller:8776/v2/%\(tenant_id\)s \

>   --adminurl http://controller:8776/v2/%\(tenant_id\)s \

>   --region RegionOne \

>   volumev2

安装cinder服务

[root@contorller ~]# yum install openstack-cinder python-cinderclient python-oslo-db -y

修改配置文件

[root@contorller ~]# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bk

[root@contorller ~]# vim /etc/cinder/cinder.conf

[root@contorller ~]# egrep -v "^#|^$" /etc/cinder/cinder.conf

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

my_ip = 192.168.8.199

verbose = True

[BRCD_FABRIC_EXAMPLE]

[CISCO_FABRIC_EXAMPLE]

[database]

connection = mysql://cinder:awcloud@controller/cinder

[fc-zone-manager]

[keymgr]

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = cinder

password = awcloud

[matchmaker_redis]

[matchmaker_ring]

[oslo_messaging_amqp]

[oslo_messaging_qpid]

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = guest

rabbit_password = guest

[profiler]

[oslo_concurrency]

lock_path = /var/lock/cinder

重启服务

[root@contorller ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service

[root@contorller ~]# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service

###为contronller节点配置实现接管ceph

[root@contorller ~]#  yum install python-rbd ceph-common -y

[root@compute ~]# yum install python-rbd ceph-common -y

把验证文件和ceph的配置文件拷贝到控制节点

[root@ceph-admin ceph]# scp ceph.client.admin.keyring ceph.conf 192.168.8.199:/etc/ceph/

验证此时在controller节点执行ceph命令是否成功

[root@contorller ~]# ceph -s

    cluster 3155ed83-9e92-43da-90f1-c7715148f48f

     health HEALTH_OK

     monmap e1: 1 mons at {node1=192.168.8.35:6789/0}

            election epoch 2, quorum 0 node1

     osdmap e47: 2 osds: 2 up, 2 in

      pgmap v1325: 64 pgs, 1 pools, 0 bytes data, 0 objects

            80896 kB used, 389 GB / 389 GB avail

                  64 active+clean

##为cinder、nova、glance创建存储池(pool)

[root@contorller ~]# ceph osd pool create volumes 50

pool 'volumes' created

[root@contorller ~]# ceph osd pool create images 50

pool 'images' created

[root@contorller ~]# ceph osd pool create backups 50

pool 'backups' created

[root@contorller ~]# ceph osd pool create vms 50

pool 'vms' created

[root@contorller ~]#

为ceph客户端做认证

[root@contorller ~]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'

[root@contorller ~]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'

[root@contorller ~]# ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'

[root@contorller ~]# 

##创建用户的认证文件

[root@contorller ceph]# ceph auth get-or-create client.glance|tee /etc/ceph/ceph.client.glance.keyring

[client.glance]

key = AQANyXRXb5l7CRAA2yVyM92BIm+U3QDseZGqow==

[root@contorller ceph]# chown glance:glance /etc/ceph/ceph.client.glance.keyring

[root@contorller ceph]# ceph auth get-or-create client.cinder | sudo tee /etc/ceph/ceph.client.cinder.keyring

[client.cinder]

key = AQDkyHRXvOTwARAAbRha/MtmqPcJm0RF9jcrsQ==

[root@contorller ceph]# sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring

[root@contorller ceph]# ceph auth get-or-create client.cinder-backup |sudo tee /etc/ceph/ceph.client.cinder-backup.keyring

[client.cinder-backup]

key = AQAVyXRXQDKFBRAAtY9DuiGGRSTBDu0MRckXbA==

[root@contorller ceph]#  chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring

[root@contorller ceph]# 

[root@contorller ceph]# 

##把/etc/ceph/ceph.client.cinder.keyring用户认证文件拷贝到计算节点

[root@contorller ceph]# scp /etc/ceph/ceph.client.cinder.keyring compute:/etc/ceph/

##在compute节点创建libvirt的key

[root@compute ~]# uuidgen

457eb676-33da-42ec-9a8c-9293d545c337

cat > secret.xml <<EOF

<secret ephemeral='no' private='no'>

  <uuid>457eb676-33da-42ec-9a8c-9293d545c337</uuid>

  <usage type='ceph'>

    <name>client.cinder secret</name>

  </usage>

</secret>

EOF

[root@compute ~]# sudo virsh secret-define --file secret.xml

[root@compute ~]# ceph auth get-key client.cinder > client.cinder.key    ##（注：需先导出 client.cinder 的 key 到本机文件，原文缺少此步）

[root@compute ~]# sudo virsh secret-set-value --secret 457eb676-33da-42ec-9a8c-9293d545c337 --base64 $(cat client.cinder.key)

###为glance对接ceph

vi /etc/glance/glance-api.conf

[DEFAULT]

...

default_store=rbd

rbd_store_user=glance

rbd_store_pool=images

show_image_direct_url=True

（注：Kilo 版本官方文档建议把 rbd 相关选项配置在 [glance_store] 段，写作 stores=rbd、rbd_store_pool=images、rbd_store_user=glance、rbd_store_ceph_conf=/etc/ceph/ceph.conf；此处沿用旧式 [DEFAULT] 写法，请按实际版本确认是否仍被支持。）

[root@contorller ceph]# systemctl restart openstack-glance-api.service 

[root@contorller ceph]# systemctl restart openstack-glance-registry.service

##为cinder和ceph对接

[root@contorller ceph]# vim /etc/cinder/cinder.conf

[DEFAULT]

volume_driver=cinder.volume.drivers.rbd.RBDDriver

rbd_pool=volumes

rbd_ceph_conf=/etc/ceph/ceph.conf

rbd_flatten_volume_from_snapshot=false

rbd_max_clone_depth=5

glance_api_version=2

rbd_user=cinder

rbd_secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337

[root@contorller ceph]# systemctl restart openstack-cinder-api.service 

[root@contorller ceph]# systemctl restart openstack-cinder-volume.service    ##（注：原文此处误写为重启 glance-registry，修改 cinder.conf 后应重启 cinder-volume 服务）

##为cinder backup对接ceph

[DEFAULT]

backup_driver=cinder.backup.drivers.ceph

backup_ceph_conf=/etc/ceph/ceph.conf

backup_ceph_user=cinder-backup

backup_ceph_chunk_size=134217728

backup_ceph_pool=backups

backup_ceph_stripe_unit=0

backup_ceph_stripe_count=0

restore_discard_excess_bytes=true


[root@contorller ceph]# systemctl restart openstack-cinder-backup.service 

为nova对接ceph 

[root@compute ~]# vim  /etc/nova/nova.conf

[DEFAULT]

libvirt_images_type=rbd

libvirt_images_rbd_pool=vms

libvirt_images_rbd_ceph_conf=/etc/ceph/ceph.conf

rbd_user=cinder

rbd_secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337

libvirt_inject_password=false

libvirt_inject_key=false

libvirt_inject_partition=-2

libvirt_live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST"

（注：自 Juno 起这些 libvirt_ 前缀选项已移入 [libvirt] 段并去掉前缀，如 images_type=rbd、images_rbd_pool=vms、images_rbd_ceph_conf=/etc/ceph/ceph.conf；Kilo 下建议写在 [libvirt] 段，请按实际版本确认。）


[root@compute ~]# systemctl restart openstack-nova-compute.service

至此已经全部完成!下面进行验证操作:

创建一个虚拟机磁盘作用在云硬盘上,若出现报错,查看日志:

tailf  /var/log/cinder/volume.log 

2016-06-24 03:21:00.458 58907 ERROR oslo_messaging.rpc.dispatcher [req-41df406d-44b9-4e59-b317-faafcdd880c7 9d20f58520ad43658dceda03cf4e266c dce7915317f14e6aacad0b6ef84c4483 - - -] Exception during message handling: [Errno 13] Permission denied: ‘/var/lock/cinder‘

查看是否有这个目录

[root@contorller cinder]# ll /var/lock/cinder

ls: cannot access /var/lock/cinder: No such file or directory

##创建此目录

[root@contorller cinder]# mkdir /var/lock/cinder -p

[root@contorller cinder]# chown cinder.cinder /var/lock/cinder/

创建一台云主机,使用 rbd 命令验证卷是否落在 ceph 的 volumes 池中

[root@contorller images]# rbd ls volumes

volume-8a1ff9c3-0dbd-41d7-a46b-ebaa45bc2230

现在创建的虚拟机已经在ceph集群中了。


参考文档:

http://docs.ceph.com/docs/master/rbd/rbd-openstack/

http://docs.openstack.org/kilo/install-guide/install/yum/content/cinder-install-controller-node.html



openstack K版本和ceph对接

标签:openstack

原文地址:http://shyln.blog.51cto.com/6890594/1794689

(0)
(0)
   
举报
评论 一句话评论(0
登录后才能评论!
© 2014 mamicode.com 版权所有  联系我们:gaon5@hotmail.com
迷上了代码!