I. Preliminary preparation
1. OpenStack cluster node list
10.240.216.101 os-node1 control node (Ceph mon/mds node)
10.240.216.102 os-node2 compute node (Ceph mon/mds node)
10.240.216.103 os-node3 compute node (Ceph mon/mds node)
10.240.216.104 os-node4 storage node (Ceph osd node)
10.240.216.105 os-node5 storage node (Ceph osd node)
2. OS version
[root@os-node1 ~]# cat /etc/redhat-release
Red Hat Enterprise Linux Server release 6.5 (Santiago)
3. Kernel version
[root@os-node1 ~]# uname -r
2.6.32-431.el6.x86_64
4. Configure /etc/hosts (steps 4 through 12 must be performed on every node)
# vi /etc/hosts
10.240.216.101 os-node1
10.240.216.102 os-node2
10.240.216.103 os-node3
10.240.216.104 os-node4
10.240.216.105 os-node5
5. Stop the NetworkManager service and start the network service
service NetworkManager stop
service network start
chkconfig NetworkManager off
chkconfig network on
6. Remove the Red Hat-provided yum packages (using Red Hat's own repositories requires a paid subscription, which is slow and costly, so remove Red Hat's yum packages and install the CentOS ones instead)
rpm -qa | grep yum | xargs rpm -e --nodeps
7. Install the CentOS yum packages
The packages can be downloaded from http://mirrors.163.com/centos or from http://pan.baidu.com/s/1qW0MbgC
rpm -ivh python-iniparse-0.3.1-2.1.el6.noarch.rpm
rpm -ivh yum-metadata-parser-1.1.2-16.el6.x86_64.rpm
rpm -ivh yum-3.2.29-40.el6.centos.noarch.rpm yum-plugin-fastestmirror-1.1.30-14.el6.noarch.rpm
Create a yum repo file of your own
[root@os-node1 ~]# vi /etc/yum.repos.d/my.repo
[base]
name=CentOS-6 - Base - 163.com
baseurl=http://mirrors.163.com/centos/6.5/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=os
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
exclude=*ceph*  # with this line, yum will not look in this repository for ceph-related packages (python-ceph, ceph)
[updates]
name=CentOS-6 - Updates - 163.com
baseurl=http://mirrors.163.com/centos/6.5/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
exclude=*ceph*
[extras]
name=CentOS-6 - Extras - 163.com
baseurl=http://mirrors.163.com/centos/6.5/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
exclude=*ceph*
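After saving the repo file, you can optionally verify that yum can read the new repositories (an extra check, not part of the original steps):
yum repolist    # the base, updates and extras repos from 163.com should be listed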
8. Install the Ceph package signing keys
(1) Import the release.asc and autobuild.asc keys
rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc'
(2) Add the Ceph extras repository ceph-extras.repo and set priority=2 so that newer packages (such as qemu) take precedence over the standard ones (see the note after the repo file about the priorities plugin).
vi /etc/yum.repos.d/ceph-extras.repo
[ceph-extras-source]
name=Ceph Extras Sources
baseurl=http://ceph.com/packages/ceph-extras/rpm/rhel6.5/SRPMS
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
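Note: the priority= option above only takes effect when the yum priorities plugin is installed. If it is not already present, install it first (an assumption; the original steps do not mention it explicitly):
yum install -y yum-plugin-priorities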
(3) Add the Ceph repository
vi /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages for $basearch
baseurl=http://ceph.com/rpm/rhel6/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
[ceph-noarch]
name=Ceph noarch packages
baseurl=http://ceph.com/rpm/rhel6/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=http://ceph.com/rpm/rhel6/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
(4) Install the EPEL yum repository
rpm -Uvh http://mirrors.sohu.com/fedora-epel/6/x86_64/epel-release-6-8.noarch.rpm    (for RHEL 6)
rpm -Uvh http://mirrors.sohu.com/fedora-epel/7/x86_64/e/epel-release-7-2.noarch.rpm    (for RHEL 7)
Note: the OpenStack control node and compute nodes need the Ceph client, so edit the EPEL repo on those nodes as shown below.
Re-edit the existing EPEL repo file (add an exclude=*ceph* line to each section, as below).
[root@os-node1 ~]# vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
#baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
exclude=*ceph* # with this line, yum will not look in this repository for ceph-related packages (python-ceph, ceph)
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
#baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
exclude=*ceph* # with this line, yum will not look in this repository for ceph-related packages (python-ceph, ceph)
[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
#baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1
exclude=*ceph* # with this line, yum will not look in this repository for ceph-related packages (python-ceph, ceph)
9. Install the OpenStack package repository
OpenStack packages come from the Red Hat RDO repository; download and install the RDO release package.
Three releases are listed below; install whichever one matches your platform.
https://repos.fedorapeople.org/repos/openstack/openstack-havana/rdo-release-havana-9.noarch.rpm
yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-icehouse/epel-6/rdo-release-icehouse-4.noarch.rpm    (for RHEL 6.5)
yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-icehouse/epel-7/rdo-release-icehouse-4.noarch.rpm    (for RHEL 7.0)
yum install -y http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm    (currently supported on CentOS 7 only)
10. Refresh the yum repositories
yum clean all
yum update -y
11. Install the OpenStack utilities
yum install -y openstack-utils
12. Disable the firewall and SELinux on all nodes, then reboot.
service iptables stop
service ip6tables stop
chkconfig iptables off
chkconfig ip6tables off
echo "/etc/init.d/iptables stop" >> /etc/rc.local
echo "/etc/init.d/ip6tables stop" >> /etc/rc.local
echo "*/5 * * * * /etc/init.d/iptables stop" >> /var/spool/cron/root
echo "*/5 * * * * /etc/init.d/ip6tables stop" >> /var/spool/cron/root
sed -i '/SELINUX/s/enforcing/disabled/' /etc/selinux/config
reboot
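After the reboot, you can optionally confirm that both are really off (an extra check, not in the original steps):
getenforce                 # should print Disabled
service iptables status    # should report that iptables is not running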
13. Configure the NTP server
Configure os-node1 as the NTP server and have the other nodes synchronize against it.
Install the NTP service on the control node
# yum -y install ntp
Edit the NTP configuration file
# vi /etc/ntp.conf
Comment out the following lines
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
Append the following two lines at the end (the NTP server synchronizes against its local clock)
server 127.127.1.0
fudge 127.127.1.0 stratum 10
Start the NTP service and enable it at boot
service ntpd restart
chkconfig ntpd on
[root@os-node1 ~]# ntpq -p
remote refid st t when poll reach delay offset jitter
==============================================================================
*LOCAL(0) .LOCL. 10 l 35 64 17 0.000 0.000 0.000
Stop the firewall
[root@os-node1 ~]# service iptables stop
iptables: Flushing firewall rules: [ OK ]
iptables: Setting chains to policy ACCEPT: filter [ OK ]
iptables: Unloading modules: [ OK ]
Synchronize the other nodes against the control node (wait about five minutes, until the NTP server's reach value reaches 17, before syncing)
[root@compute ~]# ntpdate 10.240.216.101
12 Jun 23:51:35 ntpdate[2781]: adjust time server 10.240.216.101 offset 0.033492 sec
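Optionally, keep the other nodes in sync over time by adding a periodic ntpdate job, mirroring the cron style used earlier (an optional addition, not part of the original steps):
echo "*/30 * * * * /usr/sbin/ntpdate 10.240.216.101 >/dev/null 2>&1" >> /var/spool/cron/root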
14. Install and configure MySQL on the control node
[root@os-node1 ~]# yum install -y mysql mysql-server MySQL-python
Edit the MySQL configuration file
[root@os-node1 ~]# vi /etc/my.cnf
[mysqld]
bind-address = 10.240.216.101 # add this bind-address line; it is not present by default
Start the MySQL service and enable it at boot
service mysqld start
chkconfig mysqld on
Change the MySQL root password (set it to jiayuan.com here; you can also leave the password unset and skip this step)
[root@os-node1 ~]# mysqladmin -uroot -p password 'jiayuan.com'    # change the password to jiayuan.com
Enter password:    # enter the current password; the database has no password by default
[root@os-node1 ~]#
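Optionally, harden the installation with MySQL's bundled script (an optional extra; it prompts interactively and removes the test database and anonymous users):
mysql_secure_installation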
Install the MySQL client on the other nodes
[root@compute ~]# yum install -y mysql MySQL-python
15. Configure the message queue service
Qpid is used here; RabbitMQ and ZeroMQ (0MQ) would also work.
Install the Qpid server on the control node
[root@os-node1 ~]# yum install -y qpid-cpp-server memcached
Edit the configuration file and disable authentication
vi /etc/qpidd.conf
auth = no
Start the Qpid service and enable it at boot
service qpidd start
chkconfig qpidd on
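To confirm the broker is up, you can check that it is listening on the default AMQP port 5672 (an optional check):
netstat -lntp | grep 5672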
II. Deploy Ceph storage
1. Install the Ceph deployment tool and related Ceph packages on the control node
yum install ceph-deploy -y
yum install ceph -y
yum install python-ceph -y # needed for the rbd module
2. Install the Ceph packages on the compute and storage nodes
yum install ceph -y
3. Install Ceph's dependencies on all nodes
yum install *argparse* redhat-lsb xfs* -y
4. Prepare and format the OSD partitions on each storage node
[root@os-node4 ~]# fdisk /dev/sda
[root@os-node4 ~]# partx -a /dev/sda
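fdisk is interactive and the exact partition layout depends on your disks; as an optional sanity check (not part of the original steps), confirm that the kernel now sees the new partition:
fdisk -l /dev/sda
grep sda /proc/partitions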
5. Install the virtualization software for block storage
(1) Remove any existing qemu packages first, to make sure the versions installed next are the complete ones
yum remove -y qemu-kvm qemu-kvm-tools qemu-img
(2) Reinstall qemu
yum install -y qemu-kvm qemu-kvm-tools qemu-img
(3) Install the qemu guest agent
yum install -y qemu-guest-agent
(4) Install the libvirt packages
yum install -y libvirt
III. Build the Ceph cluster (this step can also be done with the quick-install Ceph script in the appendix)
Set up the mon nodes
1. Create the first mon node
(1) Log in to the monitor node os-node1
ls /etc/ceph # check whether the ceph configuration directory already has anything in it
(2) Create the ceph configuration file and fill in its contents
touch /etc/ceph/ceph.conf # create an empty ceph configuration file
[root@client ~]# uuidgen # this produces a unique identifier to use as the Ceph cluster ID
f11240d4-86b1-49ba-aacc-6d3d37b24cc4
fsid = f11240d4-86b1-49ba-aacc-6d3d37b24cc4 # the identifier obtained above; add this line to the ceph configuration file
mon initial members = os-node1,os-node2,os-node3 # os-node1, os-node2 and os-node3 are the cluster's monitor nodes; add this line to the ceph configuration file
mon host = os-node1,10.240.216.102,10.240.216.103 # the monitor nodes' addresses; add this line to the ceph configuration file
Edit the ceph configuration file so that it contains the following
vi /etc/ceph/ceph.conf
[global]
fsid = f11240d4-86b1-49ba-aacc-6d3d37b24cc4
mon initial members = os-node1,os-node2,os-node3
mon host = os-node1,10.240.216.102,10.240.216.103
public network = 10.240.216.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
filestore xattr use omap = true
osd pool default size = 2
osd pool default min size = 1
osd crush chooseleaf type = 1
osd_mkfs_type = xfs
max mds = 5
mds max file size = 100000000000000
mds cache size = 1000000
mon osd down out interval = 900 # when an OSD has been down for 900 s, mark it out of the cluster and remap its data to other nodes
#cluster_network = 10.240.216.0/24
[mon]
mon clock drift allowed = .50 # allow 0.5 s of clock drift (the default is 0.05 s); the heterogeneous PCs in this cluster always drift more than 0.05 s, so 0.5 s keeps the monitors in quorum more easily
(3) Create the keys on os-node1
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'    # create the monitor keyring
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'    # create the key for the ceph admin user and grant it access to the cluster
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring    # add the client.admin key to ceph.mon.keyring
(4) Create a mon data directory on the os-node1 monitor node
mkdir -p /var/lib/ceph/mon/ceph-os-node1
(5) Create a bootstrap key for starting OSDs on os-node1
mkdir -p /var/lib/ceph/bootstrap-osd/
ceph-authtool -C /var/lib/ceph/bootstrap-osd/ceph.keyring
(6) Initialize the mon on os-node1 with the following command
ceph-mon --mkfs -i os-node1 --keyring /tmp/ceph.mon.keyring
(7) Create an empty done file to prevent the monitor from being re-provisioned
touch /var/lib/ceph/mon/ceph-os-node1/done
(8) Create an empty sysvinit marker file
touch /var/lib/ceph/mon/ceph-os-node1/sysvinit
(9) Start the ceph mon daemon
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.os-node1
(10) Check the mon status through its admin socket (asok)
[root@os-node1 ~]# ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.os-node1.asok mon_status
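Besides the admin socket, an overall cluster view is available at any time with (optional check):
ceph -s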
2. Create the second mon node
(1) Copy the /etc/ceph directory from os-node1 to os-node2
scp /etc/ceph/* os-node2:/etc/ceph/
(2) Create a /var/lib/ceph/bootstrap-osd/ directory on os-node2
mkdir /var/lib/ceph/bootstrap-osd/
(3) Copy /var/lib/ceph/bootstrap-osd/ceph.keyring from os-node1 to os-node2
scp /var/lib/ceph/bootstrap-osd/ceph.keyring os-node2:/var/lib/ceph/bootstrap-osd/
(4) Copy /tmp/ceph.mon.keyring from os-node1 to os-node2
scp /tmp/ceph.mon.keyring os-node2:/tmp/
(5) Create a /var/lib/ceph/mon/ceph-os-node2 directory on os-node2
mkdir -p /var/lib/ceph/mon/ceph-os-node2
(6) Initialize the mon on os-node2 with the following command
ceph-mon --mkfs -i os-node2 --keyring /tmp/ceph.mon.keyring
(7) Create an empty done file to prevent the monitor from being re-provisioned
touch /var/lib/ceph/mon/ceph-os-node2/done
(8) Create an empty sysvinit marker file
touch /var/lib/ceph/mon/ceph-os-node2/sysvinit
(9) Start the ceph mon daemon
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.os-node2
3. Create the third mon node
(1) Copy the /etc/ceph directory from os-node1 to os-node3
scp /etc/ceph/* os-node3:/etc/ceph/
(2) Create a /var/lib/ceph/bootstrap-osd/ directory on os-node3
mkdir /var/lib/ceph/bootstrap-osd/
(3) Copy /var/lib/ceph/bootstrap-osd/ceph.keyring from os-node1 to os-node3
scp /var/lib/ceph/bootstrap-osd/ceph.keyring os-node3:/var/lib/ceph/bootstrap-osd/
(4) Copy /tmp/ceph.mon.keyring from os-node1 to os-node3
scp /tmp/ceph.mon.keyring os-node3:/tmp/
(5) Create a /var/lib/ceph/mon/ceph-os-node3 directory on os-node3
mkdir -p /var/lib/ceph/mon/ceph-os-node3
(6) Initialize the mon on os-node3 with the following command
ceph-mon --mkfs -i os-node3 --keyring /tmp/ceph.mon.keyring
(7) Create an empty done file to prevent the monitor from being re-provisioned
touch /var/lib/ceph/mon/ceph-os-node3/done
(8) Create an empty sysvinit marker file
touch /var/lib/ceph/mon/ceph-os-node3/sysvinit
(9) Start the ceph mon daemon
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.os-node3
(10) Check the cluster status
[root@os-node1 ~]# ceph -w
cluster f11240d4-86b1-49ba-aacc-6d3d37b24cc4
health HEALTH_ERR 192 pgs stuck inactive; 192 pgs stuck unclean; no osds
monmap e2: 3 mons at {os-node1=os-node1:6789/0,os-node2=10.240.216.102:6789/0,os-node3=10.240.216.103:6789/0}, election epoch 6, quorum 0,1,2 os-node1,os-node2,os-node3
osdmap e1: 0 osds: 0 up, 0 in
pgmap v2: 192 pgs, 3 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
192 creating
2014-07-14 18:39:03.379373 mon.0 [INF] mdsmap e1: 0/0/5 up
(11) List the Ceph pools
ceph osd lspools
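At this point only the default pools exist, so the output typically looks something like the following (the exact default pool set depends on the Ceph release):
0 data,1 metadata,2 rbd,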
Add the OSD nodes
1. Add the first OSD node
(1) Create an OSD and obtain an osd number
[root@os-node4~]# ceph osd create
0
(2) Create a data directory for the OSD
[root@os-node4 ~]# mkdir -p /var/lib/ceph/osd/ceph-0
(3) Format the prepared OSD partition as xfs
[root@os-node4 ~]# mkfs.xfs -f /dev/sda6
meta-data=/dev/sda6 isize=256 agcount=4, agsize=1310720 blks
= sectsz=512 attr=2, projid32bit=0
data = bsize=4096 blocks=5242880, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
(4) Mount the directory
[root@os-node4 ~]# mount -o user_xattr /dev/sda6 /var/lib/ceph/osd/ceph-0
mount: wrong fs type, bad option, bad superblock on /dev/sda6,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so
The command above fails with the error shown.
The workaround is to replace that single command with the following two commands.
[root@os-node4 ~]# mount /dev/sda6 /var/lib/ceph/osd/ceph-0
[root@os-node4 ~]# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-0
Check the mounts
[root@os-node4 ~]# mount
/dev/sda3 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/sda2 on /var type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/sda6 on /var/lib/ceph/osd/ceph-0 type xfs (rw,user_xattr)
Write the mount into /etc/fstab
[root@os-node4 ~]# vi /etc/fstab
/dev/sda6 /var/lib/ceph/osd/ceph-0 xfs defaults 0 0
/dev/sda6 /var/lib/ceph/osd/ceph-0 xfs remount,user_xattr 0 0
(5) Initialize the OSD data directory
[root@os-node4 ~]# ceph-osd -i 0 --mkfs --mkkey
(6) Register the OSD's authentication key
[root@os-node4 ~]# ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
(7) Create a CRUSH bucket for this OSD host
[root@os-node4 ~]# ceph osd crush add-bucket os-node4 host
added bucket os-node4 type host to crush map
(8) Place the Ceph node under the root default
[root@os-node4 ~]# ceph osd crush move os-node4 root=default
moved item id -2 name 'os-node4' to location {root=default} in crush map
(9) Add osd.0 to the CRUSH map under this host
[root@os-node4 ~]# ceph osd crush add osd.0 1.0 host=os-node4
add item id 0 name 'osd.0' weight 1 at location {host=os-node4} to crush map
(10) Create the sysvinit marker file
[root@os-node4 ~]# touch /var/lib/ceph/osd/ceph-0/sysvinit
(11) Start the osd daemon
/etc/init.d/ceph start osd.0
(12) View the OSD tree
[root@os-node4 ~]# ceph osd tree
# id weight type name up/down reweight
-1 1 root default
-2 1 host os-node4
0 1 osd.0 up 1
2. Add the second OSD node
(1) Create an OSD and obtain an osd number
[root@os-node5 ~]# ceph osd create
1
(2) Create a data directory for the OSD
[root@os-node5 ~]# mkdir -p /var/lib/ceph/osd/ceph-1
(3) Format the prepared OSD partition as xfs and mount it on the directory created in the previous step
[root@os-node5 ~]# mkfs.xfs -f /dev/sda6
meta-data=/dev/sda6 isize=256 agcount=4, agsize=1310720 blks
= sectsz=512 attr=2, projid32bit=0
data = bsize=4096 blocks=5242880, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
(4) Mount the directory
[root@os-node5 ~]# mount -o user_xattr /dev/sda6 /var/lib/ceph/osd/ceph-1
mount: wrong fs type, bad option, bad superblock on /dev/sda6,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so
The command above fails with the error shown.
The workaround is to replace that single command with the following two commands.
[root@os-node5 ~]# mount /dev/sda6 /var/lib/ceph/osd/ceph-1
[root@os-node5 ~]# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-1
Check the mounts
[root@os-node5 ~]# mount
/dev/sda3 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/sda2 on /var type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/sda6 on /var/lib/ceph/osd/ceph-1 type xfs (rw,user_xattr)
Write the mount into /etc/fstab
[root@os-node5 ~]# vi /etc/fstab
/dev/sda6 /var/lib/ceph/osd/ceph-1 xfs defaults 0 0
/dev/sda6 /var/lib/ceph/osd/ceph-1 xfs remount,user_xattr 0 0
(5) Initialize the OSD data directory
[root@os-node5 ~]# ceph-osd -i 1 --mkfs --mkkey
2014-06-25 23:17:37.633040 7fa8fd06b7a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:17:37.740713 7fa8fd06b7a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:17:37.744937 7fa8fd06b7a0 -1 filestore(/var/lib/ceph/osd/ceph-1) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2014-06-25 23:17:37.812999 7fa8fd06b7a0 -1 created object store /var/lib/ceph/osd/ceph-1 journal /var/lib/ceph/osd/ceph-1/journal for osd.1 fsid f11240d4-86b1-49ba-aacc-6d3d37b24cc4
2014-06-25 23:17:37.813192 7fa8fd06b7a0 -1 auth: error reading file: /var/lib/ceph/osd/ceph-1/keyring: can't open /var/lib/ceph/osd/ceph-1/keyring: (2) No such file or directory
2014-06-25 23:17:37.814050 7fa8fd06b7a0 -1 created new key in keyring /var/lib/ceph/osd/ceph-1/keyring
(6) Register the OSD's authentication key
[root@os-node5 ~]# ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring
added key for osd.1
(7) Create a CRUSH bucket for this OSD host
[root@os-node5 ~]# ceph osd crush add-bucket os-node5 host
added bucket os-node5 type host to crush map
(8) Place the Ceph node under the root default
[root@os-node5 ~]# ceph osd crush move os-node5 root=default
moved item id -3 name 'os-node5' to location {root=default} in crush map
(9) Add osd.1 to the CRUSH map under this host
[root@os-node5 ~]# ceph osd crush add osd.1 1.0 host=os-node5
add item id 1 name 'osd.1' weight 1 at location {host=os-node5} to crush map
(10) Create the sysvinit marker file
[root@os-node5 ~]# touch /var/lib/ceph/osd/ceph-1/sysvinit
(11) Start the osd daemon
[root@os-node5 ~]# /etc/init.d/ceph start osd.1
=== osd.1 ===
create-or-move updated item name 'osd.1' weight 0.02 at location {host=os-node5,root=default} to crush map
Starting Ceph osd.1 on os-node5...
starting osd.1 at :/0 osd_data /var/lib/ceph/osd/ceph-1 /var/lib/ceph/osd/ceph-1/journal
(12) View the OSD tree
[root@os-node5 ~]# ceph osd tree
# id weight type name up/down reweight
-1 2 root default
-2 1 host os-node4
0 1 osd.0 up 1
-3 1 host os-node5
1 1 osd.1 up 1
3. Add the third OSD node
(1) Create an OSD and obtain an osd number
[root@os-node6 ~]# ceph osd create
2
(2) Create a data directory for the OSD
[root@os-node6 ~]# mkdir -p /var/lib/ceph/osd/ceph-2
(3) Format the prepared OSD partition as xfs
[root@os-node6 ~]# mkfs.xfs -f /dev/sda6
meta-data=/dev/sdb isize=256 agcount=4, agsize=1310720 blks
= sectsz=512 attr=2, projid32bit=0
data = bsize=4096 blocks=5242880, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
(4) Mount the directory
[root@os-node6 ~]# mount -o user_xattr /dev/sda6 /var/lib/ceph/osd/ceph-2
mount: wrong fs type, bad option, bad superblock on /dev/sda6,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so
The command above fails with the error shown.
The workaround is to replace that single command with the following two commands.
[root@os-node6 ~]# mount /dev/sda6 /var/lib/ceph/osd/ceph-2
[root@os-node6 ~]# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-2
Check the mounts
[root@os-node6 ~]# mount
Write the mount into /etc/fstab
[root@os-node6 ~]# vi /etc/fstab
/dev/sda6 /var/lib/ceph/osd/ceph-2 xfs defaults 0 0
/dev/sda6 /var/lib/ceph/osd/ceph-2 xfs remount,user_xattr 0 0
(5) Initialize the OSD data directory
[root@os-node6 ~]# ceph-osd -i 2 --mkfs --mkkey
2014-06-25 23:29:01.734251 7f52915927a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:29:01.849158 7f52915927a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:29:01.852189 7f52915927a0 -1 filestore(/var/lib/ceph/osd/ceph-2) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2014-06-25 23:29:01.904476 7f52915927a0 -1 created object store /var/lib/ceph/osd/ceph-2 journal /var/lib/ceph/osd/ceph-2/journal for osd.2 fsid f11240d4-86b1-49ba-aacc-6d3d37b24cc4
2014-06-25 23:29:01.904712 7f52915927a0 -1 auth: error reading file: /var/lib/ceph/osd/ceph-2/keyring: can't open /var/lib/ceph/osd/ceph-2/keyring: (2) No such file or directory
2014-06-25 23:29:01.905376 7f52915927a0 -1 created new key in keyring /var/lib/ceph/osd/ceph-2/keyring
[root@os-node6 ~]#
(6) Register the OSD's authentication key
[root@os-node6 ~]# ceph auth add osd.2 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-2/keyring
added key for osd.2
(7) Create a CRUSH bucket for this OSD host
[root@os-node6 ~]# ceph osd crush add-bucket os-node6 host
added bucket os-node6 type host to crush map
(8) Place the Ceph node under the root default
[root@os-node6 ~]# ceph osd crush move os-node6 root=default
moved item id -4 name 'os-node6' to location {root=default} in crush map
(9) Add osd.2 to the CRUSH map under this host
[root@os-node6 ~]# ceph osd crush add osd.2 1.0 host=os-node6
add item id 2 name 'osd.2' weight 1 at location {host=os-node6} to crush map
(10) Create the sysvinit marker file
[root@os-node6 ~]# touch /var/lib/ceph/osd/ceph-2/sysvinit
(11) Start the osd daemon
[root@os-node6 ~]# /etc/init.d/ceph start osd.2
=== osd.2 ===
create-or-move updated item name 'osd.2' weight 0.02 at location {host=os-node6,root=default} to crush map
Starting Ceph osd.2 on os-node6...
starting osd.2 at :/0 osd_data /var/lib/ceph/osd/ceph-2 /var/lib/ceph/osd/ceph-2/journal
(12) View the OSD tree
[root@os-node6 ~]# ceph osd tree
# id weight type name up/down reweight
-1 3 root default
-2 1 host os-node4
0 1 osd.0 up 1
-3 1 host os-node5
1 1 osd.1 up 1
-4 1 host os-node6
2 1 osd.2 up 1
Add the metadata servers
1. Add the first metadata server
(1) Create a directory for the mds metadata server
[root@os-node1 ~]# mkdir -p /var/lib/ceph/mds/ceph-os-node1
(2) Create a key for the bootstrap-mds client (note: skip this step if the keyring below has already been generated in that directory)
[root@os-node1 ~]# ceph-authtool --create-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring --gen-key -n client.bootstrap-mds
(3) Create the bootstrap-mds client in the ceph auth database, grant it permissions and add the key created above (note: check the ceph auth list output; if the client.bootstrap-mds user already exists, skip this step)
[root@os-node1 ~]# ceph auth add client.bootstrap-mds mon 'allow profile bootstrap-mds' -i /var/lib/ceph/bootstrap-mds/ceph.keyring
added key for client.bootstrap-mds
(4) Create a ceph.bootstrap-mds.keyring file in root's home directory
touch /root/ceph.bootstrap-mds.keyring
(5) Import the key from /var/lib/ceph/bootstrap-mds/ceph.keyring into the ceph.bootstrap-mds.keyring file in the home directory
ceph-authtool --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring ceph.bootstrap-mds.keyring
(6) Create the mds.os-node1 user in the ceph auth database, grant it permissions and generate its key, saving the key to /var/lib/ceph/mds/ceph-os-node1/keyring
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.os-node1 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-os-node1/keyring
(7) Create an empty sysvinit marker file for the mds, used at startup
[root@os-node1 ~]# touch /var/lib/ceph/mds/ceph-os-node1/sysvinit
(8) Create an empty done file to prevent the mds from being re-provisioned
[root@os-node1 ~]# touch /var/lib/ceph/mds/ceph-os-node1/done
(9) Start the mds service
[root@os-node1 ~]# service ceph start mds.os-node1
=== mds.os-node1 ===
Starting Ceph mds.os-node1 on os-node1...
starting mds.os-node1 at :/0
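You can confirm that the MDS has registered with the cluster (optional check):
ceph mds stat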
2. Add the second metadata server
(1) Create a directory for the mds metadata server on os-node2
[root@os-node2 ~]# mkdir -p /var/lib/ceph/mds/ceph-os-node2
(2) Create a bootstrap-mds directory on os-node2
[root@os-node2 ~]# mkdir -p /var/lib/ceph/bootstrap-mds/
(3) On os-node1, copy /var/lib/ceph/bootstrap-mds/ceph.keyring and /root/ceph.bootstrap-mds.keyring to os-node2
[root@os-node1 ~]# scp /var/lib/ceph/bootstrap-mds/ceph.keyring os-node2:/var/lib/ceph/bootstrap-mds/
[root@os-node1 ~]# scp /root/ceph.bootstrap-mds.keyring os-node2:/root/
(4) On os-node1, copy the files under /var/lib/ceph/mds/ceph-os-node1/ to os-node2
[root@os-node1 ~]# scp /var/lib/ceph/mds/ceph-os-node1/sysvinit os-node2:/var/lib/ceph/mds/ceph-os-node2/
(5) Create the mds.os-node2 user in the ceph auth database, grant it permissions and generate its key, saving the key to /var/lib/ceph/mds/ceph-os-node2/keyring
[root@os-node2 ~]# ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.os-node2 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-os-node2/keyring
(6) Create an empty done file to prevent the mds from being re-provisioned
[root@os-node2 ~]# touch /var/lib/ceph/mds/ceph-os-node2/done
(7) Start the mds service
[root@os-node2 ~]# service ceph start mds.os-node2
3. Add the third metadata server
(1) Create a directory for the mds metadata server on os-node3
[root@os-node3 ~]# mkdir -p /var/lib/ceph/mds/ceph-os-node3
(2) Create a bootstrap-mds directory on os-node3
[root@os-node3 ~]# mkdir -p /var/lib/ceph/bootstrap-mds/
(3) On os-node1, copy /var/lib/ceph/bootstrap-mds/ceph.keyring and /root/ceph.bootstrap-mds.keyring to os-node3
[root@os-node1 ~]# scp /var/lib/ceph/bootstrap-mds/ceph.keyring os-node3:/var/lib/ceph/bootstrap-mds/
[root@os-node1 ~]# scp /root/ceph.bootstrap-mds.keyring os-node3:/root/
(4) On os-node1, copy the files under /var/lib/ceph/mds/ceph-os-node1/ to os-node3
[root@os-node1 ~]# scp /var/lib/ceph/mds/ceph-os-node1/sysvinit os-node3:/var/lib/ceph/mds/ceph-os-node3/
(5) Create the mds.os-node3 user in the ceph auth database, grant it permissions and generate its key, saving the key to /var/lib/ceph/mds/ceph-os-node3/keyring
[root@os-node3 ~]# ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.os-node3 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-os-node3/keyring
(6) Create an empty done file to prevent the mds from being re-provisioned
[root@os-node3 ~]# touch /var/lib/ceph/mds/ceph-os-node3/done
(7) Start the mds service
[root@os-node3 ~]# service ceph start mds.os-node3
4. Check the cluster status
[root@os-node1 ~]# ceph -w
cluster f11240d4-86b1-49ba-aacc-6d3d37b24cc4
health HEALTH_OK
monmap e2: 3 mons at {os-node1=10.240.240.211:6789/0,os-node2=10.240.240.212:6789/0,os-node3=10.240.240.213:6789/0}, election epoch 8, quorum 0,1,2 os-node1,os-node2,os-node3
osdmap e23: 3 osds: 3 up, 3 in
pgmap v47: 192 pgs, 3 pools, 0 bytes data, 0 objects
3175 MB used, 58234 MB / 61410 MB avail
192 active+clean
2014-06-25 23:32:48.340284 mon.0 [INF] pgmap v47: 192 pgs: 192 active+clean; 0 bytes data, 3175 MB used, 58234 MB / 61410 MB avail
5. Check the cluster health
[root@os-node1 ceph]# ceph health
HEALTH_WARN clock skew detected on mon.os-node4, mon.os-node5
The message above means the clocks of os-node1, os-node4 and os-node5 are not in sync; they must be synchronized. The fix is as follows:
Configure os-node1 as the NTP server and have all nodes synchronize against it.
Start the NTP service and enable it at boot.
Configure the NTP server
Configure os-node1 as the NTP server and have the other nodes synchronize against it.
Install the NTP service on the control node
# yum -y install ntp
Edit the NTP configuration file
# vi /etc/ntp.conf
Comment out the following lines
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
Append the following two lines at the end (the NTP server synchronizes against its local clock)
server 127.127.1.0
fudge 127.127.1.0 stratum 2
Start the NTP service and enable it at boot
# service ntpd restart
# chkconfig ntpd on
[root@os-node1 ~]# ntpq -p
remote refid st t when poll reach delay offset jitter
==============================================================================
*LOCAL(0) .LOCL. 10 l 35 64 17 0.000 0.000 0.000
Stop the firewall
[root@admin-os-node ceph]# service iptables stop
iptables: Flushing firewall rules: [ OK ]
iptables: Setting chains to policy ACCEPT: filter [ OK ]
iptables: Unloading modules: [ OK ]
Synchronize the other nodes against the control node (wait about five minutes, until the NTP server's reach value reaches 17, before syncing)
[root@os-node4 ceph]# ntpdate os-node1
19 Jun 17:58:00 ntpdate[16034]: adjust time server os-node1 offset -0.000067 sec
Running the health check again now gives:
[root@os-node1 ceph]# ceph health
HEALTH_OK
16. Related service commands:
/etc/init.d/ceph start osd.0    # start a single osd daemon
/etc/init.d/ceph start    # start all local ceph daemons, including mon and osd
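The same init script also supports stop and status, for example (assuming the stock sysvinit script shipped with these Ceph packages):
/etc/init.d/ceph status          # show the status of all local ceph daemons
/etc/init.d/ceph stop osd.0      # stop a single osd daemon
/etc/init.d/ceph stop            # stop all local ceph daemons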
IV. Install the Keystone service
Install Keystone on the control node
1. Install the Keystone packages
[root@os-node1 ~]# yum install openstack-keystone python-keystoneclient -y
Configure the Keystone configuration file
[root@os-node1 ~]# openstack-config --set /etc/keystone/keystone.conf database connection mysql://keystone:keystone@localhost/keystone
2. Initialize the Keystone database
[root@os-node1 ~]# openstack-db --init --service keystone --password keystone
Please enter the password for the 'root' MySQL user:
Verified connectivity to MySQL.
Creating 'keystone' database.
Initializing the keystone database, please wait...
Complete!
[root@os-node1 ~]#
To drop the keystone database, use the following command
openstack-db --drop --service keystone
3. Configure the Keystone admin token (a random string can be generated with openssl; here it is simply set to admin)
ADMIN_TOKEN=admin
echo $ADMIN_TOKEN
4. Store the admin token in the Keystone configuration file
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN
5. Configure PKI tokens
keystone-manage pki_setup --keystone-user keystone --keystone-group keystone
6. Fix the ownership of the Keystone files
[root@os-node1 ~]# chown -R keystone:keystone /etc/keystone/* /var/log/keystone/keystone.log
7. Start the Keystone service and enable it at boot
service openstack-keystone start
chkconfig openstack-keystone on
Check the process start-up information:
[root@os-node1 ~]# ps auxf | grep -i keystone-all
root 9021 0.0 0.1 103236 864 pts/0 S+ 01:34 0:00 \_ grep -i keystone-all
keystone 8929 1.1 7.6 270468 35132 ? S 01:33 0:00 /usr/bin/python /usr/bin/keystone-all --config-file /etc/keystone/keystone.conf
Check the log file for errors:
[root@os-node1 ~]# grep ERROR /var/log/keystone/keystone.log
8. Initialize the newly created keystone database
Sync the keystone database schema as the admin user
[root@control ~]# keystone-manage db_sync
9. Set the Keystone service-token environment variables
[root@os-node1 ~]# vi keystone-token
export OS_SERVICE_TOKEN=$ADMIN_TOKEN
export OS_SERVICE_ENDPOINT=http://os-node1:35357/v2.0
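The file above only defines the variables; load it into the current shell before running the keystone commands below. Note that OS_SERVICE_TOKEN expands from $ADMIN_TOKEN, so source it in the same shell where ADMIN_TOKEN was set (or replace the variable with the literal token):
source keystone-token
env | grep OS_SERVICE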
10. Create the admin and service tenants
Create the admin tenant
[root@os-node1 ~]# keystone tenant-create --name=admin --description="Admin Tenant"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | Admin Tenant |
| enabled | True |
| id | 8208d5035b84466a98408b41d596cb1d |
| name | admin |
+-------------+----------------------------------+
Create the service tenant
[root@os-node1 ~]# keystone tenant-create --name=service --description="Service Tenant"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | Service Tenant |
| enabled | True |
| id | baf0e8b3d3ed4b75b40f05213c6b9d84 |
| name | service |
+-------------+----------------------------------+
11. Create an admin user
[root@os-node1 ~]# keystone user-create --name=admin --pass=admin --email=admin@jiayuan.com
+----------+----------------------------------+
| Property | Value |
+----------+----------------------------------+
| email | admin@jiayuan.com |
| enabled | True |
| id | 283d063867744046b7203af6ce4ec681 |
| name | admin |
+----------+----------------------------------+
12. Create an admin role
[root@os-node1 ~]# keystone role-create --name=admin
+----------+----------------------------------+
| Property | Value |
+----------+----------------------------------+
| id | e363e80c062a4b86b47ab730290fbd2d |
| name | admin |
+----------+----------------------------------+
[root@os-node1 ~]#
13. Add the user to the tenant with the admin role
[root@os-node1 ~]# keystone user-role-add --user=admin --tenant=admin --role=admin
14. Create the Keystone identity service
[root@os-node1 ~]# keystone service-create --name=keystone --type=identity --description="Keystone Identity Service"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | Keystone Identity Service |
| id | 5d35bdf277ec4699a3ea6d3e7e5d668d |
| name | keystone |
| type | identity |
+-------------+----------------------------------+
[root@os-node1 ~]#
15. Create the Keystone API endpoint (use the service id returned by the previous command)
[root@os-node1 ~]# keystone endpoint-create --service-id=bed4139a44724dd183a17cc7b66dc7bd --publicurl=http://os-node1:5000/v2.0 --internalurl=http://os-node1:5000/v2.0 --adminurl=http://os-node1:35357/v2.0
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| adminurl | http://os-node1:35357/v2.0 |
| id | 74ff559cbfe2414bb50df224faddf2a7 |
| internalurl | http://os-node1:5000/v2.0 |
| publicurl | http://os-node1:5000/v2.0 |
| region | regionOne |
| service_id | bed4139a44724dd183a17cc7b66dc7bd |
+-------------+----------------------------------+
16. Unset the service-token environment variables
[root@os-node1 ~]# unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
17. Verify that a token can be obtained with the admin credentials
keystone --os-username=admin --os-password=admin --os-auth-url=http://os-node1:35357/v2.0 token-get
keystone --os-username=admin --os-password=admin --os-tenant-name=admin --os-auth-url=http://os-node1:35357/v2.0 token-get
18. Set the regular environment variables
vi openrc.sh
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://os-node1:35357/v2.0
Load the environment variables
[root@os-node1 ~]# source openrc.sh
Verify that the commands above work
keystone token-get
List the users again
[root@os-node1 ~]# keystone user-list
+----------------------------------+-------+---------+-------------------+
| id | name | enabled | email |
+----------------------------------+-------+---------+-------------------+
| 283d063867744046b7203af6ce4ec681 | admin | True | admin@jiayuan.com |
+----------------------------------+-------+---------+-------------------+
[root@os-node1 ~]#
V. Install and configure the Glance service
1. Install the Glance packages
[root@os-node1 ~]# yum install -y openstack-glance
2. Edit the Glance configuration files; there are two main ones, glance-api.conf and glance-registry.conf.
openstack-config --set /etc/glance/glance-api.conf DEFAULT sql_connection mysql://glance:glance@localhost/glance
openstack-config --set /etc/glance/glance-registry.conf DEFAULT sql_connection mysql://glance:glance@localhost/glance
3. Create the Glance database
[root@os-node1 ~]# openstack-db --init --service glance --password glance
4. Create a glance user
[root@os-node1 ~]# keystone user-create --name=glance --pass=glance --email=glance@jiayuan.com
+----------+----------------------------------+
| Property | Value |
+----------+----------------------------------+
| email | glance@jiayuan.com |
| enabled | True |
| id | 6a277d1d8e5d469f936cc90f4291cb6f |
| name | glance |
+----------+----------------------------------+
Add the glance user to the service tenant with the admin role
[root@os-node1 ~]# keystone user-role-add --user=glance --tenant=service --role=admin
5. Configure Keystone authentication in the glance-api.conf and glance-registry.conf configuration files.
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri http://os-node1:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_host os-node1
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_password glance
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri http://os-node1:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_host os-node1
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_password glance
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
6. Copy the paste deployment configuration to /etc/glance/glance-api-paste.ini and /etc/glance/glance-registry-paste.ini
cp /usr/share/glance/glance-api-dist-paste.ini /etc/glance/glance-api-paste.ini
cp /usr/share/glance/glance-registry-dist-paste.ini /etc/glance/glance-registry-paste.ini
Configure the authentication settings in the two files above
openstack-config --set /etc/glance/glance-api-paste.ini filter:authtoken auth_host os-node1
openstack-config --set /etc/glance/glance-api-paste.ini filter:authtoken admin_user glance
openstack-config --set /etc/glance/glance-api-paste.ini filter:authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-api-paste.ini filter:authtoken admin_password glance
openstack-config --set /etc/glance/glance-registry-paste.ini filter:authtoken auth_host os-node1
openstack-config --set /etc/glance/glance-registry-paste.ini filter:authtoken admin_user glance
openstack-config --set /etc/glance/glance-registry-paste.ini filter:authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-registry-paste.ini filter:authtoken admin_password glance
7. Create the Glance service and endpoint
[root@os-node1 ~]# keystone service-create --name=glance --type=image --description="Glance Image Service"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | Glance Image Service |
| id | 404a974bb11942429037bf25088bc4ea |
| name | glance |
| type | image |
+-------------+----------------------------------+
[root@os-node1 ~]# keystone endpoint-create --service-id=404a974bb11942429037bf25088bc4ea --publicurl=http://os-node1:9292 --internalurl=http://os-node1:9292 --adminurl=http://os-node1:9292
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| adminurl | http://os-node1:9292 |
| id | 0cfbfcb01c8d402bbf8bb972081eefd4 |
| internalurl | http://os-node1:9292 |
| publicurl | http://os-node1:9292 |
| region | regionOne |
| service_id | 404a974bb11942429037bf25088bc4ea |
+-------------+----------------------------------+
8. Start the Glance services and enable them at boot
service openstack-glance-api start
service openstack-glance-registry start
chkconfig openstack-glance-api on
chkconfig openstack-glance-registry on
9. Create an image directory and download a test image
[root@os-node1 ~]# mkdir /images
[root@os-node1 ~]# cd /images/
[root@os-node1 images]# wget https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-i386-disk.img
10. Upload the image
[root@os-node1 images]# glance image-create --name="test-img-1" --disk-format=qcow2 --container-format=bare --is-public=true < /images/cirros-0.3.0-i386-disk.img
+------------------+--------------------------------------+
| Property | Value |
+------------------+--------------------------------------+
| checksum | 90169ba6f09b5906a7f0755bd00bf2c3 |
| container_format | bare |
| created_at | 2014-07-14T14:21:59 |
| deleted | False |
| deleted_at | None |
| disk_format | qcow2 |
| id | 240f6dbf-12f5-462e-8164-2bf756df559f |
| is_public | True |
| min_disk | 0 |
| min_ram | 0 |
| name | test-img-1 |
| owner | b36a323105954a449e5d104293f6b888 |
| protected | False |
| size | 9159168 |
| status | active |
| updated_at | 2014-07-14T14:22:00 |
+------------------+--------------------------------------+
11. List the images
[root@os-node1 images]# glance image-list
+--------------------------------------+------------+-------------+------------------+---------+--------+
| ID | Name | Disk Format | Container Format | Size | Status |
+--------------------------------------+------------+-------------+------------------+---------+--------+
| 240f6dbf-12f5-462e-8164-2bf756df559f | test-img-1 | qcow2 | bare | 9159168 | active |
+--------------------------------------+------------+-------------+------------------+---------+--------+
VI. Configure the Compute service
Install the Nova services on the control node
1. Install the Nova packages on the control node os-node1
[root@os-node1 ~]# yum install -y openstack-nova python-novaclient
[root@os-node1 ~]# yum remove -y openstack-nova-compute    # remove the compute role from the control node
2. Configure the MySQL connection in nova.conf
openstack-config --set /etc/nova/nova.conf database connection mysql://nova:nova@localhost/nova
3. Configure Qpid as the message queue in nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend nova.openstack.common.rpc.impl_qpid
openstack-config --set /etc/nova/nova.conf DEFAULT qpid_hostname os-node1
4. Create the Nova database
[root@os-node1 ~]# openstack-db --init --service nova --password nova
Please enter the password for the 'root' MySQL user:
Verified connectivity to MySQL.
Creating 'nova' database.
Initializing the nova database, please wait...
Complete!
5. Configure the VNC server and proxy-client addresses in nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 10.240.216.101
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 10.240.216.101
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address 10.240.216.101
6. Create a nova user and add it to the service tenant with the admin role
[root@os-node1 ~]# keystone user-create --name=nova --pass=nova --email=nova@jiayuan.com
+----------+----------------------------------+
| Property | Value |
+----------+----------------------------------+
| email | nova@jiayuan.com |
| enabled | True |
| id | b3bfd7d087b247749caced867b8ac144 |
| name | nova |
+----------+----------------------------------+
[root@os-node1 ~]# keystone user-role-add --user=nova --tenant=service --role=admin
7. Configure Keystone authentication in nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_host os-node1
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_user nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_password nova
8. Configure authentication in /etc/nova/api-paste.ini.
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_host os-node1
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_port 35357
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_protocol http
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_uri http://os-node1:5000/v2.0
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_tenant_name service
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_user nova
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_password nova
9. Set the remaining options in nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT state_path /var/lib/nova
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis ec2,osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen 10.240.216.101
openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_workers 16
openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen 10.240.216.101
openstack-config --set /etc/nova/nova.conf DEFAULT service_down_time 60
openstack-config --set /etc/nova/nova.conf DEFAULT rootwrap_config /etc/nova/rootwrap.conf
openstack-config --set /etc/nova/nova.conf DEFAULT keystone_ec2_url http://os-node1:5000/v2.0/ec2tokens
openstack-config --set /etc/nova/nova.conf DEFAULT allow_resize_to_same_host True
openstack-config --set /etc/nova/nova.conf DEFAULT glance_api_servers os-node1:9292
openstack-config --set /etc/nova/nova.conf DEFAULT api_paste_config /etc/nova/api-paste.ini
openstack-config --set /etc/nova/nova.conf DEFAULT lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf DEFAULT verbose true
openstack-config --set /etc/nova/nova.conf DEFAULT use_syslog false
openstack-config --set /etc/nova/nova.conf DEFAULT memcached_servers os-node1:11211
openstack-config --set /etc/nova/nova.conf DEFAULT use_cow_images true
openstack-config --set /etc/nova/nova.conf DEFAULT volume_api_class nova.volume.cinder.API
openstack-config --set /etc/nova/nova.conf DEFAULT image_service nova.image.glance.GlanceImageService
openstack-config --set /etc/nova/nova.conf DEFAULT compute_scheduler_driver nova.scheduler.filter_scheduler.FilterScheduler
openstack-config --set /etc/nova/nova.conf DEFAULT start_guests_on_host_boot true
openstack-config --set /etc/nova/nova.conf DEFAULT osapi_volume_listen 10.240.216.101
10. Create the Nova service and endpoint
[root@os-node1 ~]# keystone service-create --name=nova --type=compute --description="Nova Compute service"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | Nova Compute service |
| id | b02ae450a1c14abc859c8b7a2522d703 |
| name | nova |
| type | compute |
+-------------+----------------------------------+
[root@os-node1 ~]# keystone endpoint-create --service-id=b02ae450a1c14abc859c8b7a2522d703 --publicurl=http://os-node1:8774/v2/%\(tenant_id\)s --internalurl=http://os-node1:8774/v2/%\(tenant_id\)s --adminurl=http://os-node1:8774/v2/%\(tenant_id\)s
+-------------+---------------------------------------------+
| Property | Value |
+-------------+---------------------------------------------+
| adminurl | http://os-node1:8774/v2/%(tenant_id)s |
| id | 805f47919c2746fcaa3b4bf26b4496a6 |
| internalurl | http://os-node1:8774/v2/%(tenant_id)s |
| publicurl | http://os-node1:8774/v2/%(tenant_id)s |
| region | regionOne |
| service_id | b02ae450a1c14abc859c8b7a2522d703 |
+-------------+---------------------------------------------+
11. Start all of the Nova services
service openstack-nova-api start
service openstack-nova-cert start
service openstack-nova-consoleauth start
service openstack-nova-scheduler start
service openstack-nova-conductor start
service openstack-nova-novncproxy start
chkconfig openstack-nova-api on
chkconfig openstack-nova-cert on
chkconfig openstack-nova-consoleauth on
chkconfig openstack-nova-scheduler on
chkconfig openstack-nova-conductor on
chkconfig openstack-nova-novncproxy on
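As an optional check, with the admin credentials loaded (see the openrc.sh created in the Keystone section), verify that the control-plane services have registered:
nova service-list    # nova-cert, nova-consoleauth, nova-scheduler and nova-conductor should show state up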
12. List the available images with the nova command
[root@os-node1 ~]# nova image-list
+--------------------------------------+--------------+--------+--------+
| ID | Name | Status | Server |
+--------------------------------------+--------------+--------+--------+
| d53229ed-6a7d-4e24-b32f-c617b961d6ab | CirrOS 0.3.1 | ACTIVE | |
+--------------------------------------+--------------+--------+--------+
Install and configure Nova on the compute nodes
1. Install the Nova compute package
[root@compute ~]# yum install -y openstack-nova-compute
2. Configure the MySQL connection and Keystone authentication in nova.conf
openstack-config --set /etc/nova/nova.conf database connection mysql://nova:nova@os-node1/nova
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_host os-node1
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_user nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_password nova
3. Configure Qpid as the message queue in nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend nova.openstack.common.rpc.impl_qpid
openstack-config --set /etc/nova/nova.conf DEFAULT qpid_hostname os-node1
4. Configure the VNC settings in nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 10.240.216.102
openstack-config --set /etc/nova/nova.conf DEFAULT vnc_enabled True
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address 10.240.216.102
openstack-config --set /etc/nova/nova.conf DEFAULT novncproxy_base_url http://os-node1:6080/vnc_auto.html
5. Configure authentication in /etc/nova/api-paste.ini.
openstack-config --set /etc/nova/api-paste.ini DEFAULT api_paste_config /etc/nova/api-paste.ini
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_host os-node1
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_port 35357
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_protocol http
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_uri http://os-node1:5000/v2.0
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_tenant_name service
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_user nova
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_password nova
6. Set the remaining options in nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT state_path /var/lib/nova
openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_listen 10.240.216.102
openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen 10.240.216.102
openstack-config --set /etc/nova/nova.conf DEFAULT service_down_time 60
openstack-config --set /etc/nova/nova.conf DEFAULT instance_usage_audit_period hour
openstack-config --set /etc/nova/nova.conf DEFAULT rootwrap_config /etc/nova/rootwrap.conf
openstack-config --set /etc/nova/nova.conf DEFAULT allow_resize_to_same_host True
openstack-config --set /etc/nova/nova.conf DEFAULT instance_usage_audit True
openstack-config --set /etc/nova/nova.conf DEFAULT glance_host os-node1
openstack-config --set /etc/nova/nova.conf DEFAULT glance_api_servers os-node1:9292
openstack-config --set /etc/nova/nova.conf DEFAULT linuxnet_interface_driver nova.network.linux_net.LinuxOVSInterfaceDriver
openstack-config --set /etc/nova/nova.conf DEFAULT linuxnet_ovs_integration_bridge br-int
openstack-config --set /etc/nova/nova.conf DEFAULT lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf DEFAULT verbose true
openstack-config --set /etc/nova/nova.conf DEFAULT use_syslog false
openstack-config --set /etc/nova/nova.conf DEFAULT memcached_servers os-node1:11211
openstack-config --set /etc/nova/nova.conf DEFAULT image_service nova.image.glance.GlanceImageService
openstack-config --set /etc/nova/nova.conf DEFAULT compute_scheduler_driver nova.scheduler.filter_scheduler.FilterScheduler
openstack-config --set /etc/nova/nova.conf DEFAULT start_guests_on_host_boot true
openstack-config --set /etc/nova/nova.conf DEFAULT osapi_volume_listen 10.240.216.102
openstack-config --set /etc/nova/nova.conf DEFAULT connection_type libvirt
openstack-config --set /etc/nova/nova.conf DEFAULT compute_driver libvirt.LibvirtDriver
openstack-config --set /etc/nova/nova.conf DEFAULT use_cow_images true
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_type kvm
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_inject_key false
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_inject_partition -2
openstack-config --set /etc/nova/nova.conf DEFAULT live_migration_flag VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_vif_driver nova.virt.libvirt.vif.LibvirtGenericVIFDriver
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_cpu_mode host-model
openstack-config --set /etc/nova/nova.conf DEFAULT disk_cachemodes "file=writethrough"
7. Adjust the libvirt configuration
sed -i 's/#listen_tls = 0/listen_tls = 0/g' /etc/libvirt/libvirtd.conf
sed -i 's/#listen_tcp = 1/listen_tcp = 1/g' /etc/libvirt/libvirtd.conf
sed -i '$a\auth_tcp="none"' /etc/libvirt/libvirtd.conf
sed -i 's/env libvirtd_opts="-d "/env libvirtd_opts="-d -l"/g' /etc/init.d/libvirtd
sed -i 's/#vnc_listen = "0.0.0.0"/vnc_listen = "0.0.0.0"/g' /etc/libvirt/qemu.conf
sed -i 's/#user = "root"/user = "root"/g' /etc/libvirt/qemu.conf
sed -i 's/#group = "root"/group = "root"/g' /etc/libvirt/qemu.conf
sed -i 's/#LIBVIRTD_ARGS/LIBVIRTD_ARGS/' /etc/sysconfig/libvirtd
8. Start the Nova compute services and enable them at boot
service libvirtd restart
service messagebus restart
chkconfig libvirtd on
chkconfig messagebus on
service openstack-nova-compute restart
chkconfig openstack-nova-compute on
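Optionally confirm that the compute service came up and registered with the controller (extra check, not in the original steps):
service openstack-nova-compute status    # on the compute node
nova service-list                        # on the control node; nova-compute should appear with state up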
Prepare for creating instances
1. Generate an SSH key pair on the control node; this key will be used for passwordless login to the instances created later
[root@os-node1 ~]# ssh-keygen
[root@os-node1 ~]# cd .ssh/
[root@os-node1 .ssh]# nova keypair-add --pub_key id_rsa.pub mykey
[root@os-node1 .ssh]# nova keypair-list
+-------+-------------------------------------------------+
| Name | Fingerprint |
+-------+-------------------------------------------------+
| mykey | 4c:07:d8:f4:e5:a5:93:fe:cf:98:eb:33:bc:06:04:06 |
+-------+-------------------------------------------------+
2. List the flavors available for creating instances
[root@os-node1 .ssh]# nova flavor-list
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
| 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
| 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
| 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
| 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
[root@os-node1 .ssh]#
3. Create a custom flavor
[root@os-node1 ~]# nova flavor-create flavor-test 6 1024 30 2 --is-public=true
+----+-------------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-------------+-----------+------+-----------+------+-------+-------------+-----------+
| 6 | flavor-test | 1024 | 30 | 0 | | 2 | 1.0 | True |
+----+-------------+-----------+------+-----------+------+-------+-------------+-----------+
[root@os-node1 .ssh]# nova flavor-list
+----+-------------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-------------+-----------+------+-----------+------+-------+-------------+-----------+
| 1 | m1.tiny | 512 | 1 | 0 | | 1 | 1.0 | True |
| 2 | m1.small | 2048 | 20 | 0 | | 1 | 1.0 | True |
| 3 | m1.medium | 4096 | 40 | 0 | | 2 | 1.0 | True |
| 4 | m1.large | 8192 | 80 | 0 | | 4 | 1.0 | True |
| 5 | m1.xlarge | 16384 | 160 | 0 | | 8 | 1.0 | True |
| 6 | flavor-test | 1024 | 30 | 0 | | 2 | 1.0 | True |
+----+-------------+-----------+------+-----------+------+-------+-------------+-----------+
4. List the security groups and add rules to the default group
[root@os-node1 .ssh]# nova secgroup-list
+----+---------+-------------+
| Id | Name | Description |
+----+---------+-------------+
| 1 | default | default |
+----+---------+-------------+
[root@os-node1 ~]# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range | Source Group |
+-------------+-----------+---------+-----------+--------------+
| tcp | 22 | 22 | 0.0.0.0/0 | |
+-------------+-----------+---------+-----------+--------------+
[root@os-node1 ~]# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range | Source Group |
+-------------+-----------+---------+-----------+--------------+
| icmp | -1 | -1 | 0.0.0.0/0 | |
+-------------+-----------+---------+-----------+--------------+
VII. Install the Cinder block storage service
1. Install the Cinder packages on the control node
[root@os-node1 ~]# yum install -y openstack-cinder openstack-utils openstack-selinux
2. Configure the MySQL connection in cinder.conf
[root@os-node1 ~]# openstack-config --set /etc/cinder/cinder.conf database connection mysql://cinder:cinder@localhost/cinder
3. Create the Cinder database
[root@os-node1 ~]# openstack-db --init --service cinder --password cinder
Please enter the password for the 'root' MySQL user:
Verified connectivity to MySQL.
Creating 'cinder' database.
Initializing the cinder database, please wait...
Complete!
4. Create a cinder user and add it to the service tenant with the admin role
[root@os-node1 ~]# keystone user-create --name=cinder --pass=cinder --email=cinder@jiayuan.com
+----------+----------------------------------+
| Property | Value |
+----------+----------------------------------+
| email | cinder@jiayuan.com |
| enabled | True |
| id | 54d5bb10cf7341aa88c1dda46087ce6d |
| name | cinder |
+----------+----------------------------------+
[root@os-node1 ~]# keystone user-role-add --user=cinder --tenant=service --role=admin
5. Configure the message queue settings in cinder.conf
openstack-config --set /etc/cinder/cinder.conf DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid
openstack-config --set /etc/cinder/cinder.conf DEFAULT qpid_hostname os-node1
6. Create the Cinder service and endpoint
[root@os-node1 ~]# keystone service-create --name=cinder --type=volume --description="Cinder Volume Service"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | Cinder Volume Service |
| id | ad715e197f404c51973a263ff376792d |
| name | cinder |
| type | volume |
+-------------+----------------------------------+
[root@os-node1 ~]# keystone endpoint-create --service-id=ad715e197f404c51973a263ff376792d --publicurl=http://os-node1:8776/v1/%\(tenant_id\)s --internalurl=http://os-node1:8776/v1/%\(tenant_id\)s --adminurl=http://os-node1:8776/v1/%\(tenant_id\)s
+-------------+---------------------------------------------+
| Property | Value |
+-------------+---------------------------------------------+
| adminurl | http://os-node1:8776/v1/%(tenant_id)s |
| id | f2181fe1fddc46cebc3952538111ff19 |
| internalurl | http://os-node1:8776/v1/%(tenant_id)s |
| publicurl | http://os-node1:8776/v1/%(tenant_id)s |
| region | regionOne |
| service_id | ad715e197f404c51973a263ff376792d |
+-------------+---------------------------------------------+
7. Configure Keystone authentication in /etc/cinder/api-paste.ini
openstack-config --set /etc/cinder/api-paste.ini filter:authtoken auth_host os-node1
openstack-config --set /etc/cinder/api-paste.ini filter:authtoken auth_port 35357
openstack-config --set /etc/cinder/api-paste.ini filter:authtoken auth_protocol http
openstack-config --set /etc/cinder/api-paste.ini filter:authtoken auth_uri http://os-node1:5000
openstack-config --set /etc/cinder/api-paste.ini filter:authtoken admin_tenant_name service
openstack-config --set /etc/cinder/api-paste.ini filter:authtoken admin_user cinder
openstack-config --set /etc/cinder/api-paste.ini filter:authtoken admin_password cinder
8. Start the Cinder services and enable them at boot
service openstack-cinder-api start
service openstack-cinder-scheduler start
chkconfig openstack-cinder-api on
chkconfig openstack-cinder-scheduler on
9. List the volumes
[root@os-node1 ~]# cinder list
+----+--------+--------------+------+-------------+----------+-------------+
| ID | Status | Display Name | Size | Volume Type | Bootable | Attached to |
+----+--------+--------------+------+-------------+----------+-------------+
+----+--------+--------------+------+-------------+----------+-------------+
VIII. Integrating OpenStack with Ceph
1. Create the pools
ceph osd pool create volumes 128
ceph osd pool create images 128
ceph osd pool create compute 128
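Optionally, verify that the three pools exist before wiring them into OpenStack:
ceph osd lspools                    # should list volumes, images and compute
ceph osd pool get volumes pg_num    # should report pg_num: 128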
2. Set up Ceph client authentication
(1) Create the glance client (for the images pool) and generate its keyring
[root@control ceph]# ceph auth get-or-create client.glance mon 'allow *' osd 'allow *'
[client.glance]
key = AQDnH5dTyJWZAxAAKaAPNdPBXLkaRM11PV5jUw==
Save the generated key to a keyring file
[root@control ceph]# vi /etc/ceph/ceph.client.glance.keyring
[client.glance]
key = AQDnH5dTyJWZAxAAKaAPNdPBXLkaRM11PV5jUw==
Copy the keyring file to the compute nodes
Change the ownership of the keyring file on all nodes
[root@control ceph]# chown glance:glance /etc/ceph/ceph.client.glance.keyring
(2) Create the volumes client and generate its keyring
[root@control ~]# ceph auth get-or-create client.volumes mon 'allow *' osd 'allow *'
[client.volumes]
key = AQBPHpdT2KbKMxAAARPGhg9hezM6wdis4V4F2A==
Save the generated key to a keyring file
[root@control ceph]# vi /etc/ceph/ceph.client.volumes.keyring
[client.volumes]
key = AQBPHpdT2KbKMxAAARPGhg9hezM6wdis4V4F2A==
Copy the keyring file to the compute nodes
Change the ownership of the keyring file on all nodes
chown cinder:cinder /etc/ceph/ceph.client.volumes.keyring
(3) Create the compute client
[root@os-node1 ~]# ceph auth get-or-create client.compute mon 'allow *' osd 'allow *' mds 'allow *'
[client.compute]
key = AQBMtchTEMAHBRAAyJLh3ZTDY3ajPDMm4VKgHQ==
Save the generated key to a keyring file
[root@os-node1 ~]# vi /etc/ceph/ceph.client.compute.keyring
[client.compute]
key = AQBMtchTEMAHBRAAyJLh3ZTDY3ajPDMm4VKgHQ==
Copy the keyring file to the compute nodes
Change the ownership of the keyring file on all nodes
[root@os-node1]# chown nova:nova /etc/ceph/ceph.client.compute.keyring
(4) On the controller and compute nodes, change the permissions of the ceph.client.admin.keyring file
chmod 777 /etc/ceph/ceph.client.admin.keyring
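Optionally, confirm that the new clients were registered and that the cluster is reachable before configuring the OpenStack services:
ceph auth list | grep client.    # client.glance, client.volumes and client.compute should all appear
ceph -s                          # the monitors should respond; the health output depends on the cluster state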
Integrating Glance with Ceph
1. Edit /etc/glance/glance-api.conf and change the following settings:
default_store = rbd # Glance uses the "file" store by default; switch it to Ceph's RBD store
# ============ RBD Store Options =============================
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_user = glance
rbd_store_pool = images
rbd_store_chunk_size = 8
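If you prefer not to edit the file by hand, the same settings can also be written with openstack-config, which this guide already uses for the other services; the commands below are equivalent to the edits above (these options live in the DEFAULT section in this release):
openstack-config --set /etc/glance/glance-api.conf DEFAULT default_store rbd
openstack-config --set /etc/glance/glance-api.conf DEFAULT rbd_store_ceph_conf /etc/ceph/ceph.conf
openstack-config --set /etc/glance/glance-api.conf DEFAULT rbd_store_user glance
openstack-config --set /etc/glance/glance-api.conf DEFAULT rbd_store_pool images
openstack-config --set /etc/glance/glance-api.conf DEFAULT rbd_store_chunk_size 8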
2. Adjust the ownership and permissions of the /etc/glance directory
chown -R glance:glance /etc/glance/
chmod 770 /etc/glance/
3. Restart the Glance services after editing
service openstack-glance-api restart
service openstack-glance-registry restart
4. Download an image and upload it as a test
[root@os-node1 images]# wget https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-i386-disk.img
[root@os-node1 ~]# glance image-create --name=cirros-0.3.0-i386 --disk-format=qcow2 --container-format=bare < /images/cirros-0.3.0-i386-disk.img
+------------------+--------------------------------------+
| Property | Value |
+------------------+--------------------------------------+
| checksum | 90169ba6f09b5906a7f0755bd00bf2c3 |
| container_format | bare |
| created_at | 2014-07-12T12:07:36 |
| deleted | False |
| deleted_at | None |
| disk_format | qcow2 |
| id | 181e3398-97c7-4a8d-9671-9ba84cfb51c5 |
| is_public | False |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros-0.3.0-i386 |
| owner | e9eb582da335442b9b7d7e7599517f61 |
| protected | False |
| size | 9159168 |
| status | active |
| updated_at | 2014-07-12T12:07:43 |
+------------------+--------------------------------------+
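To confirm the image was stored in Ceph rather than on local disk, list the contents of the images pool; the image should appear under its Glance ID:
rbd -p images ls    # should show 181e3398-97c7-4a8d-9671-9ba84cfb51c5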
Integrating Cinder with Ceph
1. Log in to a compute node and add the secret to libvirt
[root@compute ~]# uuidgen (run this once on one compute node to obtain a UUID; reuse the same UUID on all the other compute nodes)
0071bd6f-849c-433a-8051-2e553df49aea
[root@compute ~]# cd /etc/libvirt/
[root@compute libvirt]# ls
libvirt.conf libvirtd.conf lxc.conf nwfilter qemu qemu.conf
[root@compute libvirt]# cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>0071bd6f-849c-433a-8051-2e553df49aea</uuid>
<usage type='ceph'>
<name>client.compute secret</name>
</usage>
</secret>
EOF
[root@compute libvirt]# ls
libvirt.conf libvirtd.conf lxc.conf nwfilter qemu qemu.conf secret.xml
[root@compute libvirt]#
[root@compute libvirt]# cat secret.xml
<secret ephemeral='no' private='no'>
<uuid>0071bd6f-849c-433a-8051-2e553df49aea</uuid>
<usage type='ceph'>
<name>client.compute secret</name>
</usage>
</secret>
[root@compute libvirt]# virsh secret-define --file secret.xml
Secret 0071bd6f-849c-433a-8051-2e553df49aea created
Set the secret value with the Ceph client key
[root@compute libvirt]# virsh secret-set-value --secret 0071bd6f-849c-433a-8051-2e553df49aea --base64 AQDT9pNTSFD6NRAAoZkAgx21uGQ+DM/k0rzxow== (the base64 string at the end is the key shown by cat /etc/ceph/ceph.client.compute.keyring)
Secret value set
[root@compute libvirt]# ls
libvirt.conf libvirtd.conf lxc.conf nwfilter qemu qemu.conf secrets secret.xml
[root@compute libvirt]# rm -rf secret.xml
[root@compute libvirt]# cd secrets/
[root@compute secrets]# ls
0071bd6f-849c-433a-8051-2e553df49aea.base64 0071bd6f-849c-433a-8051-2e553df49aea.xml
[root@compute secrets]#
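To confirm libvirt stored the secret correctly:
virsh secret-list                                              # the UUID defined above should be listed
virsh secret-get-value 0071bd6f-849c-433a-8051-2e553df49aea    # should print the base64 key that was just set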
2. Configure the RBD settings in the Cinder configuration file:
On the controller node
openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.rbd.RBDDriver
openstack-config --set /etc/cinder/cinder.conf DEFAULT rbd_pool volumes
openstack-config --set /etc/cinder/cinder.conf DEFAULT rbd_ceph_conf /etc/ceph/ceph.conf
openstack-config --set /etc/cinder/cinder.conf DEFAULT rbd_flatten_volume_from_snapshot false
openstack-config --set /etc/cinder/cinder.conf DEFAULT rbd_max_clone_depth 5
openstack-config --set /etc/cinder/cinder.conf DEFAULT rbd_user volumes
openstack-config --set /etc/cinder/cinder.conf DEFAULT rbd_secret_uuid 0071bd6f-849c-433a-8051-2e553df49aea
3. Configure the remaining Cinder settings as follows:
On the controller node
openstack-config --set /etc/cinder/cinder.conf DEFAULT osapi_volume_listen 10.240.216.101
openstack-config --set /etc/cinder/cinder.conf DEFAULT api_paste_config /etc/cinder/api-paste.ini
openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers 10.240.216.101:9292
openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_version 2
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT max_retries -1
openstack-config --set /etc/cinder/cinder.conf DEFAULT debug True
openstack-config --set /etc/cinder/cinder.conf DEFAULT verbose True
openstack-config --set /etc/cinder/cinder.conf DEFAULT use_syslog False
4. Adjust the ownership of /etc/cinder and the permissions of the volumes keyring
chown -R cinder:cinder /etc/cinder/
chmod 774 /etc/ceph/ceph.client.volumes.keyring
5. Restart the Cinder-related services
service openstack-glance-api restart
service openstack-cinder-volume restart
service openstack-cinder-backup restart
6. Create a volume from an image ID
[root@os-node1 ~]# cinder create 10 --image-id 98d5ccee-b09d-4237-abe7-1acf86145b51 --display-name test-volumes-1
+---------------------+--------------------------------------+
| Property | Value |
+---------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| created_at | 2014-07-15T09:19:35.784021 |
| display_description | None |
| display_name | test-volumes-1 |
| id | c82cfe6c-220b-4d30-86ec-f098ce512523 |
| image_id | 98d5ccee-b09d-4237-abe7-1acf86145b51 |
| metadata | {} |
| size | 10 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| volume_type | None |
+---------------------+--------------------------------------+
[root@os-node1 ~]# cinder list
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
| ID | Status | Display Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
| c82cfe6c-220b-4d30-86ec-f098ce512523 | available | test-volumes-1 | 10 | None | true | |
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
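Because the RBD driver stores each volume as an image named volume-&lt;ID&gt;, the new volume can also be checked directly in the Ceph volumes pool:
rbd -p volumes ls                                              # should show volume-c82cfe6c-220b-4d30-86ec-f098ce512523
rbd info volumes/volume-c82cfe6c-220b-4d30-86ec-f098ce512523   # shows the image size and format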
Integrating Nova with Ceph
1. Integration via CephFS
(1) Mount the Ceph storage on each compute node's /var/lib/nova/instances/ directory via CephFS
[root@os-node2 ~]# mount.ceph os-node1,os-node2,os-node3:/ /var/lib/nova/instances/ -v -o name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==
FATAL: Module ceph not found.
mount.ceph: modprobe failed, exit status 1
parsing options: name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==
mount error: ceph filesystem not supported by the system
The error above means this kernel does not include the ceph module, which is fixed by upgrading the kernel.
Kernels before 2.6.34 do not ship the ceph/rbd modules, so upgrade the kernel to the latest version:
rpm --import http://elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://elrepo.org/elrepo-release-6-5.el6.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install kernel-ml -y
After installing the new kernel, edit /etc/grub.conf so it takes effect after reboot:
change default=1 to default=0 in the configuration file
Then run the mount command again:
[root@os-node2 ~]# mount.ceph os-node1,os-node2,os-node3:/ /var/lib/nova/instances/ -v -o name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==
parsing options: name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==
[root@os-node2 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda3 30G 1.7G 27G 7% /
tmpfs 16G 0 16G 0% /dev/shm
/dev/sda1 93M 84M 2.1M 98% /boot
/dev/sda2 50G 325M 47G 1% /var
os-node1,10.240.216.102,10.240.216.103:/
2.1T 2.1G 2.1T 1% /var/lib/nova/instances
[root@os-node3 ~]# mount.ceph os-node1,os-node2,os-node3:/ /var/lib/nova/instances/ -v -o name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==
parsing options: name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==
[root@os-node3 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda3 30G 1.7G 27G 7% /
tmpfs 16G 0 16G 0% /dev/shm
/dev/sda1 93M 84M 2.1M 98% /boot
/dev/sda2 50G 324M 47G 1% /var
os-node1,10.240.216.102,10.240.216.103:/
2.1T 2.1G 2.1T 1% /var/lib/nova/instances
[root@os-node3 ~]#
(2) Add the mounts to /etc/fstab so they are not lost after a reboot
[root@os-node2 ~]# vi /etc/fstab
10.240.216.101,10.240.216.102,10.240.216.103:/ /var/lib/nova/instances ceph name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==,noatime 0 0
[root@os-node3 ~]# vi /etc/fstab
10.240.216.101,10.240.216.102,10.240.216.103:/ /var/lib/nova/instances ceph name=admin,secret=AQAWssNTwHyEORAAqVWA3KSo0ZhTvu2ck63lMQ==,noatime 0 0
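After editing fstab, it is worth validating the entries before the next reboot (only while nothing is using the instances directory yet):
umount /var/lib/nova/instances    # unmount the manual test mount first
mount -a                          # remounts everything listed in fstab; an error here means the fstab line is wrong
df -h /var/lib/nova/instances     # the ceph filesystem should be mounted again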
2. Integration via RBD
Configure nova.conf on the compute nodes
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_images_type rbd
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_images_rbd_pool compute
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_use_virtio_for_bridges True
openstack-config --set /etc/nova/nova.conf DEFAULT rbd_user compute
openstack-config --set /etc/nova/nova.conf DEFAULT rbd_secret_uuid 0071bd6f-849c-433a-8051-2e553df49aea
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_images_rbd_ceph_conf /etc/ceph/ceph.conf
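To double-check that the values landed in the right section, openstack-config can read them back:
openstack-config --get /etc/nova/nova.conf DEFAULT libvirt_images_type    # should print rbd
openstack-config --get /etc/nova/nova.conf DEFAULT rbd_secret_uuid        # should print the libvirt secret UUID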
3. On the compute and controller nodes, set the ownership of the Nova state directory
chown -R nova:nova /var/lib/nova/
4. Restart all related services
On the controller node
service openstack-glance-api restart
service openstack-cinder-volume restart
service openstack-cinder-backup restart
On the compute nodes
service openstack-nova-compute restart
5. Check the list of running Nova services
[root@os-node1 ~]# nova-manage service list
Binary Host Zone Status State Updated_At
nova-consoleauth os-node1 internal enabled :-) 2014-07-15 09:39:51
nova-scheduler os-node1 internal enabled :-) 2014-07-15 09:39:51
nova-cert os-node1 internal enabled :-) 2014-07-15 09:39:51
nova-conductor os-node1 internal enabled :-) 2014-07-15 09:39:51
nova-compute os-node2 nova enabled :-) 2014-07-15 09:39:55
nova-compute os-node3 nova enabled :-) 2014-07-15 09:39:38
[root@os-node1 ~]#
IX. Configure the Neutron networking service
Controller node configuration
1. Install the Neutron packages
yum install openstack-neutron python-neutron openstack-neutron-openvswitch python-neutronclient -y
yum install kernel-ml iproute -y # the Neutron DHCP agent needs iproute; kernel-ml upgrades the kernel
2. Edit the Neutron configuration file
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2
openstack-config --set /etc/neutron/neutron.conf DEFAULT control_exchange neutron
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_hostname os-node1
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_port 5672
openstack-config --set /etc/neutron/neutron.conf DEFAULT debug true
openstack-config --set /etc/neutron/neutron.conf DEFAULT verbose true
openstack-config --set /etc/neutron/neutron.conf DEFAULT use_syslog false
openstack-config --set /etc/neutron/neutron.conf DEFAULT use_stderr true
openstack-config --set /etc/neutron/neutron.conf DEFAULT publish_errors false
openstack-config --set /etc/neutron/neutron.conf DEFAULT bind_host os-node1
openstack-config --set /etc/neutron/neutron.conf DEFAULT bind_port 9696
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf DEFAULT dhcp_lease_duration 120
openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_bulk true
openstack-config --set /etc/neutron/neutron.conf DEFAULT agent_down_time 15
openstack-config --set /etc/neutron/neutron.conf DEFAULT network_auto_schedule true
openstack-config --set /etc/neutron/neutron.conf DEFAULT router_auto_schedule true
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_user neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_password neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_host os-node1
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://os-node1:35357/v2.0
openstack-config --set /etc/neutron/neutron.conf database max_retries -1
openstack-config --set /etc/neutron/neutron.conf database reconnect_interval 2
openstack-config --set /etc/neutron/neutron.conf database sql_connection mysql://neutron:neutron@localhost:3306/neutron
3. Edit the Nova configuration file
openstack-config --set /etc/nova/nova.conf DEFAULT network_api_class nova.network.neutronv2.api.API
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_url http://os-node1:9696
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_tenant_name service
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_username neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_password neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_auth_url http://os-node1:35357/v2.0
4. Edit /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini to put OVS into VLAN mode
openstack-config --set /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini OVS tenant_network_type vlan
openstack-config --set /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini OVS network_vlan_ranges physnet1:1007:1009 # VLANs 1007-1009 are used for instances
openstack-config --set /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini OVS bridge_mappings physnet1:br-int
5. Symlink /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini to /etc/neutron/plugin.ini
ln -s /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini /etc/neutron/plugin.ini
6. Create the neutron database on the controller node
[root@os-node1 ~]# openstack-db --init --service neutron --password neutron
7. Create the neutron user, add it to the service tenant, and grant it the admin role
[root@os-node1 ~]# keystone user-create --name=neutron --pass=neutron --email=neutron@jiayuan.com
+----------+----------------------------------+
| Property | Value |
+----------+----------------------------------+
| email | neutron@jiayuan.com |
| enabled | True |
| id | 3dcd95ce1c8f4664be0426e1ebbd1bff |
| name | neutron |
+----------+----------------------------------+
[root@os-node1 ~]# keystone user-role-add --user=neutron --tenant=service --role=admin
8. Create the Neutron service and configure its endpoint
[root@os-node1 ~]# keystone service-create --name=neutron --type=network --description="OpenStack Networking Service"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | OpenStack Networking Service |
| id | 1b76562ac6e342f39a4eac8776e6d6ca |
| name | neutron |
| type | network |
+-------------+----------------------------------+
[root@os-node1 ~]# keystone endpoint-create --service-id 1b76562ac6e342f39a4eac8776e6d6ca --publicurl http://os-node1:9696 --adminurl http://os-node1:9696 --internalurl http://os-node1:9696
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| adminurl | http://os-node1:9696 |
| id | c9a554d9a5474892b147b6ad8aa1da55 |
| internalurl | http://os-node1:9696 |
| publicurl | http://os-node1:9696 |
| region | regionOne |
| service_id | 1b76562ac6e342f39a4eac8776e6d6ca |
+-------------+----------------------------------+
[root@os-node1 ~]#
9. Add the bridge
[root@os-node1 ~]# service openvswitch start # start the openvswitch service
[root@os-node1 ~]# ovs-vsctl add-br br-int # create the bridge used by the instances
[root@os-node1 ~]# ifconfig # check that the bridge was added
br-int Link encap:Ethernet HWaddr 5A:C8:01:46:4C:43
inet6 addr: fe80::dc56:cbff:fe04:7eea/64 Scope:Link
UP BROADCAST RUNNING MTU:1500 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:6 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:0 (0.0 b) TX bytes:468 (468.0 b)
[root@os-node1 ~]# ovs-vsctl add-port br-int em2 # attach the em2 interface to the bridge
[root@os-node1 ~]# ifconfig em2 0.0.0.0 up # no IP address is configured on em2
[root@os-node1 ~]# ifconfig em2 promisc # put em2 into promiscuous mode
[root@os-node1 ~]# service network restart
10. Restart the Nova services and start the Neutron services
service openstack-nova-api restart
service openstack-nova-scheduler restart
service openstack-nova-conductor restart
service neutron-server restart
service neutron-openvswitch-agent restart
Install the network node (here the controller node also acts as the network node)
1. Edit the /etc/neutron/dhcp_agent.ini configuration file
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
2. Restart the DHCP agent
service neutron-dhcp-agent restart
Compute node installation
Install and configure the Neutron L2 agent
1. Install the openvswitch packages
[root@os-node2 ~]# yum install openstack-neutron-openvswitch -y
yum install iproute -y # the Neutron DHCP agent needs this package
2. Start the openvswitch service and enable it at boot
[root@os-node2 ~]# service openvswitch start
/etc/openvswitch/conf.db does not exist ... (warning).
Creating empty database /etc/openvswitch/conf.db [ OK ]
Starting ovsdb-server [ OK ]
Configuring Open vSwitch system IDs [ OK ]
Inserting openvswitch module [ OK ]
Starting ovs-vswitchd [ OK ]
Enabling remote OVSDB managers [ OK ]
[root@os-node2 ~]# chkconfig openvswitch on
3. Add the bridge
[root@os-node2 ~]# ovs-vsctl add-br br-int # create the bridge used by the instances
[root@os-node2 ~]# ifconfig # check that the bridge was added
br-int Link encap:Ethernet HWaddr 5A:C8:01:46:4C:43
inet6 addr: fe80::dc56:cbff:fe04:7eea/64 Scope:Link
UP BROADCAST RUNNING MTU:1500 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:6 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:0 (0.0 b) TX bytes:468 (468.0 b)
[root@os-node2 ~]# ovs-vsctl add-port br-int em2 # attach the em2 interface to the bridge
[root@os-node2 ~]# ifconfig em2 0.0.0.0 up
[root@os-node2 ~]# ifconfig em2 promisc
[root@os-node2 ~]# service network restart
4. Edit the /etc/neutron/l3_agent.ini and /etc/neutron/dhcp_agent.ini configuration files
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT use_namespaces True
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT use_namespaces True
5. Configure the /etc/neutron/neutron.conf file
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2
openstack-config --set /etc/neutron/neutron.conf DEFAULT control_exchange neutron
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_hostname os-node1
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_port 5672
openstack-config --set /etc/neutron/neutron.conf DEFAULT debug true
openstack-config --set /etc/neutron/neutron.conf DEFAULT verbose true
openstack-config --set /etc/neutron/neutron.conf DEFAULT use_syslog false
openstack-config --set /etc/neutron/neutron.conf DEFAULT use_stderr true
openstack-config --set /etc/neutron/neutron.conf DEFAULT publish_errors false
openstack-config --set /etc/neutron/neutron.conf DEFAULT bind_host 0.0.0.0
openstack-config --set /etc/neutron/neutron.conf DEFAULT bind_port 9696
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf DEFAULT dhcp_lease_duration 120
openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_bulk true
openstack-config --set /etc/neutron/neutron.conf DEFAULT agent_down_time 15
openstack-config --set /etc/neutron/neutron.conf DEFAULT network_auto_schedule true
openstack-config --set /etc/neutron/neutron.conf DEFAULT router_auto_schedule true
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_user neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_password neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_host os-node1
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://os-node1:35357/v2.0
openstack-config --set /etc/neutron/neutron.conf database max_retries -1
openstack-config --set /etc/neutron/neutron.conf database reconnect_interval 2
openstack-config --set /etc/neutron/neutron.conf database sql_connection mysql://neutron:neutron@os-node1/neutron
6. Edit the /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini configuration file
openstack-config --set /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini OVS tenant_network_type vlan
openstack-config --set /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini OVS network_vlan_ranges physnet1:1007:1009
openstack-config --set /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini OVS bridge_mappings physnet1:br-int
7. Configure the Nova configuration file
openstack-config --set /etc/nova/nova.conf DEFAULT network_api_class nova.network.neutronv2.api.API
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_url http://os-node1:9696
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_tenant_name service
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_username neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_password neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_auth_url http://os-node1:35357/v2.0
8. Symlink /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini to /etc/neutron/plugin.ini
ln -s /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini /etc/neutron/plugin.ini
9. Restart the services
service neutron-openvswitch-agent restart
service openstack-nova-compute restart
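Once the agents on the controller and compute nodes have been restarted, their status can be checked from the controller:
neutron agent-list    # the Open vSwitch agents and the DHCP agent should all show alive (:-))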
Create the VM logical network
1. Create a jiayuan tenant
[root@os-node1 ~]# keystone tenant-create --name=jiayuan --description="jiayuan-test"
+-------------+----------------------------------+
| Property | Value |
+-------------+----------------------------------+
| description | jiayuan-test |
| enabled | True |
| id | 4fa503932eb7429ca756e9f3f4bc0650 |
| name | jiayuan |
+-------------+----------------------------------+
[root@os-node1 ~]# keystone user-role-add --user=admin --tenant=jiayuan --role=admin
2. Create a network
[root@os-node1 ~]# neutron net-create vlan1008 --tenant-id 4fa503932eb7429ca756e9f3f4bc0650
Created a new network:
+---------------------------+--------------------------------------+
| Field | Value |
+---------------------------+--------------------------------------+
| admin_state_up | True |
| id | ab34095f-d5b4-40f3-a236-73fb8eba1df4 |
| name | vlan1008 |
| provider:network_type | vlan |
| provider:physical_network | physnet1 |
| provider:segmentation_id | 1008 |
| shared | False |
| status | ACTIVE |
| subnets | |
| tenant_id | 4fa503932eb7429ca756e9f3f4bc0650 |
+---------------------------+--------------------------------------+
[root@os-node1 ~]#
3. Add a subnet to the network
[root@os-node1 ~]# neutron subnet-create --tenant-id 4fa503932eb7429ca756e9f3f4bc0650 vlan1008 10.240.218.0/24 --name=vlan1008 --gateway=10.240.218.1 --enable_dhcp=True --allocation-pool start=10.240.218.10,end=10.240.218.200
Created a new subnet:
+------------------+-----------------------------------------------------+
| Field | Value |
+------------------+-----------------------------------------------------+
| allocation_pools | {"start": "10.240.218.10", "end": "10.240.218.200"} |
| cidr | 10.240.218.0/24 |
| dns_nameservers | |
| enable_dhcp | True |
| gateway_ip | 10.240.218.1 |
| host_routes | |
| id | 7bb7ed9e-5e90-40b9-ab41-9da3f7f99c07 |
| ip_version | 4 |
| name | vlan1008 |
| network_id | ab34095f-d5b4-40f3-a236-73fb8eba1df4 |
| tenant_id | 4fa503932eb7429ca756e9f3f4bc0650 |
+------------------+-----------------------------------------------------+
[root@os-node1 ~]#
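Before booting an instance, the network and subnet can be verified:
neutron net-list       # should list vlan1008 together with its subnet ID
neutron subnet-list    # should show vlan1008 with CIDR 10.240.218.0/24 and the allocation pool above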
4. Boot an instance
[root@os-node1 ~]# nova boot --flavor 6 --key_name mykey --image 551c7d4b-65f3-4ecc-a512-2a035a7b7038 --security_group default jiayuan-test
+--------------------------------------+----------------------------------------------------------+
| Property | Value |
+--------------------------------------+----------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-SRV-ATTR:host | - |
| OS-EXT-SRV-ATTR:hypervisor_hostname | - |
| OS-EXT-SRV-ATTR:instance_name | instance-00000004 |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | jbFeypcmh3QD |
| config_drive | |
| created | 2014-06-24T06:16:06Z |
| flavor | flavor-test (6) |
| hostId | |
| id | 01016cca-ad8e-4fd6-8ce9-2dec2f73eb78 |
| image | cirros-0.3.0-i386 (551c7d4b-65f3-4ecc-a512-2a035a7b7038) |
| key_name | mykey |
| metadata | {} |
| name | jiayuan-test |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | BUILD |
| tenant_id | 0370afd03e814f93a6d825d32194e272 |
| updated | 2014-06-24T06:16:06Z |
| user_id | 254db522a9b54804b49c4224ad9d0f2b |
+--------------------------------------+----------------------------------------------------------+
5. Show the details of an instance
[root@os-node1 ~]# nova show ce712672-958b-4ff2-88a2-e31d611b4938
+--------------------------------------+----------------------------------------------------------+
| Property | Value |
+--------------------------------------+----------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-SRV-ATTR:host | os-node2 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | os-node2 |
| OS-EXT-SRV-ATTR:instance_name | instance-00000001 |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | spawning |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| config_drive | |
| created | 2014-07-13T09:53:05Z |
| flavor | flavor-test (6) |
| hostId | 1b938bf8f6bbec202b7d68986e7fb0f35a410dd51108272b4ee98385 |
| id | ce712672-958b-4ff2-88a2-e31d611b4938 |
| image | cirros-0.3.0-i386 (a79026b8-56a5-45b4-8a2d-baea933af60d) |
| key_name | mykey |
| metadata | {} |
| name | jiayuan-test |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | BUILD |
| tenant_id | e9eb582da335442b9b7d7e7599517f61 |
| updated | 2014-07-13T09:53:07Z |
| user_id | c123d5be51404fdead770ebc0cecdbde |
+--------------------------------------+----------------------------------------------------------+
6. List the instances
[root@os-node1 ~]# nova list
+--------------------------------------+--------------+--------+------------+-------------+----------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+--------------+--------+------------+-------------+----------+
| ce712672-958b-4ff2-88a2-e31d611b4938 | jiayuan-test | BUILD | spawning | NOSTATE | |
+--------------------------------------+--------------+--------+------------+-------------+----------+
[root@os-node1 ~]#
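If an instance stays in BUILD/spawning for a long time, two useful checks are the guest console log and the compute log on the host it was scheduled to:
nova console-log jiayuan-test        # prints the guest boot output once the VM starts
tail -f /var/log/nova/compute.log    # run this on the compute node shown by nova show (os-node2 here)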
X. Install the Dashboard
1. Install the Dashboard packages on the controller node os-node1
[root@os-node1 ~]# yum install -y memcached python-memcached mod_wsgi openstack-dashboard
2. Create the Member role for the Dashboard
The Dashboard relies on Keystone's Member role, so create this default role for it.
[root@os-node1 ~]# keystone role-create --name Member
+----------+----------------------------------+
| Property | Value |
+----------+----------------------------------+
| id | e32e6cfed3f5428d925fcdf0ffe0c6a0 |
| name | Member |
+----------+----------------------------------+
3. Modify the Dashboard configuration file as follows (you can simply delete the original contents of /etc/openstack-dashboard/local_settings and paste in the contents below):
[root@os-node1 ~]# cat /etc/openstack-dashboard/local_settings | grep -v "#" | sed '/^$/d'
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
DEBUG = False
TEMPLATE_DEBUG = DEBUG
PROD = True
#SITE_BRANDING = 'Mirantis OpenStack Dashboard'
ALLOWED_HOSTS = ['*']
OPENSTACK_API_VERSIONS = {
"identity": 2.0
}
CONSOLE_TYPE = "AUTO"
HORIZON_CONFIG = {
'dashboards': ('project', 'admin', 'settings'),
'default_dashboard': 'project',
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {
'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED
},
}
ENABLE_JUJU_PANEL = True
HORIZON_CONFIG["simple_ip_management"] = False
LOCAL_PATH = '/tmp'
SECRET_KEY = 'dummy_secret_key'
CACHES = {
'default': {
'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION' : "10.240.216.101:11211"
},
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "10.240.216.101"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False
}
OPENSTACK_NEUTRON_NETWORK = {
'enable_lb': False,
'enable_firewall': False,
'enable_quotas': True,
'enable_vpn': False,
'profile_support': None,
}
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
TIME_ZONE = "UTC"
EXTERNAL_MONITORING = [ ]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'debug': {
'format': 'dashboard-%(name)s: %(levelname)s %(module)s %(funcName)s %(message)s'
},
'normal': {
'format': 'dashboard-%(name)s: %(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
'formatter': 'debug'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'debug'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/var/log/horizon/horizon.log',
'formatter': 'normal'
},
'syslog': {
'level': 'DEBUG',
'facility': 'local1',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'formatter': 'normal'
},
},
'loggers': {
'': {
'handlers': ['syslog'],
'level': 'NOTSET',
'propagate': True
},
'django.db.backends': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False
},
'horizon': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False
},
'openstack_dashboard': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False
},
'novaclient': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False
},
'glanceclient': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False
},
'keystoneclient': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False
},
'neutronclient': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False
},
'nose.plugins.manager': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False
}
}
}
LOGIN_URL='/dashboard/auth/login/'
LOGIN_REDIRECT_URL='/dashboard'
COMPRESS_OFFLINE = True
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': 'ALL TCP',
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': 'ALL UDP',
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': 'ALL ICMP',
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
'ssh': {
'name': 'SSH',
'ip_protocol': 'tcp',
'from_port': '22',
'to_port': '22',
},
'smtp': {
'name': 'SMTP',
'ip_protocol': 'tcp',
'from_port': '25',
'to_port': '25',
},
'dns': {
'name': 'DNS',
'ip_protocol': 'tcp',
'from_port': '53',
'to_port': '53',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
'pop3': {
'name': 'POP3',
'ip_protocol': 'tcp',
'from_port': '110',
'to_port': '110',
},
'imap': {
'name': 'IMAP',
'ip_protocol': 'tcp',
'from_port': '143',
'to_port': '143',
},
'ldap': {
'name': 'LDAP',
'ip_protocol': 'tcp',
'from_port': '389',
'to_port': '389',
},
'https': {
'name': 'HTTPS',
'ip_protocol': 'tcp',
'from_port': '443',
'to_port': '443',
},
'smtps': {
'name': 'SMTPS',
'ip_protocol': 'tcp',
'from_port': '465',
'to_port': '465',
},
'imaps': {
'name': 'IMAPS',
'ip_protocol': 'tcp',
'from_port': '993',
'to_port': '993',
},
'pop3s': {
'name': 'POP3S',
'ip_protocol': 'tcp',
'from_port': '995',
'to_port': '995',
},
'ms_sql': {
'name': 'MS SQL',
'ip_protocol': 'tcp',
'from_port': '1443',
'to_port': '1443',
},
'mysql': {
'name': 'MYSQL',
'ip_protocol': 'tcp',
'from_port': '3306',
'to_port': '3306',
},
'rdp': {
'name': 'RDP',
'ip_protocol': 'tcp',
'from_port': '3389',
'to_port': '3389',
},
}
4. Set the ownership and permissions of /etc/openstack-dashboard/local_settings
chown apache:apache /etc/openstack-dashboard/local_settings
chmod 644 /etc/openstack-dashboard/local_settings
5. Start the services and enable them at boot
service httpd start
service memcached start
chkconfig httpd on
chkconfig memcached on
6. Log in to the web interface at:
http://10.240.216.101/dashboard/
Appendix: quick Ceph installation commands
MON node installation
1. Set up the first MON node
mkdir /etc/ceph/
cat > /etc/ceph/ceph.conf << EOF
[global]
fsid = f11240d4-86b1-49ba-aacc-6d3d37b24cc4
mon initial members = os-node1,os-node2,os-node3
mon host = os-node1,10.240.216.102,10.240.216.103
public network = 10.240.216.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
filestore xattr use omap = true
osd pool default size = 2
osd pool default min size = 1
osd crush chooseleaf type = 1
osd_mkfs_type = xfs
max mds = 5
mds max file size = 100000000000000
mds cache size = 1000000
mon osd down out interval = 900
#cluster_network = 10.240.216.0/24
[mon]
mon clock drift allowed = .50
EOF
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
mkdir -p /var/lib/ceph/mon/ceph-os-node1
mkdir -p /var/lib/ceph/bootstrap-osd/
ceph-authtool -C /var/lib/ceph/bootstrap-osd/ceph.keyring
ceph-mon --mkfs -i os-node1 --keyring /tmp/ceph.mon.keyring
touch /var/lib/ceph/mon/ceph-os-node1/done
touch /var/lib/ceph/mon/ceph-os-node1/sysvinit
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.os-node1
2. Set up the second MON node
(1) On os-node2
mkdir -p /var/lib/ceph/mon/ceph-os-node2
(2) On os-node1
scp /etc/ceph/* os-node2:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring os-node2:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring os-node2:/tmp/
(3) On os-node2
ceph-mon --mkfs -i os-node2 --keyring /tmp/ceph.mon.keyring
touch /var/lib/ceph/mon/ceph-os-node2/done
touch /var/lib/ceph/mon/ceph-os-node2/sysvinit
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.os-node2
3. Set up the third MON node
(1) On os-node3
mkdir -p /var/lib/ceph/mon/ceph-os-node3
(2) On os-node1
scp /etc/ceph/* os-node3:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring os-node3:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring os-node3:/tmp/
(3) On os-node3
ceph-mon --mkfs -i os-node3 --keyring /tmp/ceph.mon.keyring
touch /var/lib/ceph/mon/ceph-os-node3/done
touch /var/lib/ceph/mon/ceph-os-node3/sysvinit
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.os-node3
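With all three monitors started, quorum can be verified before adding OSDs:
ceph mon stat    # os-node1, os-node2 and os-node3 should be listed in the quorum
ceph -s          # health will typically not be OK until OSDs are added, but the monmap should already show 3 monitors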
Adding OSD nodes
On os-node1, copy the files under /etc/ceph/ to the OSD nodes
scp /etc/ceph/* os-node4:/etc/ceph
scp /etc/ceph/* os-node5:/etc/ceph
scp /etc/ceph/* os-node6:/etc/ceph
1. Add the first OSD node
On os-node4
ceph osd create
mkdir -p /var/lib/ceph/osd/ceph-0
mkfs.xfs -f /dev/sda6
mount /dev/sda6 /var/lib/ceph/osd/ceph-0
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-0
[root@os-node4 ~]# vi /etc/fstab
/dev/sda6 /var/lib/ceph/osd/ceph-0 xfs defaults 0 0
/dev/sda6 /var/lib/ceph/osd/ceph-0 xfs remount,user_xattr 0 0
ceph-osd -i 0 --mkfs --mkkey
ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
ceph osd crush add-bucket os-node4 host
ceph osd crush move os-node4 root=default
ceph osd crush add osd.0 1.0 host=os-node4
touch /var/lib/ceph/osd/ceph-0/sysvinit
/etc/init.d/ceph start osd.0
2. Add the second OSD node
On os-node5
ceph osd create
mkdir -p /var/lib/ceph/osd/ceph-1
mkfs.xfs -f /dev/sda6
mount /dev/sda6 /var/lib/ceph/osd/ceph-1
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-1
[root@os-node5 ~]# vi /etc/fstab
/dev/sda6 /var/lib/ceph/osd/ceph-1 xfs defaults 0 0
/dev/sda6 /var/lib/ceph/osd/ceph-1 xfs remount,user_xattr 0 0
ceph-osd -i 1 --mkfs --mkkey
ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring
ceph osd crush add-bucket os-node5 host
ceph osd crush move os-node5 root=default
ceph osd crush add osd.1 1.0 host=os-node5
touch /var/lib/ceph/osd/ceph-1/sysvinit
/etc/init.d/ceph start osd.1
3. Add the third OSD node
On os-node6
ceph osd create
mkdir -p /var/lib/ceph/osd/ceph-2
mkfs.xfs -f /dev/sda6
mount /dev/sda6 /var/lib/ceph/osd/ceph-2
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-2
[root@os-node6 ~]# vi /etc/fstab
/dev/sda6 /var/lib/ceph/osd/ceph-2 xfs defaults 0 0
/dev/sda6 /var/lib/ceph/osd/ceph-2 xfs remount,user_xattr 0 0
ceph-osd -i 2 --mkfs --mkkey
ceph auth add osd.2 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-2/keyring
ceph osd crush add-bucket os-node6 host
ceph osd crush move os-node6 root=default
ceph osd crush add osd.2 1.0 host=os-node6
touch /var/lib/ceph/osd/ceph-2/sysvinit
/etc/init.d/ceph start osd.2
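After the three OSDs are running, confirm the CRUSH layout and OSD states:
ceph osd tree    # osd.0, osd.1 and osd.2 should be up and in under os-node4, os-node5 and os-node6
ceph -s          # placement groups should move towards active+clean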
Adding metadata servers (MDS)
Add the first metadata server
On os-node1
mkdir -p /var/lib/ceph/mds/ceph-os-node1
touch /root/ceph.bootstrap-mds.keyring
ceph-authtool --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring ceph.bootstrap-mds.keyring
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.os-node1 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-os-node1/keyring
touch /var/lib/ceph/mds/ceph-os-node1/sysvinit
touch /var/lib/ceph/mds/ceph-os-node1/done
service ceph start mds.os-node1
Add the second metadata server
(1) On os-node2
mkdir -p /var/lib/ceph/mds/ceph-os-node2
mkdir -p /var/lib/ceph/bootstrap-mds/
(2) On os-node1
scp /var/lib/ceph/bootstrap-mds/ceph.keyring os-node2:/var/lib/ceph/bootstrap-mds/
scp /root/ceph.bootstrap-mds.keyring os-node2:/root/
scp /var/lib/ceph/mds/ceph-os-node1/sysvinit os-node2:/var/lib/ceph/mds/ceph-os-node2/
(3) On os-node2
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.os-node2 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-os-node2/keyring
touch /var/lib/ceph/mds/ceph-os-node2/done
service ceph start mds.os-node2
Add the third metadata server
(1) On os-node3
mkdir -p /var/lib/ceph/mds/ceph-os-node3
mkdir -p /var/lib/ceph/bootstrap-mds/
(2) On os-node1
scp /var/lib/ceph/bootstrap-mds/ceph.keyring os-node3:/var/lib/ceph/bootstrap-mds/
scp /root/ceph.bootstrap-mds.keyring os-node3:/root/
scp /var/lib/ceph/mds/ceph-os-node1/sysvinit os-node3:/var/lib/ceph/mds/ceph-os-node3/
(3) On os-node3
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.os-node3 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-os-node3/keyring
touch /var/lib/ceph/mds/ceph-os-node3/done
service ceph start mds.os-node3
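Finally, confirm that the metadata servers registered with the cluster:
ceph mds stat    # all three MDS daemons should appear (active and/or standby, depending on the max mds setting)
ceph -s          # the mdsmap line should reflect the same state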
Source: the "zhangdh开放空间" blog, http://linuxblind.blog.51cto.com/7616603/1710176