# Install pip on the admin node (used later to install ceph-deploy)
yum install -y wget
wget https://pypi.python.org/packages/source/p/pip/pip-1.5.6.tar.gz#md5=01026f87978932060cc86c1dc527903e
tar zxvf pip-1.5.6.tar.gz
cd pip-1.5.6
python setup.py build
python setup.py install

# Generate an SSH key pair on the admin node
ssh-keygen

#################################
# (Optional) set the hostname on each node, then reboot:
#echo "ceph-admin" >/etc/hostname
#echo "ceph-node1" >/etc/hostname
#echo "ceph-node2" >/etc/hostname
#echo "ceph-node3" >/etc/hostname
#reboot
#################################

cat >/etc/hosts<<EOF
192.168.55.185 ceph-admin
192.168.55.186 ceph-node1
192.168.55.187 ceph-node2
192.168.55.188 ceph-node3
EOF

# Distribute the admin node's public key to every node
ssh-copy-id root@ceph-node1 && ssh-copy-id root@ceph-node2 && ssh-copy-id root@ceph-node3

# Stop the firewall and put SELinux into permissive mode on every node
ssh root@ceph-node1 "systemctl stop firewalld && setenforce 0"
ssh root@ceph-node2 "systemctl stop firewalld && setenforce 0"
ssh root@ceph-node3 "systemctl stop firewalld && setenforce 0"

cat >/root/.ssh/config<<EOF
Host ceph-node1
    Hostname ceph-node1
    User root
Host ceph-node2
    Hostname ceph-node2
    User root
Host ceph-node3
    Hostname ceph-node3
    User root
EOF

# Create the working directory and deploy the cluster with ceph-deploy
mkdir ~/my-cluster
cd ~/my-cluster
pip install ceph-deploy
ceph-deploy new ceph-node1 ceph-node2 ceph-node3
ceph-deploy install ceph-node1 ceph-node2 ceph-node3
ceph-deploy mon create-initial
ceph-deploy mon create ceph-node1 ceph-node2 ceph-node3
ceph-deploy gatherkeys ceph-node1 ceph-node2 ceph-node3
############################################################################
## ceph-deploy --overwrite-conf mon create ceph-node1 ceph-node2 ceph-node3#
############################################################################

# Prepare and activate directory-backed OSDs on each node
#mkfs.xfs /dev/sdb
#mount /dev/sdb /opt/ceph/
ssh root@ceph-node1 mkdir /opt/ceph
ssh root@ceph-node2 mkdir /opt/ceph
ssh root@ceph-node3 mkdir /opt/ceph
ceph-deploy osd prepare ceph-node1:/opt/ceph ceph-node2:/opt/ceph ceph-node3:/opt/ceph
ceph-deploy osd activate ceph-node1:/opt/ceph ceph-node2:/opt/ceph ceph-node3:/opt/ceph

# Add a metadata (MDS) node
ceph-deploy mds create ceph-node1

###############################################################
# Distribute the key files
#ceph-deploy admin ceph-admin ceph-node1 ceph-node2 ceph-node3
###############################################################

# Check the cluster
ceph health
ceph -s
ceph -w
ceph quorum_status --format json-pretty

# Mount the filesystem on a client
yum install -y ceph-fuse
mkdir /mnt/ceph
[root@ceph-admin ~]# ceph osd pool create metadata 256 256
[root@ceph-admin ~]# ceph osd pool create data 256 256
[root@ceph-admin ~]# ceph fs new filesystemNew metadata data
[root@ceph-admin ceph]# ceph fs ls
name: filesystemNew, metadata pool: metadata, data pools: [data ]
[root@ceph-admin ceph]# ceph mds stat
e5: 1/1/1 up {0=ceph-node1=up:active}
ceph-fuse -m 192.168.55.186:6789 /mnt/ceph
####end####

# Add another OSD node
ssh ceph-node1
sudo mkdir /var/local/osd2
exit
[root@ceph-admin my-cluster]# ceph-deploy osd prepare ceph-node1:/var/local/osd2
[root@ceph-admin my-cluster]# ceph-deploy osd activate ceph-node1:/var/local/osd2
[root@ceph-admin my-cluster]# ceph -w
[root@ceph-admin my-cluster]# ceph -s
    cluster 8f7a79b6-ab8d-40c7-abfa-6e6e23d9a26d
     health HEALTH_OK
     monmap e1: 1 mons at {ceph-node1=192.168.55.186:6789/0}, election epoch 2, quorum 0 ceph-node1
     osdmap e13: 3 osds: 3 up, 3 in
      pgmap v38: 64 pgs, 1 pools, 0 bytes data, 0 objects
            18600 MB used, 35153 MB / 53754 MB avail
                  64 active+clean

# Add monitor nodes
[root@ceph-admin my-cluster]# ceph-deploy new ceph-node2 ceph-node3
[root@ceph-admin my-cluster]# ceph-deploy mon create-initial
[root@ceph-admin my-cluster]# ceph-deploy --overwrite-conf mon create ceph-node2 ceph-node3
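The walkthrough uses the ceph.conf generated by "ceph-deploy new" unchanged. Before "ceph-deploy install" it is common to pin the public network and lower the default replica count; a minimal sketch, where the 192.168.55.0/24 subnet (taken from the /etc/hosts entries above) and the replica count of 2 are assumptions, not part of the original walkthrough:

# Optional ceph.conf tweaks, run from ~/my-cluster after "ceph-deploy new"
# and before "ceph-deploy install". Subnet and replica count are assumed values.
cat >>ceph.conf<<EOF
public network = 192.168.55.0/24
osd pool default size = 2
EOF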
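For reference, the filesystem mounted above with ceph-fuse can also be mounted with the kernel CephFS client; a minimal sketch, assuming the admin keyring has already been copied to the client (e.g. via the commented-out "ceph-deploy admin" step) and using a hypothetical mount point /mnt/ceph-kernel:

# Kernel-client alternative to the ceph-fuse mount above.
# Assumes /etc/ceph/ceph.client.admin.keyring exists on the client.
SECRET=$(awk '/key = / {print $3}' /etc/ceph/ceph.client.admin.keyring)
mkdir -p /mnt/ceph-kernel
mount -t ceph 192.168.55.186:6789:/ /mnt/ceph-kernel -o name=admin,secret=$SECRET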
Original article: http://www.cnblogs.com/caoguo/p/4609009.html