Installing Ceph Luminous
1. Preparation
The installation environment is:
ceph1 (deploy/admin node, also provides disk services for the cluster)  CentOS 7.5  10.160.20.28
ceph2 (provides disk services for the cluster)  CentOS 7.5  10.160.20.29
ceph3 (provides disk services for the cluster)  CentOS 7.5  10.160.20.30
2. Edit the hosts file on every node and add the following entries:
# ceph1-master and ceph1-osd1
10.160.20.28 ceph1
# ceph2-osd2
10.160.20.29 ceph2
# ceph3-osd3
10.160.20.30 ceph3
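A simple way to keep /etc/hosts identical on all three nodes is to edit it on ceph1 and copy it out; a minimal sketch (scp will prompt for passwords until key-based SSH is configured in the next step):
# scp /etc/hosts root@ceph2:/etc/hosts
# scp /etc/hosts root@ceph3:/etc/hosts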
3. Ceph-deploy issues its commands over SSH, so the admin node must be able to reach the storage nodes without a password. Generate a key pair on ceph1 and create the authorized_keys file:
# ssh-keygen -t rsa                    (generates the public and private keys)
# touch /root/.ssh/authorized_keys
# cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
# chmod 700 /root/.ssh
# chmod 644 /root/.ssh/authorized_keys
[root@ceph1 .ssh]# ls
authorized_keys id_rsa id_rsa.pub known_hosts
[root@ceph1 .ssh]# egrep -v "^$|^#" /etc/ssh/sshd_config
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key
SyslogFacility AUTHPRIV
PubkeyAuthentication yes
AuthorizedKeysFile .ssh/authorized_keys
PasswordAuthentication yes
ChallengeResponseAuthentication no
GSSAPIAuthentication yes
GSSAPICleanupCredentials no
UsePAM yes
X11Forwarding yes
AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
AcceptEnv XMODIFIERS
Subsystem sftp /usr/libexec/openssh/sftp-server
Copy the public key to the other hosts:
# scp ~/.ssh/id_rsa.pub root@ceph2:/root/.ssh/authorized_keys
# scp ~/.ssh/id_rsa.pub root@ceph3:/root/.ssh/authorized_keys
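If ssh-copy-id is available, it is an alternative to the scp commands above: it appends the key and sets the permissions in one step (assumes password authentication is still enabled on the targets):
# ssh-copy-id root@ceph2
# ssh-copy-id root@ceph3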
Disable the firewall and SELinux:
[root@ceph1 .ssh]# egrep -v "^$|^#" /etc/selinux/config
SELINUX=disabled
SELINUXTYPE=targeted
# systemctl stop firewalld
# systemctl disable firewalld
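Note that SELINUX=disabled in /etc/selinux/config only takes effect after a reboot; to switch SELinux to permissive mode immediately without rebooting:
# setenforce 0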
4. Install ceph-deploy on the Ceph nodes (every node needs it)
Switch to the Aliyun base and EPEL repositories, then install the Ceph release package:
[root@ceph1 yum.repos.d]# mv CentOS-Base.repo CentOS-Base.repo.bak
[root@ceph1 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@ceph1 yum.repos.d]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# rpm -Uvh https://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/ceph-release-1-1.el7.noarch.rpm
Point ceph.repo at the Aliyun mirror:
sed -i 's#htt.*://download.ceph.com#https://mirrors.aliyun.com/ceph#g' /etc/yum.repos.d/ceph.repo
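To confirm the new baseurl is reachable, you can optionally rebuild the yum cache (an extra check, not in the original write-up):
# yum clean all
# yum makecache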
Install ceph-deploy and python-setuptools:
# yum update
# yum install ceph-deploy --disablerepo=epel
# yum install python-setuptools
5. Configure an NTP server (details omitted in the original)
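Since this step is skipped above, here is a minimal sketch using chrony, assuming ceph1 serves time to the other two nodes; the upstream server ntp.aliyun.com and the subnet are assumptions matching this lab:
# yum install chrony                      (on all nodes)
On ceph1, /etc/chrony.conf should contain roughly:
server ntp.aliyun.com iburst
allow 10.160.20.0/24
On ceph2 and ceph3, point chrony at ceph1 instead:
server ceph1 iburst
Then enable the service and verify on every node:
# systemctl enable chronyd
# systemctl restart chronyd
# chronyc sources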
6. Build the Ceph cluster
Create the ceph-install directory on the admin node (ceph1):
[root@ceph1]# mkdir /ceph-install && cd /ceph-install
[root@ceph1 ceph-install]#
From the ceph-install directory on the admin node, create the MON nodes:
[root@ceph1 ceph-install]# ceph-deploy new ceph1 ceph2 ceph3
The newly created cluster is automatically named ceph. Once it is created, the ceph-install directory contains the Ceph configuration file, the monitor keyring, and a log file:
[root@ceph1 ceph-install]# ls -l
total 276
-rw-r--r-- 1 root root 259 Apr 6 18:28 ceph.conf
-rw-r--r-- 1 root root 274426 Apr 6 18:37 ceph-deploy-ceph.log
-rw------- 1 root root 73 Apr 6 18:24 ceph.mon.keyring
The default ceph.conf looks like this:
[root@ceph1 ceph-install]# cat ceph.conf
[global]
fsid = a87ce363-44cb-4d07-8acd-7bb8779f9b8b
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 10.160.20.28,10.160.20.29,10.160.20.30
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
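It is common at this point to append the public network (and optionally the default replica count) to the [global] section. The values below are assumptions matching this lab's 10.160.20.0/24 subnet and are not part of the original file:
public_network = 10.160.20.0/24
osd_pool_default_size = 3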
Install Ceph from ceph1 with ceph-deploy. Note that ceph-deploy now defaults to the N (Nautilus) release, so point the repo environment variables at the Aliyun Luminous mirror before installing:
export CEPH_DEPLOY_REPO_URL=https://mirrors.aliyun.com/ceph/rpm-luminous/el7
export CEPH_DEPLOY_GPG_URL=https://mirrors.aliyun.com/ceph/keys/release.asc
From the ceph-install directory on ceph1, run the install:
ceph-deploy install ceph1 ceph2 ceph3
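If the environment variables are not picked up, ceph-deploy also accepts an explicit release flag (an alternative invocation, not used in the original):
ceph-deploy install --release luminous ceph1 ceph2 ceph3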
After the install succeeds on all nodes, initialize the monitors and gather the keys:
[root@ceph1 ceph-install]# ceph-deploy mon create-initial
When initialization completes successfully, several new keyring files appear in ceph-install:
[root@ceph1 ceph-install]# ls -ls
total 428
4 -rw-------. 1 root root 113 Apr 10 16:00 ceph.bootstrap-mds.keyring
4 -rw-------. 1 root root 113 Apr 10 16:00 ceph.bootstrap-mgr.keyring
4 -rw-------. 1 root root 113 Apr 10 16:00 ceph.bootstrap-osd.keyring
4 -rw-------. 1 root root 113 Apr 10 16:00 ceph.bootstrap-rgw.keyring
4 -rw-------. 1 root root 151 Apr 10 16:00 ceph.client.admin.keyring
4 -rw-r--r--. 1 root root 294 Apr 12 10:28 ceph.conf
400 -rw-r--r--. 1 root root 407953 Apr 12 10:17 ceph-deploy-ceph.log
4 -rw-------. 1 root root 73 Apr 10 15:15 ceph.mon.keyring
Copy the keyrings from the ceph-install directory to /etc/ceph/, and push them to the other nodes as well:
cp -p /ceph-install/*keyring /etc/ceph/
scp /ceph-install/*keyring root@ceph2:/etc/ceph/
scp /ceph-install/*keyring root@ceph3:/etc/ceph/
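Luminous also requires at least one manager daemon. The original does not show the command, but the ceph-mgr processes visible in the output below were presumably created with something like:
ceph-deploy mgr create ceph1 ceph2 ceph3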
You can now check which ports the Ceph daemons are listening on:
[root@ceph1 ceph-install]# netstat -lpnt
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:6800 0.0.0.0:* LISTEN 7115/ceph-osd
tcp 0 0 0.0.0.0:6801 0.0.0.0:* LISTEN 7115/ceph-osd
tcp 0 0 0.0.0.0:6802 0.0.0.0:* LISTEN 7115/ceph-osd
tcp 0 0 0.0.0.0:6803 0.0.0.0:* LISTEN 7115/ceph-osd
tcp 0 0 0.0.0.0:6804 0.0.0.0:* LISTEN 7116/ceph-osd
tcp 0 0 0.0.0.0:6805 0.0.0.0:* LISTEN 7116/ceph-osd
tcp 0 0 0.0.0.0:6806 0.0.0.0:* LISTEN 7116/ceph-osd
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 7100/sshd
tcp 0 0 0.0.0.0:6807 0.0.0.0:* LISTEN 7116/ceph-osd
tcp 0 0 0.0.0.0:6808 0.0.0.0:* LISTEN 7107/ceph-mgr
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 7232/master
tcp 0 0 172.16.3.61:6789 0.0.0.0:* LISTEN 7103/ceph-mon
tcp6 0 0 :::22 :::* LISTEN 7100/sshd
tcp6 0 0 ::1:25 :::* LISTEN 7232/master
Create the OSD storage nodes (each node in this environment has 4 disks):
[root@ceph1 ceph-install]# ceph-deploy osd create ceph1 --data /dev/sdc --journal /dev/sdb1
[root@ceph1 ceph-install]# ceph-deploy osd create ceph1 --data /dev/sdd --journal /dev/sdb2
[root@ceph2 ceph-install]# ceph-deploy osd create ceph2 --data /dev/sdc --journal /dev/sdb1
[root@ceph2 ceph-install]# ceph-deploy osd create ceph2 --data /dev/sdd --journal /dev/sdb2
[root@ceph3 ceph-install]# ceph-deploy osd create ceph3 --data /dev/sdc --journal /dev/sdb1
[root@ceph3 ceph-install]# ceph-deploy osd create ceph3 --data /dev/sdd --journal /dev/sdb2
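The commands above assume /dev/sdb on each node has already been split into two journal partitions, sdb1 and sdb2. If not, a minimal sketch with parted could look like this (the 50/50 split is arbitrary, and this wipes /dev/sdb):
# parted -s /dev/sdb mklabel gpt
# parted -s /dev/sdb mkpart primary 0% 50%
# parted -s /dev/sdb mkpart primary 50% 100%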
Push the configuration file and the admin keyring to the admin node and the Ceph nodes:
[root@ceph1 ceph-install]# ceph-deploy --overwrite-conf admin ceph1 ceph2 ceph3
Use ceph -s to check the cluster status:
[root@ceph1 ceph-install]# ceph -s
  cluster:
    id:     31dd66e4-76a7-42bb-8209-b4091753ed6c
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph3, ceph2
    osd: 6 osds: 6 up, 6 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 3.5 TiB / 3.5 TiB avail
    pgs:
If the cluster instead reports HEALTH_WARN: clock skew detected on mon, edit /ceph-install/ceph.conf and add the following (the values are in seconds; this lab raised the allowed drift to 10, and ceph -w can be used to watch the effect):
mon clock drift allowed = 10
mon clock drift warn backoff = 30
Push the updated configuration to the mon nodes that need it:
# ceph-deploy --overwrite-conf config push ceph{1..3}
Restart the mon service (on CentOS 7) and verify:
# systemctl restart ceph-mon.target
# ceph -s
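As a final sanity check, you can create a small test pool and write an object to it; the pool name and PG count below are arbitrary and this step is not part of the original write-up:
# ceph osd pool create test 64
# ceph osd pool application enable test rbd
# rados -p test put hosts-object /etc/hosts
# rados -p test ls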
Original article: https://www.cnblogs.com/cloud-datacenter/p/12322694.html