标签:Ceph RBD Kubernetes volumes Secret PV PVC
1. Ceph与volumes结合目标:实现Ceph RBD和kubernetes volumes集成。
1、创建pool和用户
[root@node-1 ~]# ceph osd pool create kubernetes 8 8
2、创建认证用户
[root@node-1 ~]# ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes'
[client.kubernetes]
key = AQDMup1emZMFOhAABlmnZFE2fF8puHeIhu+UPg==
3、创建secrets对象存储将Ceph的认证key存储在Secrets中
获取步骤2生成的key,并将其加密为base64格式(注意:需使用 echo -n,否则会把结尾换行符一并编码进key,导致认证失败)
[root@node-1 ~]# echo -n AQDMup1emZMFOhAABlmnZFE2fF8puHeIhu+UPg== | base64
QVFETXVwMWVtWk1GT2hBQUJsbW5aRkUyZkY4cHVIZUlodStVUGc9PQ==
创建定义secrets对象
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
type: "kubernetes.io/rbd"
data:
  key: QVFETXVwMWVtWk1GT2hBQUJsbW5aRkUyZkY4cHVIZUlodStVUGc9PQ==
生成secrets
[root@node-1 volumes]# kubectl apply -f secret.yaml
secret/ceph-secret created
[root@node-1 volumes]# kubectl get secret
NAME TYPE DATA AGE
ceph-secret kubernetes.io/rbd 1 10s
default-token-hn65d kubernetes.io/service-account-token 3 41d
1、创建rbd块
[root@node-1 ~]# rbd create -p kubernetes --image-feature layering rbd.img --size 10G
[root@node-1 ~]# rbd info kubernetes/rbd.img
rbd image 'rbd.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        id: 519576b8b4567
        block_name_prefix: rbd_data.519576b8b4567
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Mon Apr 20 23:27:02 2020
2、pod中引用RBD volumes
[root@node-1 volumes]# cat pods.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volume-rbd-demo
spec:
  containers:
  - name: pod-with-rbd
    image: nginx:1.7.9
    imagePullPolicy: IfNotPresent
    ports:
    - name: www
      containerPort: 80
      protocol: TCP
    volumeMounts:
    - name: rbd-demo
      mountPath: /data
  volumes:
  - name: rbd-demo
    rbd:
      monitors:
      - 10.254.100.101:6789
      - 10.254.100.102:6789
      - 10.254.100.103:6789
      pool: kubernetes
      image: rbd.img
      fsType: ext4
      user: kubernetes
      secretRef:
        name: ceph-secret
1、生成pod
[root@node-1 volumes]# kubectl apply -f pods.yaml
pod/volume-rbd-demo configured
[root@node-1 volumes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
demo-8ffbcf7c5-r2wzf 1/1 Running 1 21h
volume-rbd-demo 1/1 Running 0 43m
2、查看挂载的情况,可以看到RBD块存储挂载至data目录
[root@node-1 volumes]# kubectl exec -it volume-rbd-demo -- df -h
Filesystem Size Used Avail Use% Mounted on
rootfs 50G 6.7G 41G 15% /
overlay 50G 6.7G 41G 15% /
tmpfs 64M 0 64M 0% /dev
tmpfs 920M 0 920M 0% /sys/fs/cgroup
/dev/rbd0 9.8G 37M 9.7G 1% /data
参考步骤一,创建好pool,镜像,用户认证,secrets(本例PV使用新镜像demo-1.img,需先创建:rbd create -p kubernetes --image-feature layering demo-1.img --size 10G)
1、PV定义,定义一块存储,抽象化为PV
[root@node-1 pv_and_pvc]# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: rbd-demo
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 10G
  rbd:
    monitors:
    - 10.254.100.101:6789
    - 10.254.100.102:6789
    - 10.254.100.103:6789
    pool: kubernetes
    image: demo-1.img
    fsType: ext4
    user: kubernetes
    secretRef:
      name: ceph-secret
  persistentVolumeReclaimPolicy: Retain
  storageClassName: rbd
2、PVC定义,引用PV
[root@node-1 pv_and_pvc]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo
spec:
  accessModes:
  - ReadWriteOnce
  volumeName: rbd-demo
  resources:
    requests:
      storage: 10G
  storageClassName: rbd
3、生成PV和PVC
[root@node-1 pv_and_pvc]# kubectl apply -f pv.yaml
persistentvolume/rbd-demo created
[root@node-1 pv_and_pvc]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
rbd-demo 10G RWO Retain Available rbd 9s
[root@node-1 pv_and_pvc]# kubectl apply -f pvc.yaml
persistentvolumeclaim/pvc-demo created
[root@node-1 pv_and_pvc]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc-demo Pending rbd-demo 0 rbd 2s
[root@node-1 pv_and_pvc]# cat pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
spec:
  containers:
  - name: demo
    image: nginx:1.7.9
    imagePullPolicy: IfNotPresent
    ports:
    - name: www
      protocol: TCP
      containerPort: 80
    volumeMounts:
    - name: rbd
      mountPath: /data
  volumes:
  - name: rbd
    persistentVolumeClaim:
      claimName: pvc-demo
标签:Ceph RBD Kubernetes volumes Secret PV PVC
原文地址:https://blog.51cto.com/happylab/2488904