1. Overview
The StatefulSet controller manages stateful application replica sets in a Kubernetes cluster. Since stateful pods need persistent storage, it helps to first review the storage volume types available in Kubernetes (a brief pod-spec sketch follows this list):
emptyDir: an empty directory created on demand; the data in the volume is deleted when the pod is deleted, so it is generally used for temporary directories or caches
hostPath: a host path; a directory on the node is bound to the containers in the pod
gitRepo: built on top of emptyDir; a git repository is cloned into the emptyDir volume
NFS: mounts an existing NFS (network file system) share into the pod
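For reference, here is a minimal sketch of how emptyDir and hostPath volumes are declared in a pod spec; the pod name, image, and paths below are made up for illustration only:

apiVersion: v1
kind: Pod
metadata:
  name: volume-demo                # hypothetical pod, for illustration only
spec:
  containers:
  - name: app
    image: busybox:1.28
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: cache
      mountPath: /cache            # backed by the emptyDir volume below
    - name: hostdata
      mountPath: /hostdata         # backed by the hostPath volume below
  volumes:
  - name: cache
    emptyDir: {}                   # created with the pod, deleted with the pod
  - name: hostdata
    hostPath:
      path: /data/demo             # directory on the node that runs the pod
      type: DirectoryOrCreate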
On the master node:
kubectl create ns bigdata
On each worker node:
mkdir -pv /data/pv/zk{1..3}
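A quick sanity check, assuming SSH access from the master and that node1/node2/node3 are the worker hostnames (they match the NODE column in the pod listing later):

ssh node1 'ls -ld /data/pv/zk*'
ssh node2 'ls -ld /data/pv/zk*'
ssh node3 'ls -ld /data/pv/zk*'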
The lab environment runs on virtual machines, so hostPath is used as the storage volume type. Save the following manifest as zookeeper-pv.yaml:
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk1
  namespace: bigdata
  labels:
    type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/pv/zk1
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk2
  namespace: bigdata
  labels:
    type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/pv/zk2
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk3
  namespace: bigdata
  labels:
    type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/pv/zk3
  persistentVolumeReclaimPolicy: Recycle
kubectl apply -f zookeeper-pv.yaml
[root@master ~]# kubectl get pv
NAME         CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
k8s-pv-zk1   2Gi        RWO            Recycle          Available                                   6m42s
k8s-pv-zk2   2Gi        RWO            Recycle          Available                                   6m42s
k8s-pv-zk3   2Gi        RWO            Recycle          Available                                   6m42s
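All three volumes are Available and unclaimed. Details for an individual volume (reclaim policy, access modes, and the backing hostPath) can be shown with, for example:

kubectl describe pv k8s-pv-zk1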
The manifest below (saved as zookeeper.yaml) follows the official tutorial: https://kubernetes.io/zh/docs/tutorials/stateful-application/zookeeper/
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: bigdata
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: bigdata
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent
        image: "mirrorgcrio/kubernetes-zookeeper:1.0-3.4.10"
        resources:
          requests:
            memory: "500Mi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper --servers=3 --data_dir=/var/lib/zookeeper/data --data_log_dir=/var/lib/zookeeper/data/log --conf_dir=/opt/zookeeper/conf --client_port=2181 --election_port=3888 --server_port=2888 --tick_time=2000 --init_limit=10 --sync_limit=5 --heap=512M --max_client_cnxns=60 --snap_retain_count=3 --purge_interval=12 --max_session_timeout=40000 --min_session_timeout=4000 --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 2Gi
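Optionally, the manifest can be validated without creating anything via a client-side dry run (the --dry-run=client form is available from kubectl v1.18 on):

kubectl apply --dry-run=client -f zookeeper.yaml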
kubectl apply -f zookeeper.yaml
[root@master ~]# kubectl get pods -n bigdata -o wide
NAME   READY   STATUS    RESTARTS   AGE    IP               NODE    NOMINATED NODE   READINESS GATES
zk-0   1/1     Running   0          109s   10.122.104.4     node2   <none>           <none>
zk-1   1/1     Running   0          95s    10.122.166.134   node1   <none>           <none>
zk-2   1/1     Running   0          78s    10.122.135.1     node3   <none>           <none>
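The required pod anti-affinity on kubernetes.io/hostname puts each replica on a different node, as the NODE column shows. The two Services and the PodDisruptionBudget from the same manifest can be checked with:

kubectl get svc,pdb -n bigdata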
[root@master ~]# kubectl get pvc -n bigdata -o wide
NAME           STATUS   VOLUME       CAPACITY   ACCESS MODES   STORAGECLASS   AGE    VOLUMEMODE
datadir-zk-0   Bound    k8s-pv-zk1   2Gi        RWO                           19m    Filesystem
datadir-zk-1   Bound    k8s-pv-zk3   2Gi        RWO                           115s   Filesystem
datadir-zk-2   Bound    k8s-pv-zk2   2Gi        RWO                           98s    Filesystem
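The volumeClaimTemplates section creates one PVC per replica, named <template>-<pod> (datadir-zk-0, datadir-zk-1, datadir-zk-2). Each claim binds to whichever matching PV is still Available, so the pairing (zk-1 on k8s-pv-zk3 here) is arbitrary. The bound volume of a single claim can also be read directly:

kubectl get pvc datadir-zk-1 -n bigdata -o jsonpath='{.spec.volumeName}'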
[root@master ~]# kubectl exec -n bigdata zk-0 -- cat /opt/zookeeper/conf/zoo.cfg
#This file was autogenerated DO NOT EDIT
clientPort=2181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/data/log
tickTime=2000
initLimit=10
syncLimit=5
maxClientCnxns=60
minSessionTimeout=4000
maxSessionTimeout=40000
autopurge.snapRetainCount=3
autopurge.purgeInteval=12
server.1=zk-0.zk-hs.bigdata.svc.cluster.local:2888:3888
server.2=zk-1.zk-hs.bigdata.svc.cluster.local:2888:3888
server.3=zk-2.zk-hs.bigdata.svc.cluster.local:2888:3888
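The server.N entries are the per-pod DNS names published by the headless service zk-hs (pod.service.namespace.svc.cluster.local). To confirm they resolve, one option is a short-lived busybox pod (this assumes the busybox:1.28 image can be pulled in your cluster):

kubectl run -n bigdata dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup zk-0.zk-hs.bigdata.svc.cluster.local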
[root@master ~]# kubectl exec -n bigdata zk-0 zkServer.sh status
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
[root@master ~]# kubectl exec -n bigdata zk-1 zkServer.sh status
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: leader
[root@master ~]# kubectl exec -n bigdata zk-2 zkServer.sh status
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
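zk-1 is the leader and zk-0/zk-2 are followers. As a final check, a value written through one member should be readable from another; the commands below follow the pattern used in the official tutorial (zkCli.sh is expected to be on the PATH inside this image):

kubectl exec -n bigdata zk-0 -- zkCli.sh create /hello world
kubectl exec -n bigdata zk-1 -- zkCli.sh get /hello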
References:
Official ZooKeeper tutorial: https://kubernetes.io/zh/docs/tutorials/stateful-application/zookeeper/
StatefulSet API reference: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#statefulset-v1-apps
Original post: https://blog.51cto.com/jiayimeng/2508515