kube-apiserver cluster preparation:

External IP | Internal IP | CPU (cores) | Memory (GB) | Disk
---|---|---|---|---
192.168.3.10 | 172.172.1.1 | 64 | 256 | 1T
192.168.3.11 | 172.172.1.2 | 64 | 256 | 1T
192.168.3.12 | 172.172.1.3 | 64 | 256 | 1T
192.168.3.13 | 172.172.1.4 | 64 | 256 | 1T
192.168.3.14 | 172.172.1.5 | 64 | 256 | 1T
cd /apps/work/k8s
mkdir kubernetes/{server,node}
cd kubernetes/server
wget https://dl.k8s.io/v1.14.0/kubernetes-server-linux-amd64.tar.gz
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server
mkdir -p conf config log ssl
cd bin
rm -rf *.docker_tag
rm -rf *.tar
Keep all the remaining binary files.
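Optionally, verify the binaries before distributing them; a quick sanity check (the version string below assumes the v1.14.0 tarball downloaded above):
ls -lh
./kube-apiserver --version
# expected output: Kubernetes v1.14.0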
mkdir -p /apps/work/k8s/cfssl/k8s
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "niuke",
      "OU": "niuke"
    }
  ]
}
EOF
Generate the Kubernetes CA certificate and private key:
mkdir -p /apps/work/k8s/cfssl/pki/k8s/
cfssl gencert -initca /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json | \
cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s-ca
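To confirm the CA came out as intended, you can inspect the certificate; a minimal check assuming openssl is installed (cfssl-certinfo -cert works too):
openssl x509 -noout -subject -issuer -dates -in /apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem
# expect subject and issuer CN=kubernetes (self-signed CA) plus the validity period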
export K8S_APISERVER_VIP=" \
\"192.168.3.10\", \
\"192.168.3.11\", \
\"192.168.3.12\", \
\"192.168.3.13\", \
\"192.168.3.14\", \
\"192.168.3.15\", \
\"192.168.3.16\", \
\"192.168.3.17\", \
\"192.168.3.18\", \
\"192.168.3.19\", \
\"192.168.31.252\", \
\"192.168.31.253\", \
\"172.172.1.1\", \
\"172.172.1.2\", \
\"172.172.1.3\", \
\"172.172.1.4\", \
\"172.172.1.5\", \
\"172.172.1.6\", \
\"172.172.1.7\", \
\"172.172.1.8\", \
\"172.172.1.9\", \
\"172.172.1.10\", \
" && \
export K8S_APISERVER_SERVICE_CLUSTER_IP="10.64.0.1" && \
export K8S_APISERVER_HOSTNAME="api.k8s.niuke.local" && \
export K8S_CLUSTER_DOMAIN_SHORTNAME="niuke" && \
export K8S_CLUSTER_DOMAIN_FULLNAME="niuke.local" && \
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_apiserver.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    ${K8S_APISERVER_VIP}
    "${K8S_APISERVER_SERVICE_CLUSTER_IP}",
    "${K8S_APISERVER_HOSTNAME}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_SHORTNAME}",
    "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_FULLNAME}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "niuke",
      "OU": "niuke"
    }
  ]
}
EOF
Generate the Kubernetes API Server certificate and private key:
cfssl gencert \
-ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
-ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
-config=/apps/work/k8s/cfssl/ca-config.json \
-profile=kubernetes \
/apps/work/k8s/cfssl/k8s/k8s_apiserver.json | \
cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_server
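It is worth checking that every planned VIP, node IP, service IP, and DNS name actually landed in the certificate's SAN list; a quick check, assuming openssl is available:
openssl x509 -noout -text -in /apps/work/k8s/cfssl/pki/k8s/k8s_server.pem | grep -A1 'Subject Alternative Name'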
cat << EOF | tee /apps/work/k8s/cfssl/k8s/aggregator.json
{
  "CN": "aggregator",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "niuke",
      "OU": "niuke"
    }
  ]
}
EOF
##### Generate the Kubernetes aggregator (webhook) certificate and private key
cfssl gencert \
  -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  -config=/apps/work/k8s/cfssl/ca-config.json \
  -profile=kubernetes \
  /apps/work/k8s/cfssl/k8s/aggregator.json | \
  cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/aggregator
cd /apps/work/k8s/kubernetes/server
cp -pdr /apps/work/k8s/cfssl/pki/k8s ssl/k8s
mkdir -p ssl/etcd
cp -pdr /apps/work/k8s/cfssl/pki/etcd/{etcd-ca.pem,etcd_client-key.pem,etcd_client.pem} ssl/etcd
cd /apps/work/k8s/kubernetes/server/config
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
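Once the cluster is running you can verify that secrets really are encrypted at rest; a sketch assuming etcdctl v3 and the etcd client certificates used in this guide, reading a hypothetical secret named test-secret:
ETCDCTL_API=3 etcdctl --endpoints=https://172.172.0.1:2379 \
  --cacert=/apps/kubernetes/ssl/etcd/etcd-ca.pem \
  --cert=/apps/kubernetes/ssl/etcd/etcd_client.pem \
  --key=/apps/kubernetes/ssl/etcd/etcd_client-key.pem \
  get /registry/secrets/default/test-secret | hexdump -C | head
# the stored value should begin with k8s:enc:aescbc:v1:key1, not plaintext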
cd /apps/work/k8s/kubernetes/server/config
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
75045e3b5c47255e0f03922d18dc3bec
vi token.csv
75045e3b5c47255e0f03922d18dc3bec,kubelet-bootstrap,10001,"system:bootstrappers"
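The token.csv format is token,user,uid,"group,...". Generating the file in one step avoids copy/paste mistakes; a sketch using the same commands as above:
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,\"system:bootstrappers\"" > token.csv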
Adjust the following options to your actual environment; the values here are only a reference.
cd /apps/work/k8s/kubernetes/server/conf
vi kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \
--bind-address=192.168.3.10 \
--advertise-address=192.168.3.10 \
--secure-port=5443 \
--insecure-port=0 \
--service-cluster-ip-range=10.64.0.0/16 \
--service-node-port-range=30000-65000 \
--etcd-cafile=/apps/kubernetes/ssl/etcd/etcd-ca.pem \
--etcd-certfile=/apps/kubernetes/ssl/etcd/etcd_client.pem \
--etcd-keyfile=/apps/kubernetes/ssl/etcd/etcd_client-key.pem \
--etcd-prefix=/registry \
--etcd-servers=https://172.172.0.1:2379,https://172.172.0.2:2379,https://172.172.0.3:2379 \
--etcd-servers-overrides=/events#https://172.172.0.4:2379;https://172.172.0.5:2379;https://172.172.0.6:2379 \
--client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
--tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_server.pem \
--tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
--kubelet-certificate-authority=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
--kubelet-client-certificate=/apps/kubernetes/ssl/k8s/k8s_server.pem \
--kubelet-client-key=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
--service-account-key-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
--requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
--proxy-client-cert-file=/apps/kubernetes/ssl/k8s/aggregator.pem \
--proxy-client-key-file=/apps/kubernetes/ssl/k8s/aggregator-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User \
--enable-aggregator-routing=true \
--anonymous-auth=false \
--experimental-encryption-provider-config=/apps/kubernetes/config/encryption-config.yaml \
--enable-admission-plugins=AlwaysPullImages,DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,OwnerReferencesPermissionEnforcement,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
--disable-admission-plugins=DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy \
--cors-allowed-origins=.* \
--enable-swagger-ui \
--runtime-config=api/all=true \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--authorization-mode=Node,RBAC \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--kubelet-https \
--event-ttl=1h \
--feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
--enable-bootstrap-token-auth=true \
--token-auth-file=/apps/kubernetes/config/token.csv \
--audit-log-path=/apps/kubernetes/log/api-server-audit.log \
--alsologtostderr=true \
--log-dir=/apps/kubernetes/log \
--v=2 \
--endpoint-reconciler-type=lease \
--max-mutating-requests-inflight=600 \
--max-requests-inflight=1600 \
--target-ram-mb=120000"
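Each master must advertise its own address, so --bind-address and --advertise-address differ per node. A hypothetical helper (run in /apps/work/k8s/kubernetes/server/conf) that renders one conf file per master from the template above by substituting the IP:
for ip in 192.168.3.10 192.168.3.11 192.168.3.12 192.168.3.13 192.168.3.14; do
  sed "s/192\.168\.3\.10/${ip}/g" kube-apiserver > kube-apiserver.${ip}
done
# deploy each kube-apiserver.<ip> to its node as /apps/kubernetes/conf/kube-apiserver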
cd /apps/work/k8s/kubernetes
vi kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
Type=notify
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-apiserver
ExecStart=/apps/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
User=k8s
[Install]
WantedBy=multi-user.target
cd /apps/work/k8s/kubernetes/
vi host
[master]
192.168.3.10
192.168.3.11
192.168.3.12
192.168.3.13
192.168.3.14
ansible -i host master -m shell -a "useradd k8s -s /sbin/nologin -M"
ansible -i host master -m shell -a "mkdir -p /apps/kubernetes/kubelet-plugins/volume"
ansible -i host master -m copy -a "src=server/ dest=/apps/kubernetes/"
ansible -i host master -m shell -a "chown -R k8s:root /apps/kubernetes"
ansible -i host master -m shell -a "chmod u+x /apps/kubernetes/bin/*"
ansible -i host master -m copy -a "src=kube-apiserver.service dest=/usr/lib/systemd/system/kube-apiserver.service"
ansible -i host master -m shell -a "systemctl daemon-reload"
ansible -i host master -m shell -a "systemctl enable kube-apiserver"
ansible -i host master -m shell -a "systemctl start kube-apiserver"
ansible -i host master -m shell -a "systemctl status kube-apiserver"
mkdir -p /apps/work/k8s/haproxy
cd /apps/work/k8s/haproxy
wget https://www.haproxy.org/download/1.9/src/haproxy-1.9.8.tar.gz
yum -y install epel-release
yum install -y git patch gcc gcc-c++ readline-devel zlib-devel libffi-devel openssl openssl-devel make autoconf libtool bison libxml2 libxml2-devel libxslt-devel libyaml-devel python python-docutils cmake imake expat-devel libaio libaio-devel bzr ncurses-devel wget libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel pcre-devel curl-devel libmcrypt libmcrypt-devel libunwind libunwind-devel rsyslog
git clone https://github.com/jemalloc/jemalloc.git
cd jemalloc
./autogen.sh
./configure
make -j$(nproc)
make -j$(nproc) install_bin install_include install_lib
echo '/usr/local/lib' > /etc/ld.so.conf.d/local.conf
ldconfig
ln -sf /usr/local/lib/libjemalloc.so /usr/lib/
ln -sf /usr/local/lib/libjemalloc.so /usr/lib64/
yum -y install systemd-devel
useradd haproxy -s /sbin/nologin -M
cd ../
tar -xvf haproxy-1.9.8.tar.gz
cd haproxy-1.9.8
make CPU="generic" TARGET="linux2628" USE_SYSTEMD=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_OPENSSL=1 USE_ZLIB=1 USE_REGPARM=1 USE_LINUX_TPROXY=1 ADDLIB="-ljemalloc" DEFINE=-DTCP_USER_TIMEOUT=18 PREFIX=/apps/haproxy
make install PREFIX=/apps/haproxy
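A quick check that the build picked up the expected features (exact output varies by environment):
/apps/haproxy/sbin/haproxy -vv | head -20
# look for HA-Proxy version 1.9.8 and OpenSSL/PCRE/systemd support in the build options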
mkdir -pv /apps/haproxy/{conf,run,log}
cd /apps/haproxy/conf
vim haproxy.conf
global
    maxconn 100000
    chroot /apps/haproxy
    user haproxy
    group haproxy
    daemon
    pidfile /apps/haproxy/run/haproxy.pid
    #debug
    #quiet
    stats socket /apps/haproxy/run/haproxy.sock mode 600 level admin
    log 127.0.0.1 local2
    nbproc 8
    cpu-map 1 0
    cpu-map 2 1
    cpu-map 3 2
    cpu-map 4 3
    cpu-map 5 4
    cpu-map 6 5
    cpu-map 7 6
    cpu-map 8 7
    stats bind-process 8

defaults
    log global
    mode tcp
    option tcplog
    option dontlognull
    option redispatch
    retries 3
    maxconn 100000
    timeout connect 30000
    timeout client 50000
    timeout server 50000

resolvers dns1
    nameserver dns1 114.114.114.114:53
    nameserver dns2 8.8.8.8:53
    resolve_retries 3
    timeout resolve 10s
    timeout retry 10s
    hold other 30s
    hold refused 30s
    hold nx 30s
    hold timeout 30s
    hold valid 10s
    hold obsolete 30s

listen admin_stat
    # listening port
    bind 0.0.0.0:57590
    # HTTP layer-7 mode
    mode http
    #log global
    # auto-refresh interval of the stats page
    stats refresh 30s
    # stats page URL
    stats uri /admin?stats
    # realm text shown in the stats page login prompt
    stats realm Haproxy\ Statistics
    # stats page username and password
    stats auth admin:123456admin
    # hide the HAProxy version on the stats page
    #stats hide-version
    stats enable

frontend kube-apiserver-https
    mode tcp
    bind :6443
    default_backend kube-apiserver-backend

backend kube-apiserver-backend
    mode tcp
    server 192.168.3.10-api 192.168.3.10:5443 check
    server 192.168.3.11-api 192.168.3.11:5443 check
    server 192.168.3.12-api 192.168.3.12:5443 check
    server 192.168.3.13-api 192.168.3.13:5443 check
    server 192.168.3.14-api 192.168.3.14:5443 check
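Before starting HAProxy, validate the configuration (-c only checks the file, it does not start the proxy):
/apps/haproxy/sbin/haproxy -f /apps/haproxy/conf/haproxy.conf -c
# expect: Configuration file is valid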
vim /etc/logrotate.d/haproxy
/apps/haproxy/log/*.log {
    rotate 14
    daily
    missingok
    compress
    dateext
    size 50M
    notifempty
    copytruncate
}
vim /etc/rsyslog.d/49-haproxy.conf
$ModLoad imudp
$UDPServerAddress 127.0.0.1
$UDPServerRun 514
$template HAProxy,"%syslogtag%%msg:::drop-last-lf%\n"
$template TraditionalFormatWithPRI,"%pri-text%: %timegenerated% %syslogtag%%msg:::drop-last-lf%\n"
local2.=info /apps/haproxy/log/access.log;HAProxy
local2.=notice;local2.=warning /apps/haproxy/log/status.log;TraditionalFormatWithPRI
local2.error /apps/haproxy/log/error.log;TraditionalFormatWithPRI
local2.* stop
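Restart rsyslog so the new UDP listener and the HAProxy log rules take effect (the chrooted HAProxy can only log over the network, per the log 127.0.0.1 local2 setting above):
systemctl restart rsyslog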
vim /usr/lib/systemd/system/haproxy.service
[Unit]
Description=HAProxy Load Balancer
Documentation=man:haproxy(1)
After=syslog.target network.target
[Service]
LimitCORE=infinity
LimitNOFILE=1024000
LimitNPROC=1024000
EnvironmentFile=-/etc/sysconfig/haproxy
Environment="CONFIG=/apps/haproxy/conf/haproxy.conf" "PIDFILE=/apps/haproxy/run/haproxy.pid"
ExecStartPre=/apps/haproxy/sbin/haproxy -f $CONFIG -c -q
ExecStart=/apps/haproxy/sbin/haproxy -Ws -f $CONFIG -p $PIDFILE
ExecReload=/apps/haproxy/sbin/haproxy -f $CONFIG -c -q
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed
Restart=always
Type=notify
[Install]
WantedBy=multi-user.target
systemctl start haproxy
systemctl enable haproxy
http://192.168.4.1:57590/admin?stats
Credentials: admin:123456admin
Set up the second HAProxy node following the same steps as the first.
HAProxy also supports load balancing to DNS-resolved backend hostnames (via the resolvers section above).
These two nodes also serve as the outward-facing route for the cluster.
mkdir -p /apps/work/k8s/keepalived
cd /apps/work/k8s/keepalived
wget https://www.keepalived.org/software/keepalived-2.0.16.tar.gz
wget https://ftp.gnu.org/gnu/automake/automake-1.15.1.tar.gz
yum install -y libnl-devel openssl-devel libnfnetlink-devel ipvsadm popt-devel libnfnetlink kernel-devel
tar -xvf automake-1.15.1.tar.gz
cd automake-1.15.1
./configure
make
make install
cd ../
tar -xvf keepalived-2.0.16.tar.gz
cd keepalived-2.0.16
automake --add-missing
automake
./configure --prefix=/apps/keepalived
make
make install
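A quick check that the build and install succeeded (version string assumed from the 2.0.16 tarball downloaded above):
/apps/keepalived/sbin/keepalived --version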
vim keepalived/keepalived.service
[Unit]
Description=LVS and VRRP High Availability Monitor
After=network-online.target syslog.target
Wants=network-online.target
[Service]
Type=forking
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
PIDFile=/var/run/keepalived.pid
KillMode=process
EnvironmentFile=-/apps/keepalived/etc/sysconfig/keepalived
ExecStart=/apps/keepalived/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target
cp -pdr keepalived/keepalived.service /usr/lib/systemd/system
cd /apps/keepalived/etc/sysconfig
vim keepalived
KEEPALIVED_OPTIONS="-D --use-file=/apps/keepalived/etc/keepalived/keepalived.conf"
Generate the auth_pass value:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
99ce6e3381dc326633737ddaf5d904d2
cd /apps/keepalived/etc/keepalived
vim keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 250
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 99ce6e3381dc326633737ddaf5d904d2
    }
    virtual_ipaddress {
        192.168.31.252/19
    }
    track_script {
        check_haproxy
    }
}
The second (BACKUP) node uses the same configuration, differing only in state and priority:
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 249
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 99ce6e3381dc326633737ddaf5d904d2
    }
    virtual_ipaddress {
        192.168.31.252/19
    }
    track_script {
        check_haproxy
    }
}
systemctl enable keepalived.service
systemctl start keepalived.service
systemctl status keepalived.service
ip address show eth0
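A simple failover test, assuming the VIP 192.168.31.252 currently sits on the MASTER node: stopping HAProxy makes check_haproxy fail, the script lowers the priority (weight -2) after the fall threshold (10 failed checks at 3-second intervals), and the BACKUP should take over the VIP within about 30 seconds:
systemctl stop haproxy       # on the MASTER
ip address show eth0         # the VIP should disappear here and appear on the BACKUP
systemctl start haproxy      # restore; the MASTER preempts the VIP back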
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_apiserver_admin.json
{
  "CN": "admin",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
cfssl gencert -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem -config=/apps/work/k8s/cfssl/ca-config.json -profile=kubernetes /apps/work/k8s/cfssl/k8s/k8s_apiserver_admin.json | cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin
cd /apps/k8s/
Edit the hosts file:
vim /etc/hosts
192.168.31.252 api.k8s.niuke.local
export KUBE_APISERVER="https://api.k8s.niuke.local:6443"
kubectl config set-cluster kubernetes --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=admin.kubeconfig
kubectl config set-credentials admin --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin.pem --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin-key.pem --embed-certs=true --kubeconfig=admin.kubeconfig
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --namespace=kube-system --kubeconfig=admin.kubeconfig
kubectl config use-context kubernetes --kubeconfig=admin.kubeconfig
cp admin.kubeconfig ~/.kube/config
[root@jenkins tasks]# kubectl cluster-info
Kubernetes master is running at https://api.k8s.niuke.local:6443
[root@jenkins tasks]# kubectl get cs
NAME                 STATUS      MESSAGE                                                  ERROR
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: net/http: HTTP/1.x
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: net/http: HTTP/1.x
etcd-2               Healthy     {"health":"true"}
etcd-3               Healthy     {"health":"true"}
etcd-1               Healthy     {"health":"true"}
etcd-0               Healthy     {"health":"true"}
etcd-5               Healthy     {"health":"true"}
etcd-4               Healthy     {"health":"true"}
scheduler and controller-manager report Unhealthy because they have not been deployed yet; they are covered in the following articles.
Next: Kubernetes production installation and deployment based on Kubernetes v1.14.0: kube-scheduler cluster deployment
Source: https://blog.51cto.com/juestnow/2404297