Tags: k8s
Introduction: CentOS now ships Kubernetes in its yum repositories, so this installation is done entirely with yum.
This lab uses three machines, set up as one master and two nodes.
The OS is CentOS 7.4, with firewalld, SELinux, and iptables all disabled.
The master runs four components: kube-apiserver, kube-scheduler, kube-controller-manager, and etcd.
Each node runs three components: kube-proxy, kubelet, and flannel.
1. kube-apiserver: runs on the master node; accepts user requests.
2. kube-scheduler: runs on the master node; handles resource scheduling, i.e. decides which node each pod is created on.
3. kube-controller-manager: runs on the master node; contains the ReplicationManager, EndpointsController, NamespaceController, NodeController, and others.
4. etcd: a distributed key-value store that holds the resource objects shared by the whole cluster.
5. kubelet: runs on each node; maintains the pods running on that particular host.
6. kube-proxy: runs on each node; acts as a service proxy.
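These components map one-to-one onto systemd units from the CentOS packages, which is how they are started later in this guide:

master: etcd, kube-apiserver, kube-scheduler, kube-controller-manager
node:   flanneld, docker, kubelet, kube-proxy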
The clocks on all three machines must stay synchronized, so install NTP (run the following commands on all three machines):
# yum -y install ntp
# systemctl start ntpd
# systemctl enable ntpd
# ntpdate time1.aliyun.com
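A quick sanity check that the clocks really did sync (ntpq ships with the ntp package):
# ntpq -p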
On the master, install the master components and etcd:
yum -y install kubernetes-master etcd
Then edit /etc/etcd/etcd.conf:
[root@localhost ~]# cat /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
# Data directory for this member: stores the member ID, cluster ID, initial cluster
# configuration, and snapshot files; if --wal-dir is not set, WAL files are stored here too
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
# Directory for this member's WAL files; if set, WAL files are stored separately from the other data
#ETCD_WAL_DIR=""
# URLs to listen on for traffic from other cluster members
ETCD_LISTEN_PEER_URLS="http://192.168.56.200:2380"
# URLs to listen on for client traffic, i.e. the URLs the service is reached at
ETCD_LISTEN_CLIENT_URLS="http://192.168.56.200:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
# Member name
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
# Peer URLs this member advertises to the rest of the cluster
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.56.200:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.56.200:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
# All members of the initial cluster
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.56.200:2380,etcd2=http://192.168.56.201:2380,etcd3=http://192.168.56.202:2380"
# Token identifying the initial cluster
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
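The file above is for etcd1, the master at 192.168.56.200. Since ETCD_INITIAL_CLUSTER lists three members, the two nodes need the same file with only the member name and addresses changed; for example, on 192.168.56.201:

ETCD_NAME="etcd2"
ETCD_LISTEN_PEER_URLS="http://192.168.56.201:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.56.201:2379,http://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.56.201:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.56.201:2379"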
[root@localhost ~]# cat /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
KUBE_API_ADDRESS="--address=0.0.0.0"
# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"
# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.56.200:2379,http://192.168.56.201:2379,http://192.168.56.202:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
# Add your own!
#KUBE_CONTROLLER_MANAGER_ARGS=""
# Note: kube-controller-manager reads this variable from /etc/kubernetes/controller-manager
# (its own EnvironmentFile), so these flags normally belong in that file rather than here.
KUBE_CONTROLLER_MANAGER_ARGS="--node-monitor-grace-period=10s --pod-eviction-timeout=10s"
[root@localhost ~]# cat /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://127.0.0.1:8080"
If port 8080 is already taken, any other free port will do; KUBE_API_PORT in /etc/kubernetes/apiserver and KUBE_MASTER in /etc/kubernetes/config must be changed together.
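For example, moving the apiserver to port 8081 (an arbitrary choice) means changing both settings:

KUBE_API_PORT="--port=8081"
KUBE_MASTER="--master=http://127.0.0.1:8081"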
systemctl enable etcd kube-apiserver kube-scheduler kube-controller-manager
systemctl start etcd kube-apiserver kube-scheduler kube-controller-manager
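Before moving on, it is worth confirming the control plane is healthy; all of these run on the master:

# etcdctl cluster-health                 # all three etcd members should report healthy
# curl http://127.0.0.1:8080/version     # the apiserver answers on its insecure port
# kubectl get componentstatuses          # scheduler, controller-manager, and etcd status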
Define the flannel network configuration in etcd; the flanneld service on each node will pull this configuration:
etcdctl mk /coreos.com/network/config '{"Network":"172.17.0.0/16"}'
# The /coreos.com key prefix is one you create yourself; any prefix works as long as flanneld is pointed at it
Or do it with a script:
[root@localhost ~]# cat etc.sh
etcdctl mkdir /atomic.io/network
etcdctl mk /atomic.io/network/config "{ \"Network\": \"172.17.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
Create the network range flannel will allocate subnets from:
# Run only against etcd on the master
etcdctl mk /coreos.com/network/config '{"Network": "10.1.0.0/16"}'
# To recreate the key, delete it first:
etcdctl rm /coreos.com/network/ --recursive
Note that two prefixes appear above (/coreos.com and /atomic.io); only the one matching FLANNEL_ETCD_PREFIX in /etc/sysconfig/flanneld is actually read, and the flanneld configuration below uses /atomic.io/network.
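Either way, you can read the key back to confirm what flanneld will fetch:

# etcdctl get /atomic.io/network/config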
On each node, install the node components (etcd is included because each node is also an etcd cluster member):
yum -y install kubernetes-node etcd flannel docker
The configuration is the same on all nodes, except for the per-node values noted below:
[root@localhost ~]# cat /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.56.200:8080"
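Before starting the node services, a quick check that this node can actually reach the apiserver at that address:

# curl http://192.168.56.200:8080/version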
[root@localhost ~]# cat /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=127.0.0.1"
# The port for the info server to serve on
KUBELET_PORT="--port=10250"
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.56.201"
# location of the api-server
KUBELET_API_SERVER="--api-servers=http://192.168.56.200:8080"
# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
# Add your own!
#KUBELET_ARGS=""
KUBELET_ARGS="--pod-infra-container-image=kubernetes/pause"
Point flannel at the etcd cluster by editing /etc/sysconfig/flanneld:
[root@localhost ~]# cat /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.56.200:2379"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
# This prefix must match the key prefix created in etcd earlier
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
systemctl restart flanneld docker
systemctl start kubelet kube-proxy
systemctl enable flanneld kubelet kube-proxy
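If flanneld came up correctly it records the subnet it leased in /run/flannel/subnet.env (the path used by the CentOS flannel package), and docker0 should sit inside that range:

# cat /run/flannel/subnet.env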
Checking the network interfaces now shows both a docker0 and a flannel.1 interface:
[root@localhost ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.30.31.1  netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::42:ddff:fe03:591c  prefixlen 64  scopeid 0x20<link>
        ether 02:42:dd:03:59:1c  txqueuelen 0  (Ethernet)
        RX packets 69  bytes 4596 (4.4 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 8  bytes 648 (648.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.56.201  netmask 255.255.255.0  broadcast 192.168.56.255
        inet6 fe80::d203:ac67:53b0:897b  prefixlen 64  scopeid 0x20<link>
        inet6 fe80::30c1:3975:3246:cc1f  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:f1:de:cb  txqueuelen 1000  (Ethernet)
        RX packets 810383  bytes 126454887 (120.5 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 796437  bytes 163368198 (155.8 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.30.31.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::54ee:7aff:fe11:ba95  prefixlen 64  scopeid 0x20<link>
        ether 56:ee:7a:11:ba:95  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 8  overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1  (Local Loopback)
        RX packets 1719  bytes 89584 (87.4 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 1719  bytes 89584 (87.4 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
[root@localhost ~]# kubectl get node
NAME             STATUS    AGE
192.168.56.201   Ready     4d
192.168.56.202   Ready     4d
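With both nodes Ready, a throwaway deployment makes a reasonable end-to-end test (the nginx image name is just an example; with the kubectl version shipped in these repos, kubectl run creates a deployment):

# kubectl run nginx --image=nginx --replicas=2
# kubectl get pods -o wide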
Original post: http://blog.51cto.com/sgk2011/2106775