hostname | ip | memory | cores | disk | notes |
---|---|---|---|---|---|
harbor | 192.168.136.30 | 2G | 2 | 100G | private registry |
koolshare | - | 2G | 2 | 20G | soft router |
k8s-master | 192.168.136.10 | 2G | 4 | 100G | |
k8s-node1 | 192.168.136.20 | 4G | 4 | 100G | |
k8s-node2 | 192.168.136.21 | 4G | 4 | 100G | |
Installation reference video: https://www.bilibili.com/video/av66617940?p=10
A few things to note when creating the virtual machines:
1. Choose "host-only" network mode
2. Store the virtual disk as a single file
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup # back up the original repo file
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo # download the 163 mirror repo
cp -rp CentOS7-Base-163.repo CentOS-Base.repo # rename (only needed if the file was saved under its original name)
yum clean all
yum makecache
# Install a few common tools
yum install -y wget
yum install -y vim
yum install -y lrzsz
yum install -y openssh-clients
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
hostnamectl set-hostname habor
# Edit the hosts file (or append the entries with the heredoc below)
vim /etc/hosts
cat >> /etc/hosts <<EOF
192.168.136.10 k8s-master
192.168.136.20 k8s-node1
192.168.136.21 k8s-node2
192.168.136.30 habor
EOF
ssh-keygen -t rsa # press Enter through all the prompts
cat /root/.ssh/id_rsa.pub # show the public key
vim /root/.ssh/authorized_keys # paste the public key on the target host
chmod 600 /root/.ssh/authorized_keys # fix the permissions
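Instead of pasting the public key by hand, ssh-copy-id (shipped with openssh-clients, installed above) can distribute it; a minimal sketch using the hostnames from /etc/hosts:
for host in k8s-master k8s-node1 k8s-node2 habor; do
  ssh-copy-id -i /root/.ssh/id_rsa.pub root@$host # prompts once for each host's root password
done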
# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# Install iptables
yum install -y iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# discourage swapping; only swap when the system is about to OOM
vm.swappiness=0
# do not check whether enough physical memory is available
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
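The file under /etc/sysctl.d is only read at boot; to apply the settings immediately:
sysctl -p /etc/sysctl.d/kubernetes.conf # load the new kernel parameters now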
# Set the timezone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Keep the hardware clock in UTC
timedatectl set-local-rtc 0
# Restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond
systemctl stop postfix && systemctl disable postfix
mkdir /var/log/journal # directory for persistent journal storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# persist logs to disk
Storage=persistent
# compress old logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# cap total disk usage at 10G
SystemMaxUse=10G
# cap a single journal file at 200M
SystemMaxFileSize=200M
# keep logs for two weeks
MaxRetentionSec=2week
# do not forward to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
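Optionally confirm that journald is now writing to disk; both checks below are plain journalctl/coreutils usage:
ls /var/log/journal # a machine-id subdirectory should appear here
journalctl --disk-usage # shows how much space the persisted journal uses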
uname -r # check the current kernel version
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# List all available kernel versions
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
# Install the long-term-support kernel
# After installation, check that the corresponding menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if not, install again
yum --enablerepo=elrepo-kernel install -y kernel-lt
# List the boot menu entries
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
# Boot from the new kernel by default
grub2-set-default 'CentOS Linux (4.4.215-1.el7.elrepo.x86_64) 7 (Core)'
# Reboot to apply
reboot
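After the reboot it is worth confirming that the machine actually came up on the new kernel (the exact version string depends on which kernel-lt build was installed):
uname -r # should now print the 4.4.x elrepo kernel instead of 3.10.x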
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 775 /etc/sysconfig/modules/ipvs.modules && sh /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum update -y && yum install -y docker-ce
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts":["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
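A quick sanity check that docker picked up daemon.json, in particular the systemd cgroup driver that kubelet expects:
docker info | grep -i 'cgroup driver' # should print: Cgroup Driver: systemd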
Official documentation: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
Chinese documentation: https://kubernetes.io/zh/docs/setup/independent/install-kubeadm/
# The following steps are for the k8s nodes
cat >/etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
1. Create and export the configuration
mkdir -p /usr/local/docker/kubernetes
cd /usr/local/docker/kubernetes
# Export the default configuration
kubeadm config print init-defaults > kubeadm.yml
2. Edit the configuration file
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # change this to the master node's IP
  advertiseAddress: 192.168.136.10
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master # note: on other nodes change this to the current node's name
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# imageRepository: k8s.gcr.io
# Google's registry is unreachable from mainland China, so use the Aliyun mirror
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
# kubernetesVersion: v1.14.0
# set the version to match the installed packages
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  # flannel's default pod network
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
# the section below is newly added
---
# enable IPVS mode for kube-proxy
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
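Once the cluster is up (step 4 below), you can check that kube-proxy really runs in IPVS mode; this sketch assumes the ipvsadm tool, which is not installed by default:
yum install -y ipvsadm
ipvsadm -Ln # should list virtual servers for the kubernetes service IPs instead of being empty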
3. List and pull the required images
# List the required images; mind the path to kubeadm.yml
kubeadm config images list --config kubeadm.yml
# Pull the images
kubeadm config images pull --config kubeadm.yml
# kubeadm config images pull may not get everything in one run, so pull the images with docker pull instead
cd /usr/local/docker/kubernetes
vim install.sh
#!/bin/bash
kubeadm config images list --config /usr/local/docker/kubernetes/kubeadm.yml > /usr/local/docker/kubernetes/list.txt
for i in $( cat /usr/local/docker/kubernetes/list.txt )
do
docker pull $i
done
chmod 775 /usr/local/docker/kubernetes/install.sh
/usr/local/docker/kubernetes/install.sh
# Pull these images on every node!
# List the images
docker images
registry.aliyuncs.com/google_containers/kube-apiserver v1.15.1 68c3eb07bfc3 7 months ago 207MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.15.1 d75082f1d121 7 months ago 159MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.15.1 b0b3c4c404da 7 months ago 81.1MB
registry.aliyuncs.com/google_containers/kube-proxy v1.15.1 89a062da739d 7 months ago 82.4MB
registry.aliyuncs.com/google_containers/coredns 1.3.1 eb516548c180 13 months ago 40.3MB
registry.aliyuncs.com/google_containers/etcd 3.3.10 2c4adeb21b4f 15 months ago 258MB
registry.aliyuncs.com/google_containers/pause 3.1 da86e6ba6ca1 2 years ago 742kB
4. Initialize the master node
cd /usr/local/docker/kubernetes/
kubeadm init --config=kubeadm.yml --experimental-upload-certs | tee kubeadm-init.log
# Run the commands shown in the output log
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# If initialization fails, reset and try again
kubeadm reset
After kubeadm, kubectl, and kubelet are installed on the worker nodes, join them to the master:
# Run the following on k8s-node1 and k8s-node2; use the exact command printed by your own kubeadm init
kubeadm join 192.168.136.10:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:2da505d739930c4396a6e724fa3d777bf78816b64487d90e18a91f35b082e3f9
# Switch back to the master node (k8s-master)
[root@k8s-master kubernetes]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master NotReady master 40m v1.15.1
k8s-node1 NotReady <none> 15s v1.15.1
k8s-node2 NotReady <none> 12s v1.15.1
# Note: if the token has expired:
- token
  - check the token in the log printed when the master was initialized
  - print the current tokens with kubeadm token list
  - if the token has expired, create a new one with kubeadm token create
- discovery-token-ca-cert-hash
  - check the sha256 value in the log printed when the master was initialized
  - or compute it with: openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
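If you would rather not assemble the token and hash by hand, kubeadm can print a ready-to-use join command on the master:
kubeadm token create --print-join-command # creates a new token and prints the full kubeadm join line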
# Check pod status
kubectl get pod -n kube-system -o wide
mkdir -p /usr/local/docker/kubernetes/plugins/flannel
cd /usr/local/docker/kubernetes/plugins/flannel
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Note: because of the Great Firewall most k8s-related images are blocked; see the article below for mirrors that others have published on Aliyun's registry
https://mp.weixin.qq.com/s/kf0SrktAze3bT7LcIveDYw
# Remove flannel
kubectl delete -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Watch the progress
kubectl get pods -n kube-system
# Show details
kubectl get pods -n kube-system -o wide -w
# Check the cluster node status
kubectl get node
Version requirements:
# This step must be done on all k8s nodes and on the habor node
vim /etc/docker/daemon.json
{
"exec-opts":["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"insecure-registries": ["https://habor-repo.com"]
}
# "insecure-registries": ["https://habor-repo.com"] 的意思是因为我们自己的https证书不被互联网认可,所以需要排除 , 注意不要添加 www
systemctl daemon-reload
systemctl restart docker
# Add "192.168.136.30 habor-repo.com" to /etc/hosts on every host
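For example, appended the same way as the earlier /etc/hosts block (using the harbor IP from the table at the top):
cat >> /etc/hosts <<EOF
192.168.136.30 habor-repo.com
EOF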
# It's best to install python3 on all nodes as well
#1. Install EPEL:
sudo yum -y install epel-release
#2. Install the IUS repository:
sudo yum -y install https://centos7.iuscommunity.org/ius-release.rpm
#3. Install Python 3.6:
sudo yum -y install python36u
#4. Install pip3:
sudo yum -y install python36u-pip
#5. Verify the installation:
python3.6 -V
pip3.6 -V
# Add symlinks
# so that python3 runs Python 3.6:
ln -s /usr/bin/python3.6 /usr/bin/python3
# same for pip3.6:
ln -s /usr/bin/pip3.6 /usr/bin/pip3
# Install a module through a mirror for faster downloads
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple [module-name]
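To avoid passing -i every time, the mirror can be made the default; this is a sketch that writes the standard per-user pip config file:
mkdir -p ~/.pip
cat > ~/.pip/pip.conf <<EOF
[global]
index-url = https://pypi.tuna.tsinghua.edu.cn/simple
EOF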
# Requires python and pip; python3 is preferred
pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple -U docker-compose
# Show help
docker-compose -h
###### Commands
docker-compose [command]
-f          specify the compose yaml file to use
ps          list all containers
restart     restart containers
pause       pause containers
unpause     unpause containers
stop        stop containers
start       start containers (already created but stopped)
rm          remove containers
up -d       create and start the project in the background (first run)
logs        show log output
config -q   validate the yaml file
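For example, against the compose file that Harbor's installer uses (docker-compose.yml under /usr/local/harbor, see the next section):
docker-compose -f /usr/local/harbor/docker-compose.yml ps # list the harbor containers
docker-compose -f /usr/local/harbor/docker-compose.yml restart # restart them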
#1. Harbor releases: https://github.com/vmware/harbor/releases
wget https://github.com/vmware/harbor/releases/download/v1.2.0/harbor-offline-installer-v1.2.0.tgz
tar -zxvf harbor-offline-installer-v1.2.0.tgz
mv harbor /usr/local/
cd /usr/local/harbor
vim harbor.cfg
hostname = habor-repo.com # change this line; keep everything else at the defaults
###### Configuration reference (start)
hostname: the target host name or fully qualified domain name
ui_url_protocol: http or https, defaults to http
db_password: root password of the MySQL database used for db_auth; change it for any production deployment
max_job_workers: (default 3) maximum number of replication workers in the job service. For each image replication job, a worker syncs all tags of a repository to the remote target. Increasing this allows more concurrent replication jobs, but each worker consumes network, CPU, and IO resources, so choose the value according to the host's hardware
customize_crt: (on or off, default on) when on, the prepare script generates a private key and root certificate for signing and verifying the registry token
ssl_cert: path to the SSL certificate, only used when the protocol is set to https
ssl_cert_key: path to the SSL key, only used when the protocol is set to https
secretkey_path: path of the key used to encrypt/decrypt the remote registry password in replication policies
###### Configuration reference (end)
# Create the directory
mkdir -p /data/cert
cd /data/cert
# Generate the HTTPS certificate
openssl genrsa -des3 -out server.key 2048 # you will be asked for a passphrase; enter 123456
openssl req -new -key server.key -out server.csr
# You will be prompted as follows
Enter pass phrase for server.key:
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:CN # country
State or Province Name (full name) []:SX # province
Locality Name (eg, city) [Default City]:XA # city
Organization Name (eg, company) [Default Company Ltd]:bart # company/organization
Organizational Unit Name (eg, section) []:habor-repo.com # domain
Common Name (eg, your name or your server's hostname) []:habor # host name
Email Address []:bart@qq.com # email
Please enter the following 'extra' attributes
to be sent with your certificate request
A challenge password []: # just press Enter
An optional company name []: # just press Enter
cp server.key server.key.org # keep a backup of the private key
openssl rsa -in server.key.org -out server.key # strip the passphrase from the key; enter 123456 when prompted
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt # self-sign the certificate
chmod -R 777 /data/cert # grant permissions
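To double-check what was just generated (plain openssl usage, nothing harbor-specific):
openssl x509 -in server.crt -noout -subject -dates # shows the DN entered above and the 365-day validity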
# Install harbor
cd /usr/local/harbor
./install.sh
# Open in a browser
http://habor-repo.com/harbor/sign-in
Log in with the default username and password:
admin
Harbor12345
# The password is set in /usr/local/harbor/harbor.cfg: harbor_admin_password = Harbor12345
# Log docker in to harbor
docker login https://habor-repo.com
Username / password: admin / Harbor12345
# The first login attempt may fail; restarting docker (systemctl restart docker) fixes it
# Push an image
# First pull an nginx image
docker pull nginx:latest
# Re-tag it
docker tag nginx:latest habor-repo.com/library/nginx:v1
# Push
docker push habor-repo.com/library/nginx:v1
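To verify the push from another docker host, a plain pull is enough; that host needs the same insecure-registries entry and the habor-repo.com line in /etc/hosts:
docker pull habor-repo.com/library/nginx:v1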
# The pushed nginx:v1 now shows up in the web UI
# Use the image on the k8s nodes
# Create a deployment; --port is the port inside the container
kubectl run nginx-deployment --image=habor-repo.com/library/nginx:v1 --port=80 --replicas=1
kubectl get deployment # list deployments
kubectl get rs # list replica sets
kubectl get pod # check pod status
kubectl get pod -o wide # show pod details
kubectl delete pod [pod-name] # delete a pod
kubectl scale --replicas=3 deployment/nginx-deployment # scale to 3 replicas
kubectl expose deployment nginx-deployment --port=30000 --target-port=80 # expose as a service
# List the exposed services
[root@k8s-master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 169m
nginx-deployment ClusterIP 10.110.194.10 <none> 30000/TCP 9m28s
# The type is ClusterIP; for access from outside the cluster change it to NodePort
curl 10.110.194.10:30000 # call it a few times and you can see the requests being load-balanced
# Change ClusterIP to NodePort
kubectl edit svc nginx-deployment
# change type to NodePort, then save and exit
# Check the service type again
[root@k8s-master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 174m
nginx-deployment NodePort 10.110.194.10 <none> 30000:30004/TCP 14m
# k8s now exposes port 30004 on every node, so the deployed nginx can be reached at any node's IP on port 30004
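For example, using k8s-node1's IP from the table at the top (the NodePort, 30004 here, is assigned randomly, so use whatever kubectl get svc printed):
curl http://192.168.136.20:30004 # returns the nginx welcome page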
Original article: https://www.cnblogs.com/bartggg/p/12996845.html