# Write the kubeadm ClusterConfiguration used to bootstrap the HA control plane.
# The heredoc delimiter is quoted ('EOF') so nothing inside is subject to
# shell expansion — the file is written byte-for-byte.
# NOTE: certSANs must be nested under apiServer:, and serviceSubnet/podSubnet
# under networking:, otherwise kubeadm sees them as unknown top-level fields.
cat > ./kubeadm-config.yaml <<'EOF'
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.3
apiServer:
  # Extra SANs so the apiserver certificate is valid for loopback access,
  # the VIP, and every control-plane hostname and IP.
  certSANs:
  - "127.0.0.1"
  - "k8s-vip"
  - "k8s-m1"
  - "k8s-m2"
  - "k8s-m3"
  - "192.168.40.9"
  - "192.168.40.81"
  - "192.168.40.82"
  - "192.168.40.83"
# Pull control-plane images from the Aliyun mirror (gcr.io is unreachable
# from mainland China).
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
# Shared endpoint (keepalived VIP + haproxy on 16443) all nodes join through.
controlPlaneEndpoint: "k8s-vip:16443"
networking:
  serviceSubnet: "10.96.0.0/16"
  # Must match the Network value in kube-flannel.yml.
  podSubnet: "10.244.0.0/16"
EOF
(在主master上)
# kubeadm init --config=kubeadm-config.yaml --upload-certs
按照说明进行设置:
[root@k8s-m1 ~]# mkdir -p $HOME/.kube
[root@k8s-m1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-m1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-m1 ~]# mkdir -p ~/k8s_flannel && cd ~/k8s_flannel
[root@k8s-m1 k8s_flannel]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
修改kube-flannel.yml配置文件
# vim kube-flannel.yml
集群容器网段要与集群的pod-network一致
flanneld启动参数加上 --iface=<iface-name>,本例中的网卡为 eth0(单网卡时可省略;有多个网卡时必须指定网络接口,否则会导致 DNS 解析错误)
启动flannel:
# kubectl apply -f kube-flannel.yml 启动flannel网络插件
检查pod与node
先复制密钥及相关文件:
在vip的master1上把相关密钥与文件scp到其他所有master上
# ssh root@k8s-m2 mkdir -p /etc/kubernetes/pki/etcd
# scp /etc/kubernetes/admin.conf root@k8s-m2:/etc/kubernetes/admin.conf
# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@k8s-m2:/etc/kubernetes/pki
# scp /etc/kubernetes/pki/etcd/ca.* root@k8s-m2:/etc/kubernetes/pki/etcd
# ssh root@k8s-m3 mkdir -p /etc/kubernetes/pki/etcd
# scp /etc/kubernetes/admin.conf root@k8s-m3:/etc/kubernetes/admin.conf
# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@k8s-m3:/etc/kubernetes/pki
# scp /etc/kubernetes/pki/etcd/ca.* root@k8s-m3:/etc/kubernetes/pki/etcd
加入集群
在k8s-m2 k8s-m3 分别输入join命令:
# Join this node to the cluster as an additional control-plane member,
# going through the shared VIP endpoint (k8s-vip:16443).
# --control-plane together with --certificate-key downloads the control-plane
# certificates previously uploaded by 'kubeadm init --upload-certs'.
# NOTE: token, CA-cert hash, and certificate key all come from the output of
# 'kubeadm init' on the first master; by default the bootstrap token expires
# after 24h and the certificate key after 2h — regenerate them if stale.
kubeadm join k8s-vip:16443 --token cf1xoc.etmcuyk8d8ms138j \
--discovery-token-ca-cert-hash sha256:1b177dfd0be798642fb643f5a7c23aa92e8f1f11fd5619c7dcf8e815c69ce2c1 \
--control-plane --certificate-key 680e691bf851059d00ba25e96a4d2f68825965c56e15d2020b305a17fc6cc1b0
按照说明进行设置:
[root@k8s-m1 ~]# mkdir -p $HOME/.kube
[root@k8s-m1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-m1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
在各node(worker)节点输入join命令:
修改ConfigMap的kube-system/kube-proxy中的config.conf文件,填写mode: "ipvs"(注意必须使用英文半角引号):
# kubectl edit cm kube-proxy -n kube-system
重建master节点上的kube-proxy pod(删除后由DaemonSet自动重新创建):
# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
删除后会重新启动新的proxy pod
检查kube-proxy
# kubectl get pod -n kube-system | grep kube-proxy
# kubectl logs kube-proxy-jllpr -n kube-system 查看其中一个的log 日志中需打印出了Using ipvs Proxier
还可以使用 # ipvsadm -Ln 来查询kube-proxy的负载均衡情况
如果觉得我的文章对您有用,请随意打赏。你的支持将鼓励我继续创作!