Adding Nodes to a k8s Cluster

Published on 2022-05-16


Environment configuration on the new node

# systemctl stop firewalld && systemctl disable firewalld
# setenforce 0 && sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# ntpdate ntp.aliyun.com
# sed -i 's/.*swap.*/#&/' /etc/fstab && swapoff -a
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
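
If you want to confirm the new repo is picked up before installing anything, a quick check like the following should list it (the grep pattern is just an example):

# yum repolist enabled | grep -i kubernetes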

# cat <<EOF > /etc/modules-load.d/ipvs.conf 
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack_ipv4
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
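
Note that the two net.bridge.* settings only take effect once the br_netfilter kernel module is loaded; it is not in the ipvs.conf list above. If you want to load it and apply the sysctl file immediately instead of waiting for the reboot, something like this should work (adding br_netfilter here is my own addition):

# modprobe br_netfilter
# sysctl --system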

Reboot the server so the kernel modules and sysctl settings take effect.

Install Docker on the new node

# yum install -y yum-utils device-mapper-persistent-data lvm2 git
# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# yum install docker-ce -y
# systemctl start docker && systemctl enable docker
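
Optionally, note which cgroup driver Docker is using; the same value is read again below when the kubelet is configured:

# docker info | grep -i 'cgroup driver'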

Pull images on the new node

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker pull quay.io/coreos/flannel:v0.14.0

# Re-tag the images as k8s.gcr.io
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.2 k8s.gcr.io/kube-controller-manager:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.2 k8s.gcr.io/kube-proxy:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.2 k8s.gcr.io/kube-apiserver:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.2 k8s.gcr.io/kube-scheduler:v1.20.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
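
The pulls and re-tags can also be scripted in one loop; a minimal sketch using the same mirror and image versions as above:

MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
for img in kube-controller-manager:v1.20.2 kube-proxy:v1.20.2 kube-apiserver:v1.20.2 \
           kube-scheduler:v1.20.2 coredns:1.7.0 etcd:3.4.13-0 pause:3.2; do
    docker pull ${MIRROR}/${img}                     # pull from the Aliyun mirror
    docker tag  ${MIRROR}/${img} k8s.gcr.io/${img}   # re-tag as k8s.gcr.io for kubeadm
done
docker pull quay.io/coreos/flannel:v0.14.0           # flannel comes from quay.io and needs no re-tag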

Install the Kubernetes components on the new node

# yum install -y kubelet-1.20.2-0.x86_64 kubeadm-1.20.2-0.x86_64 kubectl-1.20.2-0.x86_64 ipvsadm
DOCKER_CGROUPS=$(docker info | grep 'Cgroup' | awk 'NR==1 {print $3}')
cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=$DOCKER_CGROUPS --pod-infra-container-image=k8s.gcr.io/pause:3.2"
EOF
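
kubeadm join expects the kubelet service to be enabled so that it can start it; if it is not already enabled on your system, this is the usual step:

# systemctl enable kubelet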

If the new node is a worker node

Run on the existing master:
# kubeadm token create --print-join-command
kubeadm join 192.168.96.233:6443 --token 3em4f7.q4qnt5qh4dlmyltu --discovery-token-ca-cert-hash sha256:20d1eef26809b06abf3804cd4bd5c3630ca70c6e2759d3e23eb7acdad0e1b93b

# You can add --ttl=0 to the command above so the token never expires (the default is 24 hours)
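
# For example, to generate a join command whose token never expires:
# kubeadm token create --ttl=0 --print-join-command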

# Run on the new worker node
kubeadm join 192.168.96.233:6443 --token 3em4f7.q4qnt5qh4dlmyltu --discovery-token-ca-cert-hash sha256:20d1eef26809b06abf3804cd4bd5c3630ca70c6e2759d3e23eb7acdad0e1b93b

If the new node is a master (control-plane) node

Run on the existing master:
# kubeadm token create --print-join-command 
kubeadm join 192.168.96.233:6443 --token 3em4f7.q4qnt5qh4dlmyltu --discovery-token-ca-cert-hash sha256:20d1eef26809b06abf3804cd4bd5c3630ca70c6e2759d3e23eb7acdad0e1b93b

# kubeadm init phase upload-certs --upload-certs
I0516 05:27:46.573100   35185 version.go:251] remote version is much newer: v1.24.0; falling back to: stable-1.20
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
8d0917dfd5b06f46a7d1c2708323589cc5887140d87e9aca1479b04874938d0b

Run on the new master node:
kubeadm join 192.168.96.233:6443 --token 3em4f7.q4qnt5qh4dlmyltu --discovery-token-ca-cert-hash sha256:20d1eef26809b06abf3804cd4bd5c3630ca70c6e2759d3e23eb7acdad0e1b93b --control-plane --certificate-key 8d0917dfd5b06f46a7d1c2708323589cc5887140d87e9aca1479b04874938d0b

# If the join fails with the following error:
unable to add a new control plane instance to a cluster that doesn't have a stable controlPlaneEndpoint address

# Fix:
kubectl -n kube-system edit cm kubeadm-config
and add a controlPlaneEndpoint entry as shown in the sketch below; the address must be the existing master's IP and apiserver port.

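A minimal sketch of the edit, inside the ClusterConfiguration block of that ConfigMap and assuming the existing master address 192.168.96.233:6443 used in the join commands above (the other fields are left as they are):

kind: ClusterConfiguration
controlPlaneEndpoint: "192.168.96.233:6443"   # add this line; old master IP and apiserver port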

Verify

[root@master ~]# kubectl get node
NAME      STATUS   ROLES                  AGE   VERSION
master    Ready    control-plane,master   69m   v1.20.2
master2   Ready    control-plane,master   24m   v1.20.2
node-1    Ready    <none>                 61m   v1.20.2
node-2    Ready    <none>                 61m   v1.20.2
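
You can also confirm that the kube-proxy and flannel pods are running on the newly joined nodes:

[root@master ~]# kubectl get pods -n kube-system -o wide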