一、Kubernetes cluster upgrade
1、Upgrading the master nodes
1.1 Download the binary package for the target version
Kubernetes binaries are published on the GitHub kubernetes releases page.
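For example, for v1.23.5 (a minimal sketch; the URL follows the dl.k8s.io layout linked from the release notes, adjust version and architecture as needed):
# download and unpack the server tarball, which contains all master and node binaries
wget https://dl.k8s.io/v1.23.5/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
ls kubernetes/server/bin    # kube-apiserver, kube-controller-manager, kubelet, kube-proxy, kube-scheduler, kubectl, ...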
1.2 First take the master being upgraded out of the load balancer by commenting out its server entry in kube-lb.conf:
root@k8s-node2:/etc/kube-lb/conf# cat /etc/kube-lb/conf/kube-lb.conf
user root;
worker_processes 1;
error_log  /etc/kube-lb/logs/error.log warn;

events {
    worker_connections  3000;
}

stream {
    upstream backend {
        #server 172.31.7.101:6443    max_fails=2 fail_timeout=3s;    # commented out: this master is being upgraded
        server 172.31.7.102:6443    max_fails=2 fail_timeout=3s;
    }

    server {
        listen 127.0.0.1:6443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
#systemctl restart kube-lb
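kube-lb runs on every node, so the same change has to be rolled out everywhere. A rough sketch (the host list is an example; adjust to your environment):
# comment out the 172.31.7.101 upstream entry and restart kube-lb on each remaining host
for h in 172.31.7.102 172.31.7.111 172.31.7.112; do
  ssh $h "sed -i '/^ *server 172.31.7.101:6443/s/^/#/' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb"
done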
Stop the kubernetes services on the master:
root@k8s-master1-etcd1:~/kubernetes_1.23.5/kubernetes/server/bin# systemctl stop kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet
The master components in the downloaded binary package are kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler, kubelet, and kubectl; copy them to /usr/local/bin:
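Optionally (not in the original steps), back up the old binaries first so a rollback stays easy:
# hypothetical backup path; any directory works
mkdir -p /opt/kube-bin-v1.23.1
cp /usr/local/bin/kube* /opt/kube-bin-v1.23.1/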
cp kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet kubectl /usr/local/bin
#Restart the services
systemctl start kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet
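A quick sanity check that the services picked up the new binaries:
kube-apiserver --version    # should report Kubernetes v1.23.5
systemctl is-active kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy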
Then switch the load balancer back (re-enable 172.31.7.101 and comment out 172.31.7.102) and upgrade the other master node the same way.
Finally, verify that both masters have been upgraded to v1.23.5:
root@k8s-master1-etcd1:~# kubectl get nodes
NAME           STATUS                     ROLES    AGE   VERSION
172.31.7.101   Ready,SchedulingDisabled   master   30h   v1.23.5
172.31.7.102   Ready,SchedulingDisabled   master   30h   v1.23.5
172.31.7.111   Ready                      node     30h   v1.23.1
172.31.7.112   Ready                      node     30h   v1.23.1
2、Upgrading the worker nodes
(1) First drain all pods off node1. A plain kubectl drain refuses because of DaemonSet-managed pods, pods with local storage, and unmanaged pods, so the extra flags shown in the second attempt are needed:
root@k8s-master1-etcd1:~# kubectl drain 172.31.7.111
node/172.31.7.111 cordoned
error: unable to drain node "172.31.7.111" due to error:[cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system/calico-node-wlpl9, cannot delete Pods with local storage (use --delete-emptydir-data to override): kubernetes-dashboard/dashboard-metrics-scraper-799d786dbf-l52xb, velero-system/velero-6755cb8697-l2x99, cannot delete Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use --force to override): default/net-test1], continuing command...
There are pending nodes to be drained:
172.31.7.111
cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system/calico-node-wlpl9
cannot delete Pods with local storage (use --delete-emptydir-data to override): kubernetes-dashboard/dashboard-metrics-scraper-799d786dbf-l52xb, velero-system/velero-6755cb8697-l2x99
cannot delete Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use --force to override): default/net-test1
root@k8s-master1-etcd1:~# kubectl drain 172.31.7.111 --ignore-daemonsets --delete-emptydir-data --force
node/172.31.7.111 already cordoned
WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/net-test1; ignoring DaemonSet-managed Pods: kube-system/calico-node-wlpl9
evicting pod velero-system/velero-6755cb8697-l2x99
evicting pod kube-system/coredns-79688b6cb4-kqpgs
evicting pod default/net-test1
evicting pod kubernetes-dashboard/dashboard-metrics-scraper-799d786dbf-l52xb
pod/velero-6755cb8697-l2x99 evicted
pod/dashboard-metrics-scraper-799d786dbf-l52xb evicted
pod/coredns-79688b6cb4-kqpgs evicted
pod/net-test1 evicted
node/172.31.7.111 drained
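Optionally confirm that only the DaemonSet-managed pod (calico-node) is left on the drained node:
kubectl get pods -A -o wide --field-selector spec.nodeName=172.31.7.111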
root@k8s-master1-etcd1:~# kubectl get nodes
NAME           STATUS                     ROLES    AGE   VERSION
172.31.7.101   Ready,SchedulingDisabled   master   30h   v1.23.5
172.31.7.102   Ready,SchedulingDisabled   master   30h   v1.23.5
172.31.7.111   Ready,SchedulingDisabled   node     30h   v1.23.1
172.31.7.112   Ready                      node     30h   v1.23.1
(2) Stop the kubelet and kube-proxy services on node1:
root@k8s-node1:/etc/kube-lb/conf# systemctl stop kubelet kube-proxy.service
(3) Copy the new kubelet and kube-proxy binaries to /usr/local/bin on node1:
root@k8s-master1-etcd1:~/kubernetes_1.23.5/kubernetes/server/bin# scp kubelet kube-proxy 172.31.7.111:/usr/local/bin
(4) Start the kubelet and kube-proxy services on node1:
root@k8s-node1:/etc/kube-lb/conf# systemctl start kubelet kube-proxy.service
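Before putting the node back into service, it is worth verifying the new version and that both services are up:
kubelet --version       # Kubernetes v1.23.5
kube-proxy --version    # Kubernetes v1.23.5
systemctl is-active kubelet kube-proxy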
(5) Uncordon the node so it can be scheduled again:
root@k8s-master1-etcd1:~/kubernetes_1.23.5/kubernetes/server/bin# kubectl uncordon 172.31.7.111
node/172.31.7.111 uncordoned
Upgrade node2 with the same procedure, then verify:
root@k8s-master1-etcd1:~/kubernetes_1.23.5/kubernetes/server/bin# kubectl get nodes
NAME           STATUS                     ROLES    AGE   VERSION
172.31.7.101   Ready,SchedulingDisabled   master   30h   v1.23.5
172.31.7.102   Ready,SchedulingDisabled   master   30h   v1.23.5
172.31.7.111   Ready                      node     30h   v1.23.5
172.31.7.112   Ready                      node     30h   v1.23.5
3、Adding master and worker nodes with the kubeasz project
(1) Copy the required binaries to /etc/kubeasz/bin and set up passwordless SSH authentication to the node being added (see the sketch after the cp command below):
\cp kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet kubectl /etc/kubeasz/bin/
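The passwordless-SSH part might look like this (assumes a key pair already exists on the deploy host; generate one with ssh-keygen first if not):
ssh-copy-id root@172.31.7.103    # enter the node's root password once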
(2) Add a master:
root@k8s-master1-etcd1:/etc/kubeasz# ./ezctl --help
Usage: ezctl COMMAND [args]
-------------------------------------------------------------------------------------
Cluster setups:
list to list all of the managed clusters
checkout <cluster> to switch default kubeconfig of the cluster
new <cluster> to start a new k8s deploy with name 'cluster'
setup <cluster> <step> to setup a cluster, also supporting a step-by-step way
start <cluster> to start all of the k8s services stopped by 'ezctl stop'
stop <cluster> to stop all of the k8s services temporarily
upgrade <cluster> to upgrade the k8s cluster
destroy <cluster> to destroy the k8s cluster
backup <cluster> to backup the cluster state (etcd snapshot)
restore <cluster> to restore the cluster state from backups
start-aio to quickly setup an all-in-one cluster with 'default' settings
Cluster ops:
add-etcd <cluster> <ip> to add a etcd-node to the etcd cluster
add-master <cluster> <ip> to add a master node to the k8s cluster
add-node <cluster> <ip> to add a work node to the k8s cluster
del-etcd <cluster> <ip> to delete a etcd-node from the etcd cluster
del-master <cluster> <ip> to delete a master node from the k8s cluster
del-node <cluster> <ip> to delete a work node from the k8s cluster
Extra operation:
kcfg-adm <cluster> <args> to manage client kubeconfig of the k8s cluster
Use "ezctl help <command>" for more information about a given command.
root@k8s-master1-etcd1:/etc/kubeasz# ./ezctl add-master k8s-cluster-01 172.31.7.103
2022-04-24 23:16:50 INFO add 172.31.7.103 into 'kube_master' group
2022-04-24 23:16:50 INFO start to add a master node:172.31.7.103 into cluster:k8s-cluster-01
(3) Add a worker node (same ezctl usage as shown above):
root@k8s-master1-etcd1:/etc/kubeasz# ./ezctl add-node k8s-cluster-01 172.31.7.113
2022-04-24 23:37:22 INFO add 172.31.7.113 into 'kube_node' group
2022-04-24 23:37:22 INFO start to add a work node:172.31.7.113 into cluster:k8s-cluster-01
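To double-check what ezctl recorded, the new node should now be listed in the cluster's ansible inventory (the path below follows kubeasz conventions; verify on your install):
grep -A5 '\[kube_node\]' /etc/kubeasz/clusters/k8s-cluster-01/hosts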
#Verify
root@k8s-master1-etcd1:/etc/kubeasz# kubectl get nodes
NAME           STATUS                     ROLES    AGE     VERSION
172.31.7.101   Ready,SchedulingDisabled   master   31h     v1.23.5
172.31.7.102   Ready,SchedulingDisabled   master   31h     v1.23.5
172.31.7.103   Ready,SchedulingDisabled   master   21m     v1.23.5
172.31.7.111   Ready                      node     31h     v1.23.5
172.31.7.112   Ready                      node     31h     v1.23.5
172.31.7.113   Ready                      node     2m24s   v1.23.5