环境
centos 7.6
安装前准备
- 配置hostname
 
# Set this node's hostname; must be unique within the cluster
sudo hostnamectl set-hostname k8s-node156
- 配置ntp同步
 
# If ntpdate is not installed yet, install it first.
# (The original text mistakenly repeated the hostnamectl command here.)
yum install -y ntpdate
# One-shot clock sync against the public NTP pool
ntpdate pool.ntp.org
# Cron job: re-sync every 10 minutes via root's crontab.
# Guard with grep so re-running this script does not add duplicate entries.
grep -q 'ntpdate pool.ntp.org' /var/spool/cron/root 2>/dev/null || \
  echo '*/10 * * * * /usr/sbin/ntpdate pool.ntp.org' >>/var/spool/cron/root
- 关闭selinux,防火墙,swap
 
# Disable SELinux persistently (effective after reboot) ...
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# ... and for the current session too (ignore failure if already permissive)
setenforce 0 || true
# Stop the firewall now and keep it off across reboots
systemctl stop firewalld
systemctl disable firewalld
# kubelet requires swap to be off: turn it off now ...
swapoff -a
# ... and comment out swap entries in fstab so it stays off after reboot
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab
echo "vm.swappiness=0" >> /etc/sysctl.conf
# The original had the garbled command "sysctl -pswapoff -a";
# it should simply reload sysctl settings:
sysctl -p
- 修改内核参数
 
# Bridge utilities and kernel modules needed for pod networking
yum install -y bridge-utils.x86_64
modprobe bridge
modprobe br_netfilter
# Make bridged traffic visible to iptables (required by kube-proxy/CNI).
# Guard each append so re-running this script does not duplicate lines.
grep -q '^net.bridge.bridge-nf-call-iptables=1' /etc/sysctl.conf || \
  echo "net.bridge.bridge-nf-call-iptables=1" >> /etc/sysctl.conf
grep -q '^net.bridge.bridge-nf-call-ip6tables=1' /etc/sysctl.conf || \
  echo "net.bridge.bridge-nf-call-ip6tables=1" >> /etc/sysctl.conf
sysctl -p
安装docker , k8s
- 安装docker 略过
 - 安装k8s,这里建议使用与其他节点相同版本的k8s及软件源
- 查看其他节点的k8s源
 
 
[root@k8s-master storage]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
- 配置新节点源
 
# Write the Kubernetes yum repo (same Aliyun mirror as the existing nodes).
# Quoted 'EOF' delimiter keeps the body literal; it contains no expansions,
# so the file written is identical to before.
cat > /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
- 查看其他机器上安装的软件包版本,并在新节点上安装
 
[root@k8s-master storage]# yum list kubelet
已加载插件:fastestmirror, langpacks, priorities
Repodata is over 2 weeks old. Install yum-cron? Or run: yum makecache fast
Determining fastest mirrors
 * epel: hkg.mirror.rackspace.com
1 packages excluded due to repository priority protections
已安装的软件包
kubelet.x86_64                                                                         1.19.1-0                                                                          @kubernetes
可安装的软件包
kubelet.x86_64            
[root@k8s-node156 ~]# yum -y install kubelet-1.19.1-0.x86_64
[root@k8s-node156 ~]# yum -y install kubeadm-1.19.1-0.x86_64
[root@k8s-node156 ~]# systemctl enable kubelet
- 加入集群
 
#在主节点上执行以下命令,获取加入集群命令
[root@k8s-master storage]# kubeadm token create --print-join-command --ttl 0
#然后将打印出来的命令在新节点上执行即可
测试
[root@k8s-master storage]# kubectl get nodes
NAME          STATUS   ROLES    AGE    VERSION
k8s-master    Ready    master   37d    v1.19.1
k8s-node156   Ready    <none>   3m7s   v1.19.1
k8s-node213   Ready    <none>   36d    v1.19.1
k8s-node218   Ready    <none>   36d    v1.19.1
到这里我们集群节点就添加成功啦,最后测试一下,看新的pod会不会在新节点上跑
[root@k8s-master storage]# kubectl get pod -n midware -o wide
NAME                                                 READY   STATUS    RESTARTS   AGE   IP           NODE          NOMINATED NODE   READINESS GATES
mysql-0                                              1/1     Running   0          17h   10.34.0.13   k8s-node213   <none>           <none>
mysql-1                                              1/1     Running   0          17h   10.44.0.13   k8s-node218   <none>           <none>
mysql-2                                              1/1     Running   0          17h   10.44.0.16   k8s-node218   <none>           <none>
nfs-common-nfs-client-provisioner-6d8c5c579b-fkpjw   1/1     Running   0          22h   10.44.0.11   k8s-node218   <none>           <none>
[root@k8s-master storage]# kubectl delete pod -n midware mysql-2
pod "mysql-2" deleted
[root@k8s-master storage]# kubectl get pod -n midware -o wide
NAME                                                 READY   STATUS     RESTARTS   AGE   IP           NODE          NOMINATED NODE   READINESS GATES
mysql-0                                              1/1     Running    0          17h   10.34.0.13   k8s-node213   <none>           <none>
mysql-1                                              1/1     Running    0          17h   10.44.0.13   k8s-node218   <none>           <none>
mysql-2                                              0/1     Init:0/1   0          5s    <none>       k8s-node156   <none>           <none>
nfs-common-nfs-client-provisioner-6d8c5c579b-fkpjw   1/1     Running    0          22h   10.44.0.11   k8s-node218   <none>           <none>
[root@k8s-master storage]#