1. 环境
主机名 | IP |
---|---|
k8s-master01 | 10.0.0.10 |
k8s-master02 | 10.0.0.11 |
k8s-master03 | 10.0.0.12 |
k8s-node01 | 10.0.0.13 |
k8s-node02 | 10.0.0.14 |
k8s-node03 | 10.0.0.15 |
master-lb | 10.0.0.16 |
2. 基础环境部署
2.1 配置 hosts 文件
# Append name resolution for every cluster node (run on all hosts)
cat >> /etc/hosts <<'EOF'
10.0.0.10 k8s-master01
10.0.0.11 k8s-master02
10.0.0.12 k8s-master03
10.0.0.13 k8s-node01
10.0.0.14 k8s-node02
10.0.0.15 k8s-node03
10.0.0.16 master-lb
10.0.0.3 harbor.nbsre.cn
EOF
2.2 关闭 swap
# Temporarily disable swap (kubelet refuses to run with swap enabled)
swapoff -a
sysctl -w vm.swappiness=0
# Permanently disable: comment out every non-commented swap entry in /etc/fstab
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
echo vm.swappiness=0 >>/etc/sysctl.conf
sysctl -p
2.3 允许 iptables 检查桥接流量
# Load br_netfilter at boot so bridged traffic is visible to iptables
cat <<EOF | tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
# Kernel parameters required by Kubernetes networking
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
2.4 安装 docker
# 01. Configure the Docker yum repository (switched to the Tsinghua mirror)
curl -o /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# List the available versions
yum list docker-ce --showduplicates
# 02. Install Docker — choose ONE of the two lines below.
#     For Kubernetes 1.22 the latest Docker is fine; the pinned 19.03.15 is
#     kept active here. Running both commands in sequence would silently
#     upgrade the pinned install to the latest release.
yum -y install docker-ce-19.03.15 docker-ce-cli-19.03.15
# yum -y install docker-ce
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
2.5 配置 docker 镜像加速
# Configure a registry mirror and set the cgroup driver to systemd
# (must match the kubelet cgroup driver; systemd is the kubelet default in 1.22)
mkdir -pv /etc/docker && cat <<EOF | sudo tee /etc/docker/daemon.json
{
"registry-mirrors": ["https://v5yfisvk.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload
systemctl enable --now docker
2.6 配置 kubernetes yum源配置文件
# Aliyun mirror of the Kubernetes el7 repo; GPG checks disabled for the mirror
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF
2.7 安装 kubeadm,kubectl 和 kubelet
# Install kubeadm, kubelet and kubectl (the three versions must match)
# List the available versions first
yum -y list kubeadm --showduplicates | sort -r
# Install a pinned version
yum -y install kubeadm-1.22.15-0 kubelet-1.22.15-0 kubectl-1.22.15-0
# Enable kubelet; it will crash-loop until kubeadm init/join runs — expected
systemctl enable --now kubelet
systemctl status kubelet
3. 高可用组件部署(master节点部署)
3.1 部署nginx(所有master节点)
# 01. Download the nginx source
mkdir -p /server/tools
cd /server/tools
wget http://nginx.org/download/nginx-1.22.1.tar.gz
# 02. Install build dependencies: gcc/make are required to compile from
#     source, zlib-devel is needed by the gzip modules
yum install -y gcc make pcre-devel openssl-devel zlib-devel
# 03. Unpack, configure and build (stream module enables L4 TCP proxying)
tar xf nginx-1.22.1.tar.gz
cd nginx-1.22.1
./configure --prefix=/usr/local/nginx \
--with-pcre \
--with-http_ssl_module \
--with-http_stub_status_module \
--with-stream \
--with-http_gzip_static_module
make -j2 && make install
# 04. Write the nginx configuration: a stream (L4) proxy listening on 8443
#     that load-balances to the three kube-apiservers on 6443
cat >/usr/local/nginx/conf/nginx.conf <<'EOF'
user nginx;
worker_processes auto;
events {
worker_connections 10240;
}
error_log /usr/local/nginx/logs/error.log info;
stream {
upstream kube-apiservers {
hash $remote_addr consistent;
server k8s-master01:6443 weight=5 max_fails=1 fail_timeout=3s;
server k8s-master02:6443 weight=5 max_fails=1 fail_timeout=3s;
server k8s-master03:6443 weight=5 max_fails=1 fail_timeout=3s;
}
server {
listen 8443 reuseport;
proxy_connect_timeout 3s;
proxy_timeout 3000s;
proxy_pass kube-apiservers;
}
}
EOF
# 05. Create the nginx worker user (no login shell, no home directory)
useradd nginx -s /sbin/nologin -M
# 06. systemd unit file for the source-built nginx
cat >/usr/lib/systemd/system/nginx.service <<'EOF'
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target
[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s stop
TimeoutStopSec=5
Restart=on-failure
RestartSec=42s
[Install]
WantedBy=multi-user.target
EOF
# 07. Start the service, enable it at boot, and verify the 8443 listener
systemctl daemon-reload
systemctl start nginx
systemctl enable nginx
netstat -lntup|grep nginx
3.2 部署keepalived
# 01. Install keepalived
yum install keepalived -y
# 02. Write the config. Per host, adjust: router_id and mcast_src_ip (this
#     host's own IP), state (MASTER on one node, BACKUP on the other two)
#     and priority (MASTER highest, all three distinct).
cp /etc/keepalived/keepalived.conf{,.bak}
cat >/etc/keepalived/keepalived.conf <<'EOF'
! Configuration File for keepalived
global_defs {
# 改为每台机器的IP
router_id 10.0.0.10
}
vrrp_script check_k8s {
script "/server/scripts/check-k8s.sh"
interval 2
weight 2
fall 3
rise 2
}
vrrp_instance k8s-master {
# 其他两台机器改为 BACKUP
state MASTER
interface eth0
virtual_router_id 51
# 其他两台机器优先级低于100,要不同
priority 100
advert_int 1
# 每台机器mcast_src_ip不一样,自己网卡IP
mcast_src_ip 10.0.0.10
authentication {
auth_type PASS
auth_pass k8s-master-ha
}
track_script {
check_k8s
}
virtual_ipaddress {
10.0.0.16/24
}
}
EOF
# 03. Health-check script: if kube-apiserver disappears, stop keepalived so
#     the VIP fails over to another master. The target directory must be
#     created first — only /server/tools was created earlier.
mkdir -p /server/scripts
cat >/server/scripts/check-k8s.sh <<'EOF'
#!/bin/bash
# Probe kube-apiserver up to 5 times, 1s apart. If any probe finds no
# process, pid is forced to 0 and the loop exits early.
# (Function renamed from check_kube-apiserver: hyphens in function names
# are a non-portable bashism.)
function check_kube_apiserver(){
for i in `seq 5`
do
pid=`pgrep kube-apiserver`
if [[ -n $pid ]]
then
sleep 1
continue
else
pid=0
break
fi
done
}
check_kube_apiserver
if [[ $pid -eq 0 ]]
then
systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
chmod +x /server/scripts/check-k8s.sh
# 04. Start keepalived and enable it at boot
systemctl start keepalived
systemctl enable keepalived
4. 初始化 master01 节点
4.1 生成初始化预处理文件
# 01. Generate a default init configuration. The file is named
#     kubeadm-init.yml here because every later command (dry-run, images
#     list, init) references kubeadm-init.yml — the original wrote
#     kubeadm-init.yaml, which those commands would fail to find.
kubeadm config print init-defaults > kubeadm-init.yml
# 02. Edit the generated file as shown below
4.2 修改 初始化文件
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
# This master's own IP address
advertiseAddress: 10.0.0.10
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
imagePullPolicy: IfNotPresent
name: k8s-master01
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
# Use the Aliyun mirror instead of the default registry
#imageRepository: k8s.gcr.io
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
# NOTE(review): the installed kubeadm/kubelet packages are 1.22.15 —
# presumably this should be 1.22.15 as well; verify before init
kubernetesVersion: 1.22.0
# Control-plane endpoint: the load-balancer VIP and the nginx stream port
controlPlaneEndpoint: 10.0.0.16:8443
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
# Pod subnet (must match the CNI plugin configuration, e.g. flannel)
podSubnet: 10.244.0.0/16
scheduler: {}
4.3 检查 初始化 配置文件
# 03. Dry-run to validate the configuration file without changing the host
kubeadm init --config kubeadm-init.yml --dry-run
输出无报错即表示配置文件正确,如图所示
4.4 预拉取镜像
用于预览要下载的镜像,用来检测网络连通性
# List the images kubeadm will pull (also validates registry connectivity)
kubeadm config images list --config kubeadm-init.yml
4.5 基于 kubeadm 配置文件初始化集群
# Initialize the first control plane; --upload-certs stores the CA material
# in a cluster secret so other masters can join with --certificate-key
kubeadm init --config kubeadm-init.yml --upload-certs
# Extra: to start over, reset kubeadm state and wipe leftovers first
kubeadm reset
rm -fr ~/.kube/ /etc/kubernetes/* /var/lib/etcd/*
4.6 复制 kubectl 的 kubeconfig ,便于集群管理
# Copy the admin kubeconfig so kubectl works for the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Inspect the kubeadm-config ConfigMap.
# Fixed: kubectl only accepts "-o yaml", not "-o yml".
kubectl -n kube-system get cm kubeadm-config -o yaml
5. 将其他master节点加入集群
# Join the other masters: token/hash/certificate-key come from the init output
kubeadm join 10.0.0.16:8443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:cbbe897b1467b0106c486afc4a0b603e8a57d82c49fa81da32a43a3cbe9ece72 \
--control-plane --certificate-key 9f986be54b905873cb49286374baa96fcb91c32659d4438ee51a41f3c04b38ea
6. 将其他 node 节点加入集群
# Join worker nodes (same token/hash, but no --control-plane flag)
kubeadm join 10.0.0.16:8443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:cbbe897b1467b0106c486afc4a0b603e8a57d82c49fa81da32a43a3cbe9ece72
7. 安装网络插件
# 01. Nodes stay NotReady until a CNI network plugin is installed
kubectl get nodes
# 02. Install the network plugin (flannel.yml manifest must exist locally)
kubectl apply -f flannel.yml
8. 扩展内容 - 如何手动生成 token,并将其他节点加入集群
# 01. List the current tokens (fixed typo: the command is kubeadm, not kubeam)
kubeadm token list
# 02. On a master: generate a token value and create it, printing the
#     full join command (--ttl 0 makes it never expire)
date +%s | md5sum | cut -c 5-20
kubeadm token create yunxia.16e4539da45b0cd7 --ttl 0 --print-join-command
# 03. Delete a token
kubeadm token delete yunxia.16e4539da45b0cd7
# 04. Scale the cluster: run the printed join command on the new node.
#     NOTE(review): 10.0.0.60:6443 differs from the LB endpoint
#     10.0.0.16:8443 used elsewhere in this document — verify.
kubeadm join 10.0.0.60:6443 --token yunxia.16e4539da45b0cd7 --discovery-token-ca-cert-hash sha256:84b4a0b4d2933e4b0228249d696863c38d2eb48ffa8495090c27bb7091a55a97
8.1 master 节点
# 01. Create a never-expiring token and print the full join command
kubeadm token create --print-join-command --ttl 0
## Example of the generated join command:
kubeadm join 10.0.0.16:8443 --token wz6uhk.358bt7k1tq3jogpy --discovery-token-ca-cert-hash \
sha256:fab2f37f8299d8a3dea38fc2d300f5a4d1bdb20675c2de7f0ed9b793b93abe9
## kubeadm token data is stored as secrets inside the cluster
kubectl -n kube-system get secret
kubectl -n kube-system get secret bootstrap-token-wz6uhk -o yaml
# 02. Re-upload the control-plane certificates to the kubeadm-certs secret
kubeadm init phase upload-certs --upload-certs
# 03. The command above prints a new certificate key, e.g.:
db898f2d68537969c68bf60a6e0aaab287d38065066dbf9f3036c965c01582c5
# 04. Join additional masters using the token, hash and certificate key
kubeadm join 10.0.0.16:8443 --token wz6uhk.358bt7k1tq3jogpy --discovery-token-ca-cert-hash \
sha256:fab2f37f8299d8a3dea38fc2d300f5a4d1bdb20675c2de7f0ed9b793b93abe9 \
--control-plane --certificate-key db898f2d68537969c68bf60a6e0aaab287d38065066dbf9f3036c965c01582c5
# 05. Copy the admin kubeconfig so kubectl works on the new master
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
8.2 node 节点
# Join worker nodes with the newly created token
kubeadm join 10.0.0.16:8443 --token wz6uhk.358bt7k1tq3jogpy --discovery-token-ca-cert-hash \
sha256:fab2f37f8299d8a3dea38fc2d300f5a4d1bdb20675c2de7f0ed9b793b93abe9
9. 去掉 master 污点
# Remove the NoSchedule taint so regular pods can be scheduled on the masters
kubectl taint node k8s-master01 node-role.kubernetes.io/master:NoSchedule-
kubectl taint node k8s-master02 node-role.kubernetes.io/master:NoSchedule-
kubectl taint node k8s-master03 node-role.kubernetes.io/master:NoSchedule-
10. kube-proxy 修改为 ipvs 模式
# 01. Install the required packages (on every node)
yum -y install conntrack-tools ipvsadm
# 02. Load the IPVS kernel modules at boot.
#     NOTE(review): nf_conntrack_ipv4 exists on the CentOS 7 3.10 kernel;
#     on kernels >= 4.19 the module is named nf_conntrack — verify.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# 03. Set mode to "ipvs" in the kube-proxy ConfigMap (run on a master)
kubectl -n kube-system edit cm kube-proxy
# 04. Verify the mode change
kubectl -n kube-system describe cm kube-proxy | grep mode
# 05. Delete the old kube-proxy pods so they restart with the new mode
kubectl get pods -A | grep kube-proxy | awk '{print $2}' | xargs kubectl -n kube-system delete pods
# 06. Check the pod logs to confirm ipvs is in use
kubectl -n kube-system logs -f kube-proxy-xxx