配置
hostname | 节点IP | 节点类型 | 配置 |
---|---|---|---|
k8smaster | 10.37.1.45 | master | 4核8G |
k8snode1 | 10.37.1.46 | node | 4核8G |
k8snode2 | 10.37.1.47 | node | 4核8G |
准备
-
sudo cat /sys/class/dmi/id/product_uuid
检查uuid是否唯一。 - 内网互通
- 全部服务器,hosts都加上
cat /etc/hosts
10.37.1.45 k8smaster
10.37.1.46 k8snode1
10.37.1.47 k8snode2
- 全部服务器 禁用swap
sudo vim /etc/fstab
# 注释掉
#swap.img none swap sw 0 0
- 全部服务器,安装docker-ce
# Install docker-ce from the Aliyun mirror (run on every server).
# step 1: install prerequisite system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# step 2: install the repository GPG key
# NOTE(review): apt-key is deprecated on newer Ubuntu releases in favor of
# signed-by keyrings under /etc/apt/keyrings — confirm before reusing.
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# Step 3: add the Aliyun docker-ce apt repository for this Ubuntu release
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# Step 4: refresh the package index and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce
# start the daemon now and enable it at boot
sudo systemctl enable docker
sudo systemctl start docker
- 全部服务器 使普通用户也可以执行docker命令方便操作不用老加Sudo
sudo groupadd docker #create the docker group (may already exist after install)
sudo gpasswd -a $USER docker #add the logged-in user to the docker group
newgrp docker #re-evaluate group membership in the current shell
- 全部服务器,检查安装情况
qlz@k8snode1:~$ docker version
Client: Docker Engine - Community
Version: 20.10.13
API version: 1.41
Go version: go1.16.15
Git commit: a224086
Built: Thu Mar 10 14:07:51 2022
OS/Arch: linux/amd64
Context: default
Experimental: true
Server: Docker Engine - Community
Engine:
Version: 20.10.13
API version: 1.41 (minimum version 1.12)
Go version: go1.16.15
Git commit: 906f57f
Built: Thu Mar 10 14:05:44 2022
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: 1.5.10
GitCommit: 2a1d4dbdb2a1030dc5b01e96fb110a9d9f150ecc
runc:
Version: 1.0.3
GitCommit: v1.0.3-0-gf46b6ba
docker-init:
Version: 0.19.0
GitCommit: de40ad0
全部服务器,安装kubectl kubeadm kubelet
#!/bin/bash
# Install kubelet, kubeadm and kubectl (run on every server).
# 1. Update the apt package index and install packages needed to use the
#    Kubernetes apt repository:
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
# 2. Download the Google Cloud public signing key:
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
# 3. Add the Kubernetes apt repository:
# NOTE(review): apt.kubernetes.io / packages.cloud.google.com are the legacy
# repositories and have since been deprecated in favor of pkgs.k8s.io —
# verify before reusing these steps on a new cluster.
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
# 4. Update the apt package index, install kubelet, kubeadm and kubectl,
#    and pin their versions so apt upgrades don't break the cluster:
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
容器运行时
官方参考
当时运行初始化时遇到好多问题,重新看了这部分,在初始化前要处理好这些东西
- containerd
# Load the kernel modules containerd needs and persist them across reboots:
# overlay (overlayfs snapshotter) and br_netfilter (makes bridged pod
# traffic visible to iptables).
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
# Required sysctl parameters; persisted so they survive a reboot.
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# Apply the sysctl parameters without rebooting
sudo sysctl --system
配置 containerd:
# Generate containerd's default configuration and restart it to apply.
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo systemctl restart containerd
- 使用 systemd cgroup 驱动程序
/etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
sudo systemctl restart containerd
- CRI-O
要下载什么版本先看CRI-O 兼容性列表。
image.png
# Create a .conf file so the required kernel modules load at boot
cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
# Configure sysctl parameters; these persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system
操作系统 $OS
Ubuntu 20.04 xUbuntu_20.04
Ubuntu 19.10 xUbuntu_19.10
Ubuntu 19.04 xUbuntu_19.04
Ubuntu 18.04 xUbuntu_18.04
#!/bin/bash
# Add the openSUSE Kubic apt repositories for CRI-O and install
# cri-o + cri-o-runc.  Pick $OS and the CRI-O version from the
# compatibility table above before running.
OS='xUbuntu_20.04'   # distro path segment in the repository URL
VERSION1='1.23'      # CRI-O minor version
VERSION2='1.23.2'    # CRI-O patch version
# The paths documented upstream were wrong at the time of writing; the
# URLs below match the actual repository layout.  The version segments
# are built from $VERSION1/$VERSION2 so bumping the versions above is
# enough to target a different release.
cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /
EOF
cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION2.list
deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION1:/$VERSION2/$OS/ /
EOF
# Trust the repositories' signing keys (one keyring per repo)
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION1:/$VERSION2/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers-cri-o.gpg add -
sudo apt-get update
sudo apt-get install cri-o cri-o-runc -y
Docker
# Configure the Docker daemon to use the systemd cgroup driver (so kubelet
# and Docker agree on cgroup management), json-file log rotation capped at
# 100 MB, and the overlay2 storage driver.
sudo mkdir -p /etc/docker   # -p: don't fail if the directory already exists
cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
安装集群
官方提到,这个步骤是可选的,只适用于你希望
kubeadm init
和kubeadm join
不去下载存放在k8s.gcr.io
上的默认的容器镜像的情况。
当你在离线的节点上创建一个集群的时候,Kubeadm 有一些命令可以帮助你预拉取所需的镜像。 阅读离线运行 kubeadm 获取更多的详情。
Kubeadm 允许你给所需要的镜像指定一个自定义的镜像仓库。 阅读使用自定义镜像 获取更多的详情。
网络环境问题,懂的自然懂
- 检查需要哪些镜像,这里能查出官方最新的一套k8s的镜像。
qlz@k8smaster:/data2/yfbin$ kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.23.5 # apiserver相当于网关
k8s.gcr.io/kube-controller-manager:v1.23.5 #管理控制
k8s.gcr.io/kube-scheduler:v1.23.5 # 调度
k8s.gcr.io/kube-proxy:v1.23.5 # 负责service的实现
k8s.gcr.io/pause:3.6 # 创建共享的网络名称空间
k8s.gcr.io/etcd:3.5.1-0 # etcd配置
k8s.gcr.io/coredns/coredns:v1.8.6 # dns
image.png
- 创建一个脚本,给执行权限方便操作
#!/bin/bash
# Pull the control-plane images kubeadm needs from the Aliyun mirror,
# retag them under k8s.gcr.io (the names kubeadm looks for), then remove
# the mirror tags so `docker images` stays tidy.
#
# Image list obtained with: kubeadm config images list
#   k8s.gcr.io/kube-apiserver:v1.23.5
#   k8s.gcr.io/kube-controller-manager:v1.23.5
#   k8s.gcr.io/kube-scheduler:v1.23.5
#   k8s.gcr.io/kube-proxy:v1.23.5
#   k8s.gcr.io/pause:3.6
#   k8s.gcr.io/etcd:3.5.1-0
#   k8s.gcr.io/coredns/coredns:v1.8.6
aliyun='registry.aliyuncs.com/google_containers'
k8s='k8s.gcr.io'
# Images that the Aliyun mirror provides under the same name:tag.
images=(
  'kube-apiserver:v1.23.5'
  'kube-controller-manager:v1.23.5'
  'kube-scheduler:v1.23.5'
  'kube-proxy:v1.23.5'
  'pause:3.6'
  'etcd:3.5.1-0'
)
# coredns is the odd one out: the Aliyun mirror doesn't carry it under
# this path and k8s.gcr.io is unreachable, so pull it from the official
# hub (upstream tag has no leading 'v') and retag WITH the 'v' prefix,
# which is the name kubeadm expects.
coredns='coredns/coredns:1.8.6'
vcoredns='coredns/coredns:v1.8.6'

# pull from the Aliyun mirror
for image in "${images[@]}"; do
  sudo docker pull "${aliyun}/${image}"
done
sudo docker pull "${coredns}"

# retag under k8s.gcr.io
for image in "${images[@]}"; do
  sudo docker tag "${aliyun}/${image}" "${k8s}/${image}"
done
sudo docker tag "${coredns}" "${k8s}/${vcoredns}"

# drop the pre-retag names
for image in "${images[@]}"; do
  sudo docker rmi "${aliyun}/${image}"
done
sudo docker rmi "${coredns}"

# show the result
sudo docker images
运行脚本结果
qlz@k8smaster:/data2/yfbin/k8s$ sudo ./pull.sh
v1.23.5: Pulling from google_containers/kube-apiserver
2df365faf0e3: Pull complete
8c99db1114c6: Pull complete
b6a9a43f03b3: Pull complete
Digest: sha256:ddf5bf7196eb534271f9e5d403f4da19838d5610bb5ca191001bde5f32b5492e
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.5
registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.5
v1.23.5: Pulling from google_containers/kube-controller-manager
2df365faf0e3: Already exists
8c99db1114c6: Already exists
38fc68ed5962: Pull complete
Digest: sha256:cca0fb3532abedcc95c5f64268d54da9ecc56cc4817ff08d0128941cf2b0e1a4
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.5
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.5
v1.23.5: Pulling from google_containers/kube-scheduler
2df365faf0e3: Already exists
8c99db1114c6: Already exists
d117fbcd1c95: Pull complete
Digest: sha256:489efb65da9edc40bf0911f3e6371e5bb6b8ad8fde1d55193a6cc84c2ef36626
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.5
registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.5
v1.23.5: Pulling from google_containers/kube-proxy
b2481554545f: Pull complete
d9c824a47c4e: Pull complete
b767f8819bab: Pull complete
Digest: sha256:c1f625d115fbd9a12eac615653fc81c0edb33b2b5a76d1e09d5daed11fa557c1
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/kube-proxy:v1.23.5
registry.aliyuncs.com/google_containers/kube-proxy:v1.23.5
3.6: Pulling from google_containers/pause
fbe1a72f5dcd: Pull complete
Digest: sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/pause:3.6
registry.aliyuncs.com/google_containers/pause:3.6
3.5.1-0: Pulling from google_containers/etcd
e8614d09b7be: Pull complete
45b6afb4a92f: Pull complete
f951ee5fe858: Pull complete
0c6b9ab3ebf9: Pull complete
7314eabc351c: Pull complete
Digest: sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/etcd:3.5.1-0
registry.aliyuncs.com/google_containers/etcd:3.5.1-0
1.8.6: Pulling from google_containers/coredns
d92bdee79785: Pull complete
6e1b7c06e42d: Pull complete
Digest: sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/coredns:1.8.6
registry.aliyuncs.com/google_containers/coredns:1.8.6
Untagged: registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.5
Untagged: registry.aliyuncs.com/google_containers/kube-apiserver@sha256:ddf5bf7196eb534271f9e5d403f4da19838d5610bb5ca191001bde5f32b5492e
Untagged: registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.5
Untagged: registry.aliyuncs.com/google_containers/kube-controller-manager@sha256:cca0fb3532abedcc95c5f64268d54da9ecc56cc4817ff08d0128941cf2b0e1a4
Untagged: registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.5
Untagged: registry.aliyuncs.com/google_containers/kube-scheduler@sha256:489efb65da9edc40bf0911f3e6371e5bb6b8ad8fde1d55193a6cc84c2ef36626
Untagged: registry.aliyuncs.com/google_containers/kube-proxy:v1.23.5
Untagged: registry.aliyuncs.com/google_containers/kube-proxy@sha256:c1f625d115fbd9a12eac615653fc81c0edb33b2b5a76d1e09d5daed11fa557c1
Untagged: registry.aliyuncs.com/google_containers/pause:3.6
Untagged: registry.aliyuncs.com/google_containers/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db
Untagged: registry.aliyuncs.com/google_containers/etcd:3.5.1-0
Untagged: registry.aliyuncs.com/google_containers/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263
Untagged: registry.aliyuncs.com/google_containers/coredns:1.8.6
Untagged: registry.aliyuncs.com/google_containers/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-apiserver v1.23.5 3fc1d62d6587 2 weeks ago 135MB
k8s.gcr.io/kube-proxy v1.23.5 3c53fa8541f9 2 weeks ago 112MB
k8s.gcr.io/kube-scheduler v1.23.5 884d49d6d8c9 2 weeks ago 53.5MB
k8s.gcr.io/kube-controller-manager v1.23.5 b0c9e5e4dbb1 2 weeks ago 125MB
k8s.gcr.io/etcd 3.5.1-0 25f8c7f3da61 4 months ago 293MB
k8s.gcr.io/coredns/coredns v1.8.6 a4ca41631cc7 5 months ago 46.8MB
k8s.gcr.io/pause 3.6 6270bb605e12 7 months ago 683kB
qlz@k8smaster:/data2/yfbin/k8s$
- 初始化master
# kubernetes-version版本号以kube的版本号命名
kubeadm init --apiserver-advertise-address=10.37.1.45 --kubernetes-version=v1.23.5 --service-cidr=172.16.0.0/12 --pod-network-cidr=172.1.0.0/16 --ignore-preflight-errors=all --v=6
- -v, --v 日志等级(log level verbosity),数值越大输出越详细
- --apiserver-advertise-address string API 服务器所公布的其正在监听的 IP 地址。如果未设置,则使用默认网络接口。使用master的ip,所有请求从master进来。
- --kubernetes-version 与我们的镜像版本保持一致,如果不一致,他会从k8s上下载对应版本的镜像。
- --service-cidr 默认值:"10.96.0.0/12" 为服务的虚拟 IP 地址另外指定 IP 地址段
- --pod-network-cidr 指明 pod 网络可以使用的 IP 地址段。如果设置了这个参数,控制平面将会为每一个节点自动分配 CIDRs。
如何划分service-cidr与pod-network-cidr
网络和IP地址计算器
根据实际情况计算
service-cidr
image.png
pod-network-cidr
image.png
安装完成
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.37.1.45:6443 --token s9px1h.h3x35nk68syq9h6u \
--discovery-token-ca-cert-hash sha256:1d85143b4a5d58e9bf1b8475580cb7872a93d8d551d0fa0c01651c39d0ccb504
qlz@k8smaster:/data2/yfbin/k8s$
最后那串要记起来,方便以后各节点加入集群。
ROLES标签是空的
qlz@k8smaster:~$ kubectl get no --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8smaster Ready control-plane,master 70m v1.23.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8smaster,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
k8snode1 Ready <none> 40m v1.23.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8snode1,kubernetes.io/os=linux
k8snode2 Ready <none> 25m v1.23.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8snode2,kubernetes.io/os=linux
增加标签
kubectl label node k8snode1 node-role.kubernetes.io/node=1
删除标签
kubectl label node k8snode1 node-role.kubernetes.io/node-
qlz@k8smaster:~$ kubectl get no --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8smaster Ready control-plane,master 73m v1.23.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8smaster,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
k8snode1 Ready node 42m v1.23.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8snode1,kubernetes.io/os=linux,node-role.kubernetes.io/node=1
k8snode2 Ready node 28m v1.23.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8snode2,kubernetes.io/os=linux,node-role.kubernetes.io/node=2