01 - k8s 1.20 Setup: Dropping Docker and Using containerd

Preface:
Kubernetes keeps announcing that it will drop Docker, so fine — let's see what running k8s without Docker feels like.

1. Environment Preparation

[Run on all nodes]

1.1 Disable Unneeded Services

  • Disable SELinux (a sketch for applying these changes immediately follows this list)
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g'  /etc/sysconfig/selinux
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g'  /etc/selinux/config
  • Disable swap
# sed -i "/swap/{s/^/#/g}" /etc/fstab
# swapoff -a
  • Disable the firewall
# systemctl stop  firewalld
# systemctl disable firewalld
  • Disable other unneeded services
# for svc in auditd postfix irqbalance remote-fs tuned rhel-configure; do systemctl disable $svc; done
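
The changes in this subsection (SELinux, swap, firewalld) otherwise only fully apply after a reboot. A minimal sketch, using standard CentOS 7 commands not shown in the original steps, to apply and verify them in the current session:

# setenforce 0                      # switch SELinux to permissive right away; the config edit above makes it permanent
# getenforce                        # expect Permissive (Disabled after a reboot)
# free -m                           # the Swap line should show 0 after swapoff -a
# systemctl is-active firewalld     # expect inactive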

1.2 Environment and Network

  • hostname
# hostname xxxx
# vim /etc/hostname
  • hosts
# cat >> /etc/hosts << EOF
10.10.xxx.47 t-master
10.10.xxx.46 t-node-01
10.10.xxx.45 t-node-02
10.10.xxx.44 t-node-03
EOF
  • Enable routing and bridge netfilter (a note on making br_netfilter persistent follows this list)
# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
net.ipv4.ip_forward = 1 
EOF
# modprobe br_netfilter 
# sysctl -p /etc/sysctl.d/k8s.conf 
  • ip_vs modules
# cat > /etc/sysconfig/modules/ipvs.modules <<EOF 
#!/bin/bash 
modprobe -- ip_vs 
modprobe -- ip_vs_rr 
modprobe -- ip_vs_wrr 
modprobe -- ip_vs_sh 
modprobe -- nf_conntrack_ipv4 
EOF
# chmod 755 /etc/sysconfig/modules/ipvs.modules 
# bash /etc/sysconfig/modules/ipvs.modules 
# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
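
Note that modprobe br_netfilter does not persist across reboots, which is exactly what triggers the kubeadm preflight error shown later in the FAQ (bridge-nf-call-iptables missing). A hedged sketch of one common way to load the module at boot via systemd-modules-load (the file name is my own choice):

# cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF
# systemctl restart systemd-modules-load
# sysctl -p /etc/sysctl.d/k8s.conf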

1.3 Install Dependencies

  • yum repositories
# mkdir /etc/yum.repos.d/bak && cp -rf /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
# wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
# yum clean all && yum makecache
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo 
[kubernetes] 
name=Kubernetes 
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 
enabled=1 
gpgcheck=0 
repo_gpgcheck=0 
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg 
 http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg 
EOF
  • Install dependencies and update packages
# yum -y install vim-enhanced wget curl net-tools conntrack-tools bind-utils socat ipvsadm ipset
# yum -y update

1.4 System Tuning

  • Kernel parameters
# cat >>/etc/sysctl.conf <<EOF
net.ipv4.ip_forward = 1
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog =  32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024  65535
EOF

# sysctl -p
  • File descriptor limits
# ulimit -n 655350

To make the limits permanent, modify the following two files:

# cat >>/etc/security/limits.conf <<EOF
* soft memlock unlimited
* hard memlock unlimited
* soft nofile 655350
* hard nofile 655350
* soft nproc 655350
* hard nproc 655350
EOF
# vim /etc/systemd/system.conf
DefaultLimitNOFILE=655350

Or:

# echo "ulimit -n 655350" >> /etc/profile
  • Load kernel modules (a verification sketch follows this list)
# cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
# chmod +x /etc/sysconfig/modules/ipvs.modules
# bash /etc/sysconfig/modules/ipvs.modules
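
As a quick sanity check of this section (my own addition, not part of the original steps), log in again and verify that the limits and modules took effect:

# ulimit -n                                                         # expect 655350
# sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables     # both should be 1
# lsmod | grep -c ip_vs                                             # non-zero means the ip_vs modules are loaded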

2. Install containerd

[Install on all nodes]

2.1 Installation

# yum install -y yum-utils device-mapper-persistent-data lvm2 
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 
# yum list | grep containerd 
# yum install containerd.io-1.4.4 -y 

2.2 Modify the Configuration

  • Generate the default configuration file
# mkdir -p /etc/containerd 
# containerd config default > /etc/containerd/config.toml 
  • Switch image registries to Aliyun mirrors
# sed -i "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/google_containers#g"  /etc/containerd/config.toml 
# sed -i "s#https://registry-1.docker.io#https://registry.cn-hangzhou.aliyuncs.com#g"  /etc/containerd/config.toml 
  • Add SystemdCgroup = true (a verification sketch follows this list)
# sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml 
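
A quick way to confirm the three edits above landed where expected (the grep patterns assume the default containerd 1.4 config layout):

# grep sandbox_image /etc/containerd/config.toml                            # should now point at registry.cn-hangzhou.aliyuncs.com
# grep -A1 'containerd.runtimes.runc.options' /etc/containerd/config.toml   # should show SystemdCgroup = true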

2.3 Start the Service

# systemctl daemon-reload 
# systemctl enable containerd 
# systemctl restart containerd 
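
Before moving on, it is worth confirming the daemon is actually reachable (a simple check of my own, not in the original):

# systemctl is-active containerd   # expect active
# ctr version                      # prints client and server versions when the socket is reachable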

2.4 Test

  • Pull an image
[root@t-master ~]# ctr images pull docker.io/library/nginx:alpine
docker.io/library/nginx:alpine:                                                   resolved       |++++++++++++++++++++++++++++++++++++++|
index-sha256:d8da873105d3eb0d1e59f188b90ec412409ac213c63c0652d287fc2e9f9b6178:    done           |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:c1f4e1974241c3f9ddb2866b2bf8e7afbceaa42dae82aabda5e946d03f054ed2: done           |++++++++++++++++++++++++++++++++++++++|
layer-sha256:29d3f97df6fd99736a0676f9e57e53dfa412cf60b26d95008df9da8197f1f366:    done           |++++++++++++++++++++++++++++++++++++++|
config-sha256:bfad9487e175364fd6315426feeee34bf5e6f516d2fe6a4e9b592315e330828e:   done           |++++++++++++++++++++++++++++++++++++++|
layer-sha256:9aae54b2144e5b2b00c610f8805128f4f86822e1e52d3714c463744a431f0f4a:    done           |++++++++++++++++++++++++++++++++++++++|
layer-sha256:5df810e1c460527fe400cdd2cab62228f5fb3da0f2dce86a6a6c354972f19b6e:    done           |++++++++++++++++++++++++++++++++++++++|
layer-sha256:a5f0adaddd5456b7c5a3753ab541b5fad750f0a6499a15f63571b964eb3e2616:    done           |++++++++++++++++++++++++++++++++++++++|
layer-sha256:e6a4c36d7c0e358e5fc02ccdac645b18b85dcfec09d4fb5f8cbdc187ce9467a0:    done           |++++++++++++++++++++++++++++++++++++++|
layer-sha256:345aee38d3533398e0eb7118e4323a8970f7615136f2170dfb2b0278bbd9099d:    done           |++++++++++++++++++++++++++++++++++++++|
elapsed: 31.7s                                                                    total:  8.7 Mi (280.9 KiB/s)
unpacking linux/amd64 sha256:d8da873105d3eb0d1e59f188b90ec412409ac213c63c0652d287fc2e9f9b6178...
done
  • List the downloaded images
[root@t-master ~]#  ctr images ls
REF                            TYPE                                                      DIGEST                                                                  SIZE    PLATFORMS                                                                                LABELS
docker.io/library/nginx:alpine application/vnd.docker.distribution.manifest.list.v2+json sha256:d8da873105d3eb0d1e59f188b90ec412409ac213c63c0652d287fc2e9f9b6178 9.4 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x -
  • Appendix: ctr vs. docker commands

id  containerd command                       docker command                           note
1   ctr image ls                             docker images                            list images
2   ctr image pull nginx                     docker pull nginx                        pull the nginx image
3   ctr image tag nginx nginx-test           docker tag nginx nginx-test              tag the nginx image
4   ctr image push nginx-test                docker push nginx-test                   push the nginx-test image
5   ctr image import nginx.tar               docker load < nginx.tar.gz               import a local image (ctr does not support compressed archives)
6   ctr run -d --env 111 nginx-test nginx    docker run -d --name=nginx nginx-test    run a container
7   ctr task ls                              docker ps                                list running containers
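
As a small end-to-end sketch of the last two rows above (the container name nginx-test is arbitrary; the nginx:alpine image was pulled earlier):

# ctr run -d docker.io/library/nginx:alpine nginx-test   # create and start a container from the pulled image
# ctr task ls                                            # the nginx-test task should show RUNNING
# ctr task kill nginx-test                               # stop it; once the task has exited, clean up:
# ctr task delete nginx-test
# ctr container delete nginx-test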

3. Kubernetes Cluster Installation

3.1 Install Packages

[Run on all nodes]

  • Install packages
    Install kubeadm, kubelet, and kubectl.

Note: make sure the Kubernetes yum repository from section 1.3 has already been added.

# yum install -y kubelet-1.20.5 kubeadm-1.20.5 kubectl-1.20.5
  • Set the runtime endpoint (a note on /etc/crictl.yaml follows this list)
# crictl config runtime-endpoint /run/containerd/containerd.sock
  • Enable and start kubelet
# systemctl daemon-reload
# systemctl enable kubelet && systemctl start kubelet
  • Configure an HTTP proxy for containerd (optional)
[root@t-master ~]# mkdir /etc/systemd/system/containerd.service.d
[root@t-master ~]# cat > /etc/systemd/system/containerd.service.d/http_proxy.conf << EOF
[Service]
Environment="HTTP_PROXY=http://10.10.222.191:808/"
EOF
# systemctl restart containerd
  • Test
#  crictl  pull nginx:alpine
#  crictl  rmi  nginx:alpine
#  crictl  images
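
For reference, the crictl config command above simply writes /etc/crictl.yaml. A sketch of how to double-check it, and optionally also set the image endpoint so crictl does not warn about falling back to the runtime endpoint:

# cat /etc/crictl.yaml                                            # should contain the runtime-endpoint set above
# crictl config image-endpoint /run/containerd/containerd.sock    # optional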

3.2 Master Initialization

[Run on the master]

3.2.1 Initialization File

  • Generate the default init configuration
# kubeadm config print init-defaults > kubeadm.yaml 

Modify the following items:

  • Change imageRepository to a domestic mirror (e.g. Aliyun).
  • Change criSocket to the containerd socket configured earlier.
  • serviceSubnet: 10.1.0.0/16 defines the subnet for Services.
  • podSubnet: 10.244.0.0/16 — the pod subnet must match the network in the flannel manifest later on, otherwise CoreDNS will not start.
  • Set the kube-proxy mode to ipvs.
  • Since containerd is the runtime, set cgroupDriver to systemd.
  • The modified file looks like this (you can also take the file below, adjust it, and initialize directly; a sketch for checking it with kubeadm follows the YAML):
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.239.47
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: t-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.1.0.0/16
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
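
Before running init, kubeadm can report which images this particular file will require, which is a cheap way to catch typos in imageRepository or kubernetesVersion (both subcommands exist in kubeadm 1.20):

# kubeadm config images list --config kubeadm.yaml
# kubeadm config images pull --config kubeadm.yaml   # optional: pre-pull using the repository from the file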

3.2.2 Pre-pull Images (optional)

  • Pull the images in advance
# kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
  • List the downloaded images
[root@t-master ~]# crictl images
IMAGE                                                                         TAG                 IMAGE ID            SIZE
docker.io/library/nginx                                                       alpine              72ab4137bd85a       9.82MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   1.7.0               bfe3a36ebd252       14MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.4.13-0            0369cf4303ffd       86.7MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.20.0             ca9843d3b5454       30.4MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.20.0             b9fa1895dcaa6       29.4MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.20.0             10cc881966cfd       49.5MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.20.0             3138b6e3d4712       14MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.2                 80d28bedfe5de       300kB


3.2.3 Initialization

# kubeadm init --config=kubeadm.yaml
  • The output looks like this:
[root@t-master ~]# kubeadm init --config=kubeadm.yaml
[init] Using Kubernetes version: v1.20.5
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local t-master] and IPs [10.96.0.1 10.10.239.47]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost t-master] and IPs [10.10.239.47 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost t-master] and IPs [10.10.239.47 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 67.001606 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node t-master as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node t-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.239.47:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b0a2ca593b614fcd25801643f32706fd54cd7d7af7838e6c381c0ffafd4b89c0
  • FAQ

If initialization fails with an error like this:

[root@t-master ~]# kubeadm init --config=kubeadm.yaml
[init] Using Kubernetes version: v1.20.5
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher

[Fix] Load the br_netfilter module and re-enable the bridge sysctl (to make this survive reboots, see the persistence note in section 1.2):

# modprobe br_netfilter
# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables

3.2.4 Using the Cluster

  • Configure kubectl
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config

If you are root, you can instead follow the printed hint and set the environment variable: export KUBECONFIG=/etc/kubernetes/admin.conf

  • Check the nodes

As shown below, there is one master node; it is NotReady because no pod network has been deployed yet.

[root@t-master ~]# kubectl get node
NAME       STATUS     ROLES                  AGE     VERSION
t-master   NotReady   control-plane,master   3h10m   v1.20.5

3.2.5 Status Notes

  • Network
[root@t-master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:82:95:fb brd ff:ff:ff:ff:ff:ff
    inet 10.10.239.47/24 brd 10.10.239.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe82:95fb/64 scope link
       valid_lft forever preferred_lft forever
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether d6:63:14:c1:f9:61 brd ff:ff:ff:ff:ff:ff
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
    link/ether aa:6f:f7:42:ce:01 brd ff:ff:ff:ff:ff:ff
    inet 10.1.0.1/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.1.0.10/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
  • Pods
[root@t-master ~]# kubectl get pod -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
coredns-54d67798b7-hf4t8           0/1     Pending   0          2m10s
coredns-54d67798b7-m5ffl           0/1     Pending   0          2m10s
etcd-t-master                      1/1     Running   0          2m17s
kube-apiserver-t-master            1/1     Running   0          2m17s
kube-controller-manager-t-master   1/1     Running   0          2m17s
kube-proxy-xj5l4                   1/1     Running   0          2m11s
kube-scheduler-t-master            1/1     Running   0          2m17s

Note:
After a short wait, the pods in kube-system gradually reach the Running state, but the coredns pods stay Pending. Describing one of them shows why: the node carries taints the pods do not tolerate (here the not-ready taint, since no pod network is installed yet).

[root@t-master ~]# kubectl describe pod coredns-54d67798b7-hf4t8 -n kube-system
  Warning  FailedScheduling  3m35s  default-scheduler  0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.

We are not going to change the master; this resolves once worker nodes join and the pod network is deployed. (If you would rather run a single-node cluster, see the sketch below.)
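
For reference only, if you wanted a single-node cluster instead of joining workers, the usual approach is to remove the master taint (we keep the taint in this walkthrough):

# kubectl taint nodes t-master node-role.kubernetes.io/master-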

3.3 Join Worker Nodes

[Run on each worker node]

kubeadm join 10.10.239.47:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b0a2ca593b614fcd25801643f32706fd54cd7d7af7838e6c381c0ffafd4b89c0
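
The bootstrap token in kubeadm.yaml has a 24h TTL, so a join attempted later may be rejected. A new join command can be generated on the master at any time:

# kubeadm token create --print-join-command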

3.4 Flannel Network

[Run on the master]
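
kube-flannel.yml is not included in this article; it is normally taken from the flannel GitHub repository (the exact URL varies by release; the path below was the commonly used one at the time). Before applying it, make sure the Network field in its net-conf matches the podSubnet from kubeadm.yaml:

# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# grep -A3 'net-conf.json' kube-flannel.yml   # "Network" should read 10.244.0.0/16, matching podSubnet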

# kubectl create -f kube-flannel.yml 
  • Check the current network interfaces

A new flannel.1 interface now exists; its address comes from the configuration in kube-flannel.yml, which must match the podSubnet used during initialization.

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:82:95:fb brd ff:ff:ff:ff:ff:ff
    inet 10.10.239.47/24 brd 10.10.239.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe82:95fb/64 scope link
       valid_lft forever preferred_lft forever
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether d6:63:14:c1:f9:61 brd ff:ff:ff:ff:ff:ff
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
    link/ether aa:6f:f7:42:ce:01 brd ff:ff:ff:ff:ff:ff
    inet 10.1.0.1/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.1.0.10/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
5: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether 5e:66:53:43:02:ed brd ff:ff:ff:ff:ff:ff
    inet 10.244.0.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::5c66:53ff:fe43:2ed/64 scope link
       valid_lft forever preferred_lft forever
  • Check the pods

CoreDNS is now running:

[root@t-master ~]# kubectl get pod -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
coredns-54d67798b7-bc2lc           1/1     Running   0          49m
coredns-54d67798b7-lmqtx           1/1     Running   0          49m
etcd-t-master                      1/1     Running   0          50m
kube-apiserver-t-master            1/1     Running   0          50m
kube-controller-manager-t-master   1/1     Running   0          50m
kube-flannel-ds-amd64-mhvhj        1/1     Running   0          31m
kube-flannel-ds-amd64-sldgb        1/1     Running   0          31m
kube-proxy-dtnc6                   1/1     Running   0          49m
kube-proxy-tkcr6                   1/1     Running   0          40m
kube-scheduler-t-master            1/1     Running   0          50m
  • Check the nodes

The nodes are now Ready:

[root@t-master ~]# kubectl get node
NAME        STATUS   ROLES                  AGE   VERSION
t-master    Ready    control-plane,master   59m   v1.20.5
t-node-01   Ready    <none>                 49m   v1.20.5
