Manually Building and Deploying a Highly Available Kubernetes Cluster

Cluster version information

  • OS version: CentOS Linux release 7.7.1908
  • Kernel: 5.4.12-1.el7.elrepo.x86_64
  • Kubernetes: 1.14.8
  • etcd: 3.3.18
  • flanneld: 0.11.0
  • docker: 19.03.5

Deploy haproxy

yum install -y haproxy
systemctl start haproxy
systemctl enable haproxy

haproxy configuration file:

global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     20480
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option                  forwardfor except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 20480
    stats refresh           5

listen admin_status
    bind  0.0.0.0:8000
    mode  http
    log   127.0.0.1 local3 err
    stats refresh 5s
    stats uri /
    #stats realm
    stats auth admin:admin
    stats hide-version

listen apiserver
  bind 10.0.0.14:6443
  balance  source
  mode  tcp
  option  tcpka
  option  tcplog
  server master1 10.0.0.15:6443 check inter 2000 rise 2 fall 5
  server master2 10.0.0.18:6443 check inter 2000 rise 2 fall 5
  server master3 10.0.0.21:6443 check inter 2000 rise 2 fall 5
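After restarting haproxy with this configuration, it is worth verifying that it is actually listening on the stats and apiserver ports. Note that binding the VIP 10.0.0.14 on a node that does not currently hold it relies on net.ipv4.ip_nonlocal_bind=1, which the kernel-parameter step below sets. A minimal check:

systemctl restart haproxy
# Expect haproxy on 0.0.0.0:8000 (stats) and 10.0.0.14:6443 (apiserver)
ss -lntp | grep -E ':(8000|6443)'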

Deploy keepalived

yum install -y keepalived
systemctl enable keepalived

Edit the keepalived configuration files:

# Configuration file for master-1
global_defs {
   notification_email {
     abc@sss.com
   }
   notification_email_from abc@sss.com
   smtp_server smtp.sss.com
   smtp_connect_timeout 30
   router_id k8s-master-1
}
vrrp_script check_haproxy {
    script " /etc/keepalived/check_haproxy.sh"
    interval 3
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    track_interface {
        eth0
    }
    virtual_router_id 100
    priority 100
    advert_int 1
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 6666
    }
    virtual_ipaddress {
        10.0.0.14 dev eth0
    }
    track_script {
        check_haproxy
    }


}

# Configuration file for master-2
global_defs {
   notification_email {
     abc@sss.com
   }
   notification_email_from abc@sss.com
   smtp_server smtp.sss.com
   smtp_connect_timeout 30
   router_id k8s-master-2
}
vrrp_script check_haproxy {
    script " /etc/keepalived/check_haproxy.sh"
    interval 3
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    track_interface {
        eth0
    }
    virtual_router_id 100
    priority 80
    advert_int 1
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 6666
    }
    virtual_ipaddress {
        10.0.0.14 dev eth0
    }
    track_script {
        check_haproxy
    }


}

# Configuration file for master-3
global_defs {
   notification_email {
     abc@sss.com
   }
   notification_email_from abc@sss.com
   smtp_server smtp.sss.com
   smtp_connect_timeout 30
   router_id k8s-master-3
}
vrrp_script check_haproxy {
    script " /etc/keepalived/check_haproxy.sh"
    interval 3
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    track_interface {
        eth0
    }
    virtual_router_id 100
    priority 60
    advert_int 1
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 6666
    }
    virtual_ipaddress {
        10.0.0.14 dev eth0
    }
    track_script {
        check_haproxy
    }


}
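All three configurations reference /etc/keepalived/check_haproxy.sh, which is not shown above. The exact script is left open; a minimal sketch that satisfies the vrrp_script contract (exit non-zero when haproxy is down, so keepalived subtracts the weight of 20 from this node's priority):

cat > /etc/keepalived/check_haproxy.sh <<'EOF'
#!/bin/bash
# Exit non-zero when haproxy is not running; keepalived then lowers this
# node's priority by the configured weight, letting a healthier master
# take over the VIP.
pidof haproxy >/dev/null || exit 1
EOF
chmod +x /etc/keepalived/check_haproxy.sh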

Start keepalived:

systemctl start keepalived
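Once keepalived is running on all three masters, exactly one of them should hold the VIP. A quick check on each master:

# Only the node currently holding the VIP prints a line
ip addr show eth0 | grep -w 10.0.0.14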

Add the host names to /etc/hosts:

cat >> /etc/hosts <<EOF
10.0.0.15  k8s-master-1
10.0.0.18  k8s-master-2
10.0.0.21  k8s-master-3
10.0.0.20  k8s-node-1
10.0.0.16  k8s-node-2
10.0.0.33  k8s-node-3
10.0.0.17  k8s-node-4
10.0.0.26  k8s-node-5
10.0.0.12  k8s-node-6
10.0.0.11  k8s-node-7
10.0.0.13  k8s-node-8
10.0.0.41  k8s-node-9
10.0.0.19  k8s-node-10
10.0.0.45  k8s-node-11
10.0.0.23  k8s-node-12
10.0.0.34  k8s-node-13
10.0.0.29  k8s-node-14
10.0.0.22  k8s-node-15
EOF

Add the docker user:

useradd -m docker

Set up passwordless SSH login:

# Run this on a single master node only
ssh-keygen -t rsa
ssh-copy-id root@k8s-master-1
ssh-copy-id root@k8s-master-2
ssh-copy-id root@k8s-master-3
ssh-copy-id root@k8s-node-1
ssh-copy-id root@k8s-node-2
ssh-copy-id root@k8s-node-3
ssh-copy-id root@k8s-node-4
ssh-copy-id root@k8s-node-5
ssh-copy-id root@k8s-node-6
ssh-copy-id root@k8s-node-7
ssh-copy-id root@k8s-node-8
ssh-copy-id root@k8s-node-9
ssh-copy-id root@k8s-node-10
ssh-copy-id root@k8s-node-11
ssh-copy-id root@k8s-node-12
ssh-copy-id root@k8s-node-13
ssh-copy-id root@k8s-node-14
ssh-copy-id root@k8s-node-15
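The repetition above can equally be written as a loop; a sketch assuming the host names from /etc/hosts resolve (each host still prompts once for the root password):

for host in k8s-master-{1..3} k8s-node-{1..15}
  do
    ssh-copy-id root@${host}
  done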

Update the PATH variable:

echo 'PATH=/opt/k8s/bin:$PATH' >>/root/.bashrc
source /root/.bashrc

Install dependency packages:

yum install -y epel-release
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget

Disable the firewall:

systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT

Disable the swap partition:

swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab 

Disable SELinux:

setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

Disable dnsmasq:

systemctl stop dnsmasq
systemctl disable dnsmasq

Load kernel modules:

modprobe ip_vs_rr
modprobe br_netfilter

Tune kernel parameters:

cat > /etc/sysctl.d/kubernetes.conf <<EOF
# Basic system kernel tuning
vm.swappiness = 0
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv4.neigh.default.gc_stale_time=120
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
net.ipv4.tcp_max_tw_buckets = 65000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_synack_retries = 2
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.ip_nonlocal_bind = 1

# Kubernetes-specific kernel tuning
net.ipv4.tcp_slow_start_after_idle=0
net.core.rmem_max=16777216
fs.inotify.max_user_watches=524288
kernel.softlockup_all_cpu_backtrace=1
kernel.softlockup_panic=1
fs.file-max=2097152
fs.inotify.max_user_instances=8192
fs.inotify.max_queued_events=16384
vm.max_map_count=262144
fs.may_detach_mounts=1
net.core.netdev_max_backlog=16384
net.ipv4.tcp_wmem=4096 12582912 16777216
net.core.wmem_max=16777216
net.core.somaxconn=32768
net.ipv4.tcp_max_syn_backlog=819200
net.bridge.bridge-nf-call-iptables=1
net.ipv4.tcp_rmem=4096 12582912 16777216
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf
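It is worth confirming that the critical parameters actually took effect; net.bridge.bridge-nf-call-iptables in particular fails to apply unless the br_netfilter module is loaded:

# Verify the parameters kube-proxy and flannel depend on
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward vm.swappiness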

Set the system time:

Set the system time zone:
timedatectl set-timezone Asia/Shanghai

Write the current UTC time to the hardware clock:

timedatectl set-local-rtc 0

Restart services that depend on the system time:

systemctl restart rsyslog 
systemctl restart crond

Stop unneeded services:

systemctl stop postfix && systemctl disable postfix

Configure systemd-journald:

 
mkdir /var/log/journal # directory for persistent log storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress historical logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Maximum disk usage: 10G
SystemMaxUse=10G

# Maximum size per log file: 200M
SystemMaxFileSize=200M

# Retain logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF

systemctl restart systemd-journald

Create the required directories:

mkdir -p  /opt/k8s/{bin,work} /etc/{kubernetes,etcd}/cert

Upgrade the kernel:
Refer to the linked guide for the kernel upgrade steps.

Disable NUMA:

cp /etc/default/grub{,.bak}
vim /etc/default/grub # add the `numa=off` parameter to the GRUB_CMDLINE_LINUX line, as shown below:
diff /etc/default/grub.bak /etc/default/grub
6c6
< GRUB_CMDLINE_LINUX="crashkernel=auto console=tty0 console=ttyS0,115200n8"
---
> GRUB_CMDLINE_LINUX="crashkernel=auto console=tty0 console=ttyS0,115200n8 numa=off"

# Regenerate the grub2 configuration file
cp /boot/grub2/grub.cfg{,.bak}
grub2-mkconfig -o /boot/grub2/grub.cfg
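After the next reboot, confirm that the running kernel picked up the parameter:

# Should print numa=off once the machine has rebooted with the new config
grep -o 'numa=off' /proc/cmdline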

Cluster parameter configuration script (environment.sh):

#!/usr/bin/bash


# Encryption key required by the EncryptionConfig
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)


# Array of cluster machine IPs
# Note: the three variants below target different cluster roles (masters, workers, all nodes); uncomment the group matching the component being deployed
export NODE_IPS=(10.0.0.15 10.0.0.18 10.0.0.21)
#export NODE_IPS=(10.0.0.20 10.0.0.16 10.0.0.33 10.0.0.17 10.0.0.26 10.0.0.12 10.0.0.11 10.0.0.13 10.0.0.41 10.0.0.19 10.0.0.45 10.0.0.23 10.0.0.34 10.0.0.29 10.0.0.22)
#export NODE_IPS=(10.0.0.15 10.0.0.18 10.0.0.21 10.0.0.20 10.0.0.16 10.0.0.33 10.0.0.17 10.0.0.26 10.0.0.12 10.0.0.11 10.0.0.13 10.0.0.41 10.0.0.19 10.0.0.45 10.0.0.23 10.0.0.34 10.0.0.29 10.0.0.22)


# Array of host names corresponding to the cluster IPs
# Note: as above, three variants for masters, workers, and all nodes; uncomment the group matching the component being deployed
export NODE_NAMES=(k8s-master-1 k8s-master-2 k8s-master-3)
#export NODE_NAMES=(k8s-node-1 k8s-node-2 k8s-node-3 k8s-node-4 k8s-node-5 k8s-node-6 k8s-node-7 k8s-node-8 k8s-node-9 k8s-node-10 k8s-node-11 k8s-node-12 k8s-node-13 k8s-node-14 k8s-node-15)
#export NODE_NAMES=(k8s-master-1 k8s-master-2 k8s-master-3 k8s-node-1 k8s-node-2 k8s-node-3 k8s-node-4 k8s-node-5 k8s-node-6 k8s-node-7 k8s-node-8 k8s-node-9 k8s-node-10 k8s-node-11 k8s-node-12 k8s-node-13 k8s-node-14 k8s-node-15)


# etcd cluster client endpoint list
export ETCD_ENDPOINTS="https://10.0.0.15:2379,https://10.0.0.18:2379,https://10.0.0.21:2379"


# IPs and ports for etcd peer communication
export ETCD_NODES="k8s-master-1=https://10.0.0.15:2380,k8s-master-2=https://10.0.0.18:2380,k8s-master-3=https://10.0.0.21:2380"


# Address and port of the kube-apiserver reverse proxy (here, the haproxy VIP)
export KUBE_APISERVER="https://10.0.0.14:6443"


# Network interface used for inter-node traffic
export IFACE="eth0"


# etcd data directory
export ETCD_DATA_DIR="/data/k8s/etcd/data"


# etcd WAL directory; preferably an SSD partition, or at least a different partition from ETCD_DATA_DIR
export ETCD_WAL_DIR="/data/k8s/etcd/wal"


# Data directory for the Kubernetes components
export K8S_DIR="/data/k8s/k8s"


# Docker data directory
export DOCKER_DIR="/data/k8s/docker"


## The parameters below normally need no changes


# Token used for TLS bootstrapping; generate one with: head -c 16 /dev/urandom | od -An -t x | tr -d ' '
BOOTSTRAP_TOKEN="41f7e4ba8b7be874fcff18bf5cf41a7c"


# Prefer currently unused address ranges for the service and Pod networks


# Service CIDR; unroutable before deployment, routable inside the cluster afterwards (guaranteed by kube-proxy)
SERVICE_CIDR="192.168.0.0/19"


# Pod CIDR, ideally a /16; unroutable before deployment, routable inside the cluster afterwards (guaranteed by flanneld)
CLUSTER_CIDR="10.17.0.0/17"


# Service port range (NodePort range)
export NODE_PORT_RANGE="30000-32767"


# etcd prefix for the flanneld network configuration
export FLANNEL_ETCD_PREFIX="/kubernetes/network"


# Kubernetes service IP (usually the first IP in SERVICE_CIDR)
export CLUSTER_KUBERNETES_SVC_IP="192.168.0.1"


# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)
export CLUSTER_DNS_SVC_IP="192.168.0.2"


# Cluster DNS domain (no trailing dot)
export CLUSTER_DNS_DOMAIN="cluster.local"


# Add the binary directory /opt/k8s/bin to PATH
export PATH=/opt/k8s/bin:$PATH

################
# All environment variables used later are defined in environment.sh; adjust them for your machines and network, then copy the file to /opt/k8s/bin on every node:

Distribute the script:

source environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp environment.sh root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Install the cfssl tools:

sudo mkdir -p /opt/k8s/cert && cd /opt/k8s
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
mv cfssl_linux-amd64 /opt/k8s/bin/cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssljson_linux-amd64 /opt/k8s/bin/cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /opt/k8s/bin/cfssl-certinfo

chmod +x /opt/k8s/bin/*
export PATH=/opt/k8s/bin:$PATH
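A quick sanity check that the toolchain is on PATH and executable:

cfssl version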

Create the certificate configuration file:

cd /opt/k8s/work
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF

Create the CA certificate signing request:

cd /opt/k8s/work
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "sss"
    }
  ],
  "ca": {
    "expiry": "876000h"
 }
}
EOF

Generate the CA certificate and private key:

cd /opt/k8s/work
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*
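The generated CA can be inspected to confirm the subject fields and the long validity requested in ca-csr.json:

# Show the CN, O/OU, and not_after date of the new CA
cfssl-certinfo -cert ca.pem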

Distribute the certificate files:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p /etc/kubernetes/cert"
    scp ca*.pem ca-config.json root@${node_ip}:/etc/kubernetes/cert
  done

Deploy kubectl

cd /opt/k8s/work
wget https://dl.k8s.io/v1.14.8/kubernetes-client-linux-amd64.tar.gz
tar zxf kubernetes-client-linux-amd64.tar.gz

Distribute kubectl to the nodes that will use it:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kubernetes/client/bin/kubectl root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Create the admin certificate and private key:

cd /opt/k8s/work
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "sss"
    }
  ]
}
EOF

Generate the certificate and private key:

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*

Create the kubeconfig file:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubectl.kubeconfig

# Set client authentication parameters
kubectl config set-credentials admin \
  --client-certificate=/opt/k8s/work/admin.pem \
  --client-key=/opt/k8s/work/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=kubectl.kubeconfig

# Set context parameters
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=kubectl.kubeconfig

# Set the default context
kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig

Distribute the kubeconfig file:

# Distribute to every node that uses the kubectl command
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p ~/.kube"
    scp kubectl.kubeconfig root@${node_ip}:~/.kube/config
  done

Deploy etcd

cd /opt/k8s/work
wget https://github.com/etcd-io/etcd/releases/download/v3.3.18/etcd-v3.3.18-linux-amd64.tar.gz
tar zxf etcd-v3.3.18-linux-amd64.tar.gz

Distribute the etcd binaries:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp etcd-v3.3.18-linux-amd64/etcd* root@${node_ip}:/opt/k8s/bin
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Create the etcd certificate and private key:

cd /opt/k8s/work
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "10.0.0.14",
    "10.0.0.15",
    "10.0.0.18",
    "10.0.0.21"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "sss"
    }
  ]
}
EOF

Generate the certificate and private key:

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
    -ca-key=/opt/k8s/work/ca-key.pem \
    -config=/opt/k8s/work/ca-config.json \
    -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
ls etcd*pem

Distribute the certificate and private key to the etcd nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p /etc/etcd/cert"
    scp etcd*.pem root@${node_ip}:/etc/etcd/cert/
  done

Create the etcd systemd unit template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh

cat > etcd.service.template <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=${ETCD_DATA_DIR}
ExecStart=/opt/k8s/bin/etcd \\
  --data-dir=${ETCD_DATA_DIR} \\
  --wal-dir=${ETCD_WAL_DIR} \\
  --name=##NODE_NAME## \\
  --cert-file=/etc/etcd/cert/etcd.pem \\
  --key-file=/etc/etcd/cert/etcd-key.pem \\
  --trusted-ca-file=/etc/kubernetes/cert/ca.pem \\
  --peer-cert-file=/etc/etcd/cert/etcd.pem \\
  --peer-key-file=/etc/etcd/cert/etcd-key.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/cert/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --listen-peer-urls=https://##NODE_IP##:2380 \\
  --initial-advertise-peer-urls=https://##NODE_IP##:2380 \\
  --listen-client-urls=https://##NODE_IP##:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls=https://##NODE_IP##:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=${ETCD_NODES} \\
  --initial-cluster-state=new \\
  --auto-compaction-mode=periodic \\
  --auto-compaction-retention=1 \\
  --max-request-bytes=33554432 \\
  --quota-backend-bytes=6442450944 \\
  --heartbeat-interval=250 \\
  --election-timeout=2000
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Create and distribute the etcd systemd unit files:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" etcd.service.template > etcd-${NODE_IPS[i]}.service 
  done
ls *.service

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp etcd-${node_ip}.service root@${node_ip}:/etc/systemd/system/etcd.service
  done

Start etcd:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p ${ETCD_DATA_DIR} ${ETCD_WAL_DIR}"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd " &
  done

Check the startup result:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status etcd|grep Active"
  done

# If anything looks wrong, inspect the logs
journalctl -u etcd

Verify the service:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ETCDCTL_API=3 /opt/k8s/bin/etcdctl \
    --endpoints=https://${node_ip}:2379 \
    --cacert=/etc/kubernetes/cert/ca.pem \
    --cert=/etc/etcd/cert/etcd.pem \
    --key=/etc/etcd/cert/etcd-key.pem endpoint health
  done

Output (from the status check above):

>>> 10.0.0.26
   Active: active (running) since Sun 2020-02-02 17:19:44 CST; 57s ago
>>> 10.0.0.6
   Active: active (running) since Sun 2020-02-02 17:19:45 CST; 57s ago
>>> 10.0.0.13
   Active: active (running) since Sun 2020-02-02 17:19:46 CST; 57s ago

Check the current leader:

source /opt/k8s/bin/environment.sh
ETCDCTL_API=3 /opt/k8s/bin/etcdctl \
  -w table --cacert=/etc/kubernetes/cert/ca.pem \
  --cert=/etc/etcd/cert/etcd.pem \
  --key=/etc/etcd/cert/etcd-key.pem \
  --endpoints=${ETCD_ENDPOINTS} endpoint status 

# Output
+-------------------------+------------------+---------+---------+-----------+-----------+------------+
|        ENDPOINT         |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+-------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://10.0.0.26:2379 |  45963767a0c532e |  3.3.18 |   20 kB |      true |         2 |          8 |
|  https://10.0.0.6:2379 | 6946bb21010b954b |  3.3.18 |   20 kB |     false |         2 |          8 |
| https://10.0.0.13:2379 | 4b8cb048b02115d4 |  3.3.18 |   20 kB |     false |         2 |          8 |
+-------------------------+------------------+---------+---------+-----------+-----------+------------+

Deploy flanneld

cd /opt/k8s/work
mkdir flannel
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
tar -xzvf flannel-v0.11.0-linux-amd64.tar.gz -C flannel

Distribute flanneld to all cluster nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp flannel/{flanneld,mk-docker-opts.sh} root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Create the flanneld certificate and private key:

cd /opt/k8s/work
cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "sss"
    }
  ]
}
EOF

Generate the certificate and private key:

cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
ls flanneld*pem

Distribute the certificate and private key to all nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p /etc/flanneld/cert"
    scp flanneld*.pem root@${node_ip}:/etc/flanneld/cert
  done

Write the Pod network configuration into etcd:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/opt/k8s/work/ca.pem \
  --cert-file=/opt/k8s/work/flanneld.pem \
  --key-file=/opt/k8s/work/flanneld-key.pem \
  mk ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'

Create the flanneld systemd unit file:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service


[Service]
Type=notify
ExecStart=/opt/k8s/bin/flanneld \\
  -etcd-cafile=/etc/kubernetes/cert/ca.pem \\
  -etcd-certfile=/etc/flanneld/cert/flanneld.pem \\
  -etcd-keyfile=/etc/flanneld/cert/flanneld-key.pem \\
  -etcd-endpoints=${ETCD_ENDPOINTS} \\
  -etcd-prefix=${FLANNEL_ETCD_PREFIX} \\
  -iface=${IFACE} \\
  -ip-masq
ExecStartPost=/opt/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=always
RestartSec=5
StartLimitInterval=0


[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

Distribute the flanneld systemd unit to the other nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp flanneld.service root@${node_ip}:/etc/systemd/system/
  done

Start flanneld:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld"
  done

Check the startup result:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status flanneld|grep Active"
  done

# View the logs
journalctl -u flanneld

View the flannel network configuration:

source /opt/k8s/bin/environment.sh
etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/cert/ca.pem \
  --cert-file=/etc/flanneld/cert/flanneld.pem \
  --key-file=/etc/flanneld/cert/flanneld-key.pem \
  get ${FLANNEL_ETCD_PREFIX}/config

View the Pod subnet list:

source /opt/k8s/bin/environment.sh
etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/cert/ca.pem \
  --cert-file=/etc/flanneld/cert/flanneld.pem \
  --key-file=/etc/flanneld/cert/flanneld-key.pem \
  ls ${FLANNEL_ETCD_PREFIX}/subnets

# Output
/kubernetes/network/subnets/10.17.28.0-24
/kubernetes/network/subnets/10.17.54.0-24
/kubernetes/network/subnets/10.17.79.0-24
/kubernetes/network/subnets/10.17.13.0-24
/kubernetes/network/subnets/10.17.48.0-24
/kubernetes/network/subnets/10.17.76.0-24
/kubernetes/network/subnets/10.17.61.0-24
/kubernetes/network/subnets/10.17.4.0-24
/kubernetes/network/subnets/10.17.80.0-24
/kubernetes/network/subnets/10.17.92.0-24
/kubernetes/network/subnets/10.17.84.0-24
/kubernetes/network/subnets/10.17.5.0-24
/kubernetes/network/subnets/10.17.85.0-24
/kubernetes/network/subnets/10.17.58.0-24
/kubernetes/network/subnets/10.17.102.0-24
/kubernetes/network/subnets/10.17.65.0-24
/kubernetes/network/subnets/10.17.82.0-24
/kubernetes/network/subnets/10.17.64.0-24

View the details of a single Pod subnet:

source /opt/k8s/bin/environment.sh
etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/cert/ca.pem \
  --cert-file=/etc/flanneld/cert/flanneld.pem \
  --key-file=/etc/flanneld/cert/flanneld-key.pem \
  get ${FLANNEL_ETCD_PREFIX}/subnets/10.17.28.0-24

Verify connectivity between the subnets:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh ${node_ip} "/usr/sbin/ip addr show flannel.1|grep -w inet"
  done

# Output
>>> 10.0.0.15
    inet 10.17.82.0/32 scope global flannel.1
>>> 10.0.0.18
    inet 10.17.64.0/32 scope global flannel.1
>>> 10.0.0.21
    inet 10.17.92.0/32 scope global flannel.1
>>> 10.0.0.20
    inet 10.17.13.0/32 scope global flannel.1
>>> 10.0.0.16
    inet 10.17.54.0/32 scope global flannel.1
>>> 10.0.0.33
    inet 10.17.84.0/32 scope global flannel.1
>>> 10.0.0.17
    inet 10.17.79.0/32 scope global flannel.1
>>> 10.0.0.26
    inet 10.17.5.0/32 scope global flannel.1
>>> 10.0.0.12
    inet 10.17.48.0/32 scope global flannel.1
>>> 10.0.0.11
    inet 10.17.85.0/32 scope global flannel.1
>>> 10.0.0.13
    inet 10.17.58.0/32 scope global flannel.1
>>> 10.0.0.41
    inet 10.17.76.0/32 scope global flannel.1
>>> 10.0.0.19
    inet 10.17.28.0/32 scope global flannel.1
>>> 10.0.0.45
    inet 10.17.61.0/32 scope global flannel.1
>>> 10.0.0.23
    inet 10.17.4.0/32 scope global flannel.1
>>> 10.0.0.34
    inet 10.17.102.0/32 scope global flannel.1
>>> 10.0.0.29
    inet 10.17.65.0/32 scope global flannel.1
>>> 10.0.0.22
    inet 10.17.80.0/32 scope global flannel.1

Ping between all nodes:

for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh ${node_ip} "ping -c 1 10.17.82.0"
    ssh ${node_ip} "ping -c 1 10.17.64.0"
    ssh ${node_ip} "ping -c 1 10.17.92.0"
    ssh ${node_ip} "ping -c 1 10.17.13.0"
    ssh ${node_ip} "ping -c 1 10.17.54.0"
    ssh ${node_ip} "ping -c 1 10.17.84.0"
    ssh ${node_ip} "ping -c 1 10.17.79.0"
    ssh ${node_ip} "ping -c 1 10.17.5.0"
    ssh ${node_ip} "ping -c 1 10.17.48.0"
    ssh ${node_ip} "ping -c 1 10.17.58.0"
    ssh ${node_ip} "ping -c 1 10.17.85.0"
    ssh ${node_ip} "ping -c 1 10.17.76.0"
    ssh ${node_ip} "ping -c 1 10.17.28.0"
    ssh ${node_ip} "ping -c 1 10.17.61.0"
    ssh ${node_ip} "ping -c 1 10.17.4.0"
    ssh ${node_ip} "ping -c 1 10.17.102.0"
    ssh ${node_ip} "ping -c 1 10.17.65.0"
    ssh ${node_ip} "ping -c 1 10.17.80.0"
  done

Deploy kubernetes-server

cd /opt/k8s/work
wget https://dl.k8s.io/v1.14.8/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
tar -xzvf  kubernetes-src.tar.gz

Copy the binaries to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kubernetes/server/bin/{apiextensions-apiserver,cloud-controller-manager,kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubeadm,kubectl,kubelet,mounter} root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Create the kubernetes certificate and private key:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.0.0.14",
    "10.0.0.15",
    "10.0.0.18",
    "10.0.0.21",
    "${CLUSTER_KUBERNETES_SVC_IP}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local."
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "sss"
    }
  ]
}
EOF

Generate the certificate and private key:

cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
ls kubernetes*pem

Distribute the generated certificate and private key to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p /etc/kubernetes/cert"
    scp kubernetes*.pem root@${node_ip}:/etc/kubernetes/cert/
  done

Create the encryption configuration file:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF

Copy the encryption configuration file to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp encryption-config.yaml root@${node_ip}:/etc/kubernetes/
  done

Create the audit policy file:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > audit-policy.yaml <<EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch


  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get


  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update


  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get


  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list


  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'


  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events


  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch


  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch


  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection


  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews
  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch


  # Default level for known APIs
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
      
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
EOF

Distribute the audit policy file:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp audit-policy.yaml root@${node_ip}:/etc/kubernetes/audit-policy.yaml
  done

Create the certificate used by metrics-server:

cat > proxy-client-csr.json <<EOF
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "sss"
    }
  ]
}
EOF

Generate the certificate and private key:

cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \
  -ca-key=/etc/kubernetes/cert/ca-key.pem  \
  -config=/etc/kubernetes/cert/ca-config.json  \
  -profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client
ls proxy-client*.pem

Distribute the certificate and private key to the master nodes:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp proxy-client*.pem root@${node_ip}:/etc/kubernetes/cert/
  done

Create the apiserver systemd unit template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-apiserver.service.template <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target


[Service]
WorkingDirectory=${K8S_DIR}/kube-apiserver
ExecStart=/opt/k8s/bin/kube-apiserver \\
  --advertise-address=##NODE_IP## \\
  --default-not-ready-toleration-seconds=360 \\
  --default-unreachable-toleration-seconds=360 \\
  --feature-gates=DynamicAuditing=true \\
  --max-mutating-requests-inflight=2000 \\
  --max-requests-inflight=4000 \\
  --default-watch-cache-size=200 \\
  --delete-collection-workers=2 \\
  --encryption-provider-config=/etc/kubernetes/encryption-config.yaml \\
  --etcd-cafile=/etc/kubernetes/cert/ca.pem \\
  --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \\
  --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \\
  --etcd-servers=${ETCD_ENDPOINTS} \\
  --bind-address=##NODE_IP## \\
  --secure-port=6443 \\
  --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \\
  --insecure-port=0 \\
  --audit-dynamic-configuration \\
  --audit-log-maxage=15 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-truncate-enabled \\
  --audit-log-path=${K8S_DIR}/kube-apiserver/audit.log \\
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
  --profiling \\
  --anonymous-auth=false \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --enable-bootstrap-token-auth \\
  --requestheader-allowed-names="aggregator" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --service-account-key-file=/etc/kubernetes/cert/ca.pem \\
  --authorization-mode=Node,RBAC \\
  --runtime-config=api/all=true \\
  --enable-admission-plugins=NodeRestriction \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --event-ttl=168h \\
  --kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem \\
  --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \\
  --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \\
  --kubelet-https=true \\
  --kubelet-timeout=10s \\
  --proxy-client-cert-file=/etc/kubernetes/cert/proxy-client.pem \\
  --proxy-client-key-file=/etc/kubernetes/cert/proxy-client-key.pem \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --service-node-port-range=${NODE_PORT_RANGE} \\
  --enable-aggregator-routing=true \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target
EOF

Substitute the variables in the apiserver systemd template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-apiserver.service.template > kube-apiserver-${NODE_IPS[i]}.service 
  done
ls kube-apiserver*.service

Distribute the systemd unit files:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-apiserver-${node_ip}.service root@${node_ip}:/etc/systemd/system/kube-apiserver.service
  done

Start the apiserver service:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-apiserver"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver"
  done

Check the apiserver status:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status kube-apiserver |grep 'Active:'"
  done

Check the data the apiserver wrote into etcd:

source /opt/k8s/bin/environment.sh
ETCDCTL_API=3 etcdctl \
    --endpoints=${ETCD_ENDPOINTS} \
    --cacert=/opt/k8s/work/ca.pem \
    --cert=/opt/k8s/work/etcd.pem \
    --key=/opt/k8s/work/etcd-key.pem \
    get /registry/ --prefix --keys-only
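Because secrets are encrypted at rest via encryption-config.yaml, it is also worth confirming that what the apiserver stores in etcd is ciphertext. A sketch using a throwaway secret (the name test-enc is arbitrary):

kubectl create secret generic test-enc --from-literal=foo=bar
ETCDCTL_API=3 etcdctl \
    --endpoints=${ETCD_ENDPOINTS} \
    --cacert=/opt/k8s/work/ca.pem \
    --cert=/opt/k8s/work/etcd.pem \
    --key=/opt/k8s/work/etcd-key.pem \
    get /registry/secrets/default/test-enc | hexdump -C | head
# The stored value should begin with k8s:enc:aescbc:v1:key1, not readable JSON
kubectl delete secret test-enc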

Check cluster information:

[root@k8s-master-1 ~]# kubectl cluster-info
Kubernetes master is running at https://10.0.0.8:6443


To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@k8s-master-1 ~]# kubectl get all --all-namespaces
NAMESPACE   NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   98m
[root@k8s-master-1 ~]# kubectl get componentstatuses
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused   
etcd-0               Healthy     {"health":"true"}                                                                           
etcd-2               Healthy     {"health":"true"}                                                                           
etcd-1               Healthy     {"health":"true"}

# controller-manager and scheduler show Unhealthy here because they have not been deployed yet; this is expected at this stage.

Check the apiserver listening port:

netstat -lnpt|grep kube
# Output
tcp        0      0 10.0.0.26:6443         0.0.0.0:*               LISTEN      14986/kube-apiserve

Grant the apiserver access to the kubelet API:

kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

Create the controller-manager certificate and private key

cd /opt/k8s/work
cat > kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "10.0.0.14",
      "10.0.0.15",
      "10.0.0.18",
      "10.0.0.21"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-controller-manager",
        "OU": "sss"
      }
    ]
}
EOF

Generate the certificate and private key:

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*pem

Distribute the certificate and private key to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-controller-manager*.pem root@${node_ip}:/etc/kubernetes/cert/
  done

Create the kubeconfig file:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig


kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.pem \
  --client-key=kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig


kubectl config set-context system:kube-controller-manager \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig


kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

Distribute the kubeconfig file to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-controller-manager.kubeconfig root@${node_ip}:/etc/kubernetes/
  done

Create the controller-manager systemd unit template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-controller-manager.service.template <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes


[Service]
WorkingDirectory=${K8S_DIR}/kube-controller-manager
ExecStart=/opt/k8s/bin/kube-controller-manager \\
  --profiling \\
  --cluster-name=kubernetes \\
  --controllers=*,bootstrapsigner,tokencleaner \\
  --kube-api-qps=1000 \\
  --kube-api-burst=2000 \\
  --leader-elect \\
  --use-service-account-credentials\\
  --concurrent-service-syncs=2 \\
  --bind-address=##NODE_IP## \\
  --secure-port=10252 \\
  --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \\
  --port=0 \\
  --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-allowed-names="" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \\
  --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --experimental-cluster-signing-duration=876000h \\
  --horizontal-pod-autoscaler-sync-period=10s \\
  --concurrent-deployment-syncs=10 \\
  --concurrent-gc-syncs=30 \\
  --node-cidr-mask-size=24 \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --pod-eviction-timeout=6m \\
  --terminated-pod-gc-threshold=10000 \\
  --root-ca-file=/etc/kubernetes/cert/ca.pem \\
  --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5


[Install]
WantedBy=multi-user.target
EOF

Substitute the variables in the template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-controller-manager.service.template > kube-controller-manager-${NODE_IPS[i]}.service 
  done
ls kube-controller-manager*.service

Distribute to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-controller-manager-${node_ip}.service root@${node_ip}:/etc/systemd/system/kube-controller-manager.service
  done

Start the controller-manager service:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-controller-manager"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager"
  done

Check the service status:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status kube-controller-manager|grep Active"
  done

# View the logs
journalctl -u kube-controller-manager

Check the listening port:

netstat -lnpt | grep kube-cont

Check the exported metrics:

curl -s --cacert /opt/k8s/work/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://10.0.0.15:10252/metrics |head

Check the current leader:

kubectl get endpoints kube-controller-manager --namespace=kube-system  -o yaml

Create the kube-scheduler certificate signing request:

cd /opt/k8s/work
cat > kube-scheduler-csr.json <<EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "10.0.0.14",
      "10.0.0.15",
      "10.0.0.18",
      "10.0.0.21"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-scheduler",
        "OU": "sss"
      }
    ]
}
EOF

Generate the certificate and private key:

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
ls kube-scheduler*pem

Distribute the certificate and private key to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-scheduler*.pem root@${node_ip}:/etc/kubernetes/cert/
  done

Create the kubeconfig file:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig


kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.pem \
  --client-key=kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig


kubectl config set-context system:kube-scheduler \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig


kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

Distribute the kubeconfig to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-scheduler.kubeconfig root@${node_ip}:/etc/kubernetes/
  done

Create the kube-scheduler configuration file:

cd /opt/k8s/work
cat >kube-scheduler.yaml.template <<EOF
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
bindTimeoutSeconds: 600
clientConnection:
  burst: 200
  kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig"
  qps: 100
enableContentionProfiling: false
enableProfiling: true
hardPodAffinitySymmetricWeight: 1
healthzBindAddress: ##NODE_IP##:10251
leaderElection:
  leaderElect: true
metricsBindAddress: ##NODE_IP##:10251
EOF

Substitute the variables in the template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-scheduler.yaml.template > kube-scheduler-${NODE_IPS[i]}.yaml
  done
ls kube-scheduler*.yaml

Distribute the kube-scheduler configuration to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-scheduler-${node_ip}.yaml root@${node_ip}:/etc/kubernetes/kube-scheduler.yaml
  done

Create the kube-scheduler systemd unit template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-scheduler.service.template <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes


[Service]
WorkingDirectory=${K8S_DIR}/kube-scheduler
ExecStart=/opt/k8s/bin/kube-scheduler \\
  --config=/etc/kubernetes/kube-scheduler.yaml \\
  --bind-address=##NODE_IP## \\
  --secure-port=10259 \\
  --port=0 \\
  --tls-cert-file=/etc/kubernetes/cert/kube-scheduler.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kube-scheduler-key.pem \\
  --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-allowed-names="" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0


[Install]
WantedBy=multi-user.target
EOF

Substitute the variables in the template:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-scheduler.service.template > kube-scheduler-${NODE_IPS[i]}.service 
  done
ls kube-scheduler*.service

Distribute the systemd unit files to the master nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kube-scheduler-${node_ip}.service root@${node_ip}:/etc/systemd/system/kube-scheduler.service
  done

Start the kube-scheduler service:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-scheduler"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler"
  done

Check the service status:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status kube-scheduler|grep Active"
  done

# View the logs
journalctl -u kube-scheduler

Check the listening ports:

netstat -lnpt |grep kube-sch
# Output
tcp        0      0 10.0.0.26:10251        0.0.0.0:*               LISTEN      15851/kube-schedule 
tcp        0      0 10.0.0.26:10259        0.0.0.0:*               LISTEN      15851/kube-schedule 

curl -s http://10.0.0.26:10251/metrics |head   
# Output
# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0

curl -s --cacert /opt/k8s/work/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://10.0.0.26:10259/metrics |head   
# Output
# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0

Check the current leader:

kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml

Deploy docker

# *Note: to have docker managed by systemd, install it via yum; the steps below follow the Aliyun docker-ce repo guide
# Step 1: install the required system tools
yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repository
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: refresh the cache and install Docker CE
yum makecache fast
yum -y install docker-ce-19.03.5

Distribute the docker binaries to the nodes (only needed if installing from binaries rather than yum):

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp docker/*  root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Modify the docker systemd unit file to load the flannel-generated network options:

vim /usr/lib/systemd/system/docker.service
[Service]
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock

Manually set the forwarding policy:

iptables -P FORWARD ACCEPT
# Persist across reboots (rc.local must be executable on CentOS 7)
echo '/sbin/iptables -P FORWARD ACCEPT' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local

Create the docker configuration file (add your own registry addresses to insecure-registries as needed):

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > docker-daemon.json <<EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com"],
    "insecure-registries": ["image.com"],  #这里可以添加自己的镜像仓库地址
    "max-concurrent-downloads": 20,
    "live-restore": true,
    "max-concurrent-uploads": 10,
    "debug": true,
    "log-opts": {
      "max-size": "100m",
      "max-file": "5"
    }
}
EOF

Distribute the docker configuration file to the nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p  /etc/docker/ ${DOCKER_DIR}/{data,exec}"
    scp docker-daemon.json root@${node_ip}:/etc/docker/daemon.json
  done

Start the docker service:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable docker && systemctl restart docker"
  done

Check the service status:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status docker|grep Active"
  done

# View the logs
journalctl -u docker
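daemon.json sets the cgroup driver to systemd, which must match the kubelet configuration later (cgroupDriver: systemd); a mismatch is a common cause of kubelet startup failures, so confirm it took effect:

# Expect: Cgroup Driver: systemd
docker info 2>/dev/null | grep -i 'cgroup driver'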

Check the docker0 bridge:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "/usr/sbin/ip addr show flannel.1 && /usr/sbin/ip addr show docker0"
  done

Deploy kubelet

Distribute the kubelet binary to the nodes:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kubernetes/server/bin/kubelet root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Create the kubelet bootstrap kubeconfig files:

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
  do
    echo ">>> ${node_name}"


    # Create a token
    export BOOTSTRAP_TOKEN=$(kubeadm token create \
      --description kubelet-bootstrap-token \
      --groups system:bootstrappers:${node_name} \
      --kubeconfig ~/.kube/config)


    # Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/cert/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig


    # Set client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig


    # Set context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig


    # Set the default context
    kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
  done

View the tokens kubeadm created for each node:

kubeadm token list --kubeconfig ~/.kube/config
# Output
TOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION               EXTRA GROUPS
4sa6ht.i4y10n6rg6xsgdau   23h       2020-02-05T16:47:49+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node-3
iinmzc.icp0fxql23llb17n   23h       2020-02-05T16:47:49+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node-1
jdy1vb.o77kqa4lqlpqu7pr   23h       2020-02-05T16:47:50+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node-4
l98w3y.kvdkpp5pn9cdpnu9   23h       2020-02-05T16:47:50+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node-6d
lo3lkv.u1bx9ncf1gwgvejb   23h       2020-02-05T16:47:50+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node-5
vvm6cj.7i08iqyidyatgcnh   23h       2020-02-05T16:47:49+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node-2

View the secret associated with each token:

kubectl get secrets -n kube-system|grep bootstrap-token
# Output
bootstrap-token-4sa6ht                           bootstrap.kubernetes.io/token         7      3m45s
bootstrap-token-iinmzc                           bootstrap.kubernetes.io/token         7      3m45s
bootstrap-token-jdy1vb                           bootstrap.kubernetes.io/token         7      3m44s
bootstrap-token-l98w3y                           bootstrap.kubernetes.io/token         7      3m44s
bootstrap-token-lo3lkv                           bootstrap.kubernetes.io/token         7      3m44s
bootstrap-token-vvm6cj                           bootstrap.kubernetes.io/token         7      3m45s

Distribute the bootstrap kubeconfig files to the nodes;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
  do
    echo ">>> ${node_name}"
    scp kubelet-bootstrap-${node_name}.kubeconfig root@${node_name}:/etc/kubernetes/kubelet-bootstrap.kubeconfig
  done

Create the kubelet configuration file template;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kubelet-config.yaml.template <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: "##NODE_IP##"
staticPodPath: ""
syncFrequency: 1m
fileCheckFrequency: 20s
httpCheckFrequency: 20s
staticPodURL: ""
port: 10250
readOnlyPort: 0
rotateCertificates: true
serverTLSBootstrap: true
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/kubernetes/cert/ca.pem"
authorization:
  mode: Webhook
registryPullQPS: 0
registryBurst: 20
eventRecordQPS: 0
eventBurst: 20
enableDebuggingHandlers: true
enableContentionProfiling: true
healthzPort: 10248
healthzBindAddress: "##NODE_IP##"
clusterDomain: "${CLUSTER_DNS_DOMAIN}"
clusterDNS:
  - "${CLUSTER_DNS_SVC_IP}"
nodeStatusUpdateFrequency: 10s
nodeStatusReportFrequency: 1m
imageMinimumGCAge: 2m
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
volumeStatsAggPeriod: 1m
kubeletCgroups: ""
systemCgroups: ""
cgroupRoot: ""
cgroupsPerQOS: true
cgroupDriver: systemd
runtimeRequestTimeout: 10m
hairpinMode: promiscuous-bridge
maxPods: 220
podCIDR: "${CLUSTER_CIDR}"
podPidsLimit: -1
resolvConf: /etc/resolv.conf
maxOpenFiles: 1000000
kubeAPIQPS: 1000
kubeAPIBurst: 2000
serializeImagePulls: false
evictionHard:
  memory.available:  "100Mi"
  nodefs.available:  "10%"
  nodefs.inodesFree: "5%"
  imagefs.available: "15%"
evictionSoft: {}
enableControllerAttachDetach: true
failSwapOn: true
containerLogMaxSize: 20Mi
containerLogMaxFiles: 10
systemReserved: {}
kubeReserved: {}
systemReservedCgroup: ""
kubeReservedCgroup: ""
enforceNodeAllocatable: ["pods"]
EOF

Distribute the configuration file to the nodes;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do 
    echo ">>> ${node_ip}"
    sed -e "s/##NODE_IP##/${node_ip}/" kubelet-config.yaml.template > kubelet-config-${node_ip}.yaml.template
    scp kubelet-config-${node_ip}.yaml.template root@${node_ip}:/etc/kubernetes/kubelet-config.yaml
  done

Create the kubelet systemd unit file;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kubelet.service.template <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service


[Service]
WorkingDirectory=${K8S_DIR}/kubelet
ExecStart=/opt/k8s/bin/kubelet \\
  --allow-privileged=true \\
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \\
  --cert-dir=/etc/kubernetes/cert \\
  --cni-conf-dir=/etc/cni/net.d \\
  --container-runtime=docker \\
  --container-runtime-endpoint=unix:///var/run/dockershim.sock \\
  --root-dir=${K8S_DIR}/kubelet \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
  --config=/etc/kubernetes/kubelet-config.yaml \\
  --hostname-override=##NODE_NAME## \\
  --pod-infra-container-image=registry.cn-beijing.aliyuncs.com/images_k8s/pause-amd64:3.1 \\
  --image-pull-progress-deadline=15m \\
  --volume-plugin-dir=${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/ \\
  --logtostderr=true \\
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0


[Install]
WantedBy=multi-user.target
EOF

Distribute the unit files to the nodes;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
  do 
    echo ">>> ${node_name}"
    sed -e "s/##NODE_NAME##/${node_name}/" kubelet.service.template > kubelet-${node_name}.service
    scp kubelet-${node_name}.service root@${node_name}:/etc/systemd/system/kubelet.service
  done

Bootstrap token authentication and authorization;

journalctl -u kubelet -a |grep -A 2 'certificatesigningrequests'

# If an authorization error is reported, bind the bootstrappers group to the node-bootstrapper role
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers

Restart the kubelet service;

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/"
    ssh root@${node_ip} "/usr/sbin/swapoff -a"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet"
  done
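
Mirroring the docker check earlier, confirm kubelet is active on every node:

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status kubelet|grep Active"
  done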

View the CSRs;

kubectl get csr
NAME        AGE     REQUESTOR                 CONDITION
csr-474q4   2m30s   system:bootstrap:sm0qba   Pending
csr-4npnk   2m6s    system:bootstrap:bfw5da   Pending
csr-5tjxd   2m29s   system:bootstrap:19706k   Pending
csr-74s92   2m5s    system:bootstrap:rmymzu   Pending
csr-7lmzp   2m31s   system:bootstrap:4ufgz4   Pending
csr-7vx8h   2m28s   system:bootstrap:846f4g   Pending
csr-djrnr   2m4s    system:bootstrap:ithk5w   Pending
csr-f452d   2m27s   system:bootstrap:ik8d7m   Pending
csr-fzhzp   2m30s   system:bootstrap:pah0o6   Pending
csr-mnd72   2m3s    system:bootstrap:q69jal   Pending
csr-ngd66   2m1s    system:bootstrap:nz5kjp   Pending
csr-pmckd   2m1s    system:bootstrap:r1ueec   Pending
csr-vc2xv   2m3s    system:bootstrap:jnl2sk   Pending
csr-w8g64   2m26s   system:bootstrap:54og3d   Pending
csr-xwhwv   2m      system:bootstrap:sv0mru   Pending

Automatically approve CSR requests;

cd /opt/k8s/work
cat > csr-crb.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
---
# To let a node of the group "system:nodes" renew its own credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-client-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
# To let a node of the group "system:nodes" renew its own server credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-server-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: approve-node-server-renewal-csr
  apiGroup: rbac.authorization.k8s.io
EOF

kubectl apply -f csr-crb.yaml
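
To confirm the objects were created (the names come from csr-crb.yaml above):

kubectl get clusterrolebinding auto-approve-csrs-for-group node-client-cert-renewal node-server-cert-renewal
kubectl get clusterrole approve-node-server-renewal-csr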

Check the CSR status; the original client CSRs become Approved,Issued, and each kubelet then submits a serving-certificate CSR that stays Pending until approved;

kubectl get csr
# Wait 1 to 10 minutes
NAME        AGE   REQUESTOR                 CONDITION
csr-474q4   14m   system:bootstrap:sm0qba   Approved,Issued
csr-47n72   10m   system:bootstrap:19706k   Pending
csr-4npnk   14m   system:bootstrap:bfw5da   Approved,Issued
csr-5tjxd   14m   system:bootstrap:19706k   Approved,Issued
csr-6w9c9   10m   system:bootstrap:846f4g   Pending
csr-74s92   14m   system:bootstrap:rmymzu   Approved,Issued
csr-7lmzp   14m   system:bootstrap:4ufgz4   Approved,Issued
csr-7vx8h   14m   system:bootstrap:846f4g   Approved,Issued
csr-8gm78   10m   system:bootstrap:nz5kjp   Pending
csr-8r9gx   10m   system:bootstrap:54og3d   Pending
csr-94pkj   10m   system:bootstrap:ithk5w   Pending
csr-9p54h   10m   system:bootstrap:ik8d7m   Pending
csr-cns5k   10m   system:bootstrap:jnl2sk   Pending
csr-djrnr   14m   system:bootstrap:ithk5w   Approved,Issued
csr-f452d   14m   system:bootstrap:ik8d7m   Approved,Issued
csr-ffslh   10m   system:bootstrap:pah0o6   Pending
csr-fzhzp   14m   system:bootstrap:pah0o6   Approved,Issued
csr-l46qd   10m   system:bootstrap:rmymzu   Pending
csr-mfgzx   10m   system:bootstrap:r1ueec   Pending
csr-mnd72   14m   system:bootstrap:q69jal   Approved,Issued
csr-mpmn8   10m   system:bootstrap:bfw5da   Pending
csr-ngd66   14m   system:bootstrap:nz5kjp   Approved,Issued
csr-pmckd   14m   system:bootstrap:r1ueec   Approved,Issued
csr-qz4sm   10m   system:bootstrap:sm0qba   Pending
csr-sg8fh   10m   system:bootstrap:q69jal   Pending
csr-t5vww   10m   system:bootstrap:sv0mru   Pending
csr-vc2xv   14m   system:bootstrap:jnl2sk   Approved,Issued
csr-vt25x   10m   system:bootstrap:4ufgz4   Pending
csr-w8g64   14m   system:bootstrap:54og3d   Approved,Issued
csr-xwhwv   14m   system:bootstrap:sv0mru   Approved,Issued

Manually approve the server cert CSRs; because serverTLSBootstrap: true is set in kubelet-config.yaml, each kubelet requests a serving certificate, and these CSRs are not auto-approved;

kubectl certificate approve csr-4bqzv
kubectl certificate approve csr-69p6v
kubectl certificate approve csr-7gj6z
kubectl certificate approve csr-82snz
kubectl certificate approve csr-848lq
kubectl certificate approve csr-fmtgk
kubectl certificate approve csr-gpb9v
kubectl certificate approve csr-jkkrs
kubectl certificate approve csr-kf99f
kubectl certificate approve csr-mrj76
kubectl certificate approve csr-rhmth
kubectl certificate approve csr-tqhp6
kubectl certificate approve csr-xk7bw
kubectl certificate approve csr-zz5mz
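
Approving each CSR by name is tedious; a one-liner sketch that approves everything still Pending (assuming all pending requests here are legitimate kubelet serving-cert CSRs):

# Approve all currently Pending CSRs in one pass
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve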

On the nodes, verify that the certificate and key files were issued;

ls -l /etc/kubernetes/cert/kubelet-*
-rw------- 1 root root 1285 Mar  9 15:50 /etc/kubernetes/cert/kubelet-client-2020-03-09-15-50-10.pem
lrwxrwxrwx 1 root root   59 Mar  9 15:50 /etc/kubernetes/cert/kubelet-client-current.pem -> /etc/kubernetes/cert/kubelet-client-2020-03-09-15-50-10.pem
-rw------- 1 root root 1338 Mar  9 15:54 /etc/kubernetes/cert/kubelet-server-2020-03-09-15-54-07.pem
lrwxrwxrwx 1 root root   59 Mar  9 15:54 /etc/kubernetes/cert/kubelet-server-current.pem -> /etc/kubernetes/cert/kubelet-server-2020-03-09-15-54-07.pem

View the ports the kubelet API listens on; 10250 is the authenticated API port and 10248 the healthz port, per kubelet-config.yaml;

netstat -lnpt|grep kubelet
tcp        0      0 10.0.0.20:10250        0.0.0.0:*               LISTEN      23183/kubelet       
tcp        0      0 127.0.0.1:42645         0.0.0.0:*               LISTEN      23183/kubelet       
tcp        0      0 10.0.0.20:10248        0.0.0.0:*               LISTEN      23183/kubelet

Kubelet API authentication and authorization;

# Run from a master node against one node's IP
curl -s --cacert /etc/kubernetes/cert/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://10.0.0.20:10250/metrics|head
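
Since kubelet-config.yaml disables anonymous access and authorizes via webhook, the same request without credentials should be rejected; a quick negative test (expect an Unauthorized response):

# Without a client certificate or token, the kubelet should answer 401 Unauthorized
curl -s --cacert /etc/kubernetes/cert/ca.pem https://10.0.0.20:10250/metrics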

Bearer token authentication and authorization;

kubectl create sa kubelet-api-test
kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test
SECRET=$(kubectl get secrets | grep kubelet-api-test | awk '{print $1}')
TOKEN=$(kubectl describe secret ${SECRET} | grep -E '^token' | awk '{print $2}')
echo ${TOKEN}

# Verify the result
curl -s --cacert /etc/kubernetes/cert/ca.pem -H "Authorization: Bearer ${TOKEN}" https://10.0.0.20:10250/metrics|head
# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0

Deploy kube-proxy

Distribute the kube-proxy binary to the nodes;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp kubernetes/server/bin/kube-proxy root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

Create the certificate signing request; the CN system:kube-proxy matches the predefined system:node-proxier ClusterRoleBinding, so no extra RBAC is needed;

cd /opt/k8s/work
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "sss"
    }
  ]
}
EOF

Generate the certificate and private key;

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*

Create the kube-proxy kubeconfig;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig


kubectl config set-credentials kube-proxy \
  --client-certificate=kube-proxy.pem \
  --client-key=kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig


kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig


kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Distribute the kubeconfig;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
  do
    echo ">>> ${node_name}"
    scp kube-proxy.kubeconfig root@${node_name}:/etc/kubernetes/
  done

Create the kube-proxy config file template;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-proxy-config.yaml.template <<EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  burst: 200
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
  qps: 100
bindAddress: ##NODE_IP##
healthzBindAddress: ##NODE_IP##:10256
metricsBindAddress: ##NODE_IP##:10249
enableProfiling: true
clusterCIDR: ${CLUSTER_CIDR}
hostnameOverride: ##NODE_NAME##
mode: "ipvs"
portRange: ""
iptables:
  masqueradeAll: false
ipvs:
  scheduler: rr
  excludeCIDRs: []
EOF

Distribute the kube-proxy configuration file;

# Iterating over the array length avoids hard-coding the node count
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < ${#NODE_NAMES[@]}; i++ ))
  do 
    echo ">>> ${NODE_NAMES[i]}"
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-proxy-config.yaml.template > kube-proxy-config-${NODE_NAMES[i]}.yaml.template
    scp kube-proxy-config-${NODE_NAMES[i]}.yaml.template root@${NODE_NAMES[i]}:/etc/kubernetes/kube-proxy-config.yaml
  done

Create the kube-proxy systemd unit file;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target


[Service]
WorkingDirectory=${K8S_DIR}/kube-proxy
ExecStart=/opt/k8s/bin/kube-proxy \\
  --config=/etc/kubernetes/kube-proxy-config.yaml \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536


[Install]
WantedBy=multi-user.target
EOF

Distribute the kube-proxy systemd unit file;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
  do 
    echo ">>> ${node_name}"
    scp kube-proxy.service root@${node_name}:/etc/systemd/system/
  done

Start the kube-proxy service;

cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-proxy"
    ssh root@${node_ip} "modprobe ip_vs_rr"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy"
  done
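
ipvs mode depends on the ip_vs kernel modules; a quick check that the modprobe above took effect (verification sketch):

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "lsmod | grep ip_vs"
  done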

Check the startup result;

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status kube-proxy|grep Active"
  done

# View the logs
journalctl -u kube-proxy

View the listening ports;

netstat -lnpt|grep kube-proxy
# Output
tcp        0      0 10.0.0.10:10249        0.0.0.0:*               LISTEN      10096/kube-proxy    
tcp        0      0 10.0.0.10:10256        0.0.0.0:*               LISTEN      10096/kube-proxy

View the ipvs routing rules;

source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "/usr/sbin/ipvsadm -ln"
  done

# Output
>>> 10.0.0.40
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 rr
  -> 10.0.0.6:6443               Masq    1      0          0         
  -> 10.0.0.13:6443              Masq    1      0          0         
  -> 10.0.0.26:6443              Masq    1      0          0         
# The remaining nodes (10.0.0.4, 10.0.0.16, 10.0.0.17, 10.0.0.9, 10.0.0.10) print the same virtual server table.

Verify cluster functionality;
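
As a minimal smoke test (assuming the public nginx image is reachable from the nodes; the test objects below are hypothetical), create a deployment and a service, then confirm pods schedule and the service gets a ClusterIP:

# Create a test deployment and expose it as a ClusterIP service
kubectl create deployment nginx-test --image=nginx:alpine
kubectl expose deployment nginx-test --port=80
# Pods should spread across the nodes and reach Running
kubectl get pods -o wide
# The ClusterIP is served by the ipvs rules shown earlier
kubectl get svc nginx-test
# Clean up afterwards
kubectl delete deployment nginx-test && kubectl delete svc nginx-test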

Deploy the coredns addon

Modify the configuration file;

cd /opt/k8s/work/kubernetes/
tar -xzvf kubernetes-src.tar.gz
cd /opt/k8s/work/kubernetes/cluster/addons/dns/coredns
cp coredns.yaml.base coredns.yaml
source /opt/k8s/bin/environment.sh
sed -i -e "s/__PILLAR__DNS__DOMAIN__/${CLUSTER_DNS_DOMAIN}/" -e "s/__PILLAR__DNS__SERVER__/${CLUSTER_DNS_SVC_IP}/" coredns.yaml

Create coredns;

kubectl create -f coredns.yaml

Check that coredns is working;

kubectl get all -n kube-system
NAME                          READY   STATUS         RESTARTS   AGE
pod/coredns-6dfb5d496-hgw5h   0/1     ErrImagePull   0          114s


NAME               TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
service/kube-dns   ClusterIP   192.168.0.2   <none>        53/UDP,53/TCP,9153/TCP   115s


NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   0/1     1            0           115s


NAME                                DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-6dfb5d496   1         1         0       114s
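
Once the coredns pod reaches Running (the ErrImagePull above means the image still has to be pulled, e.g. from a mirror), DNS resolution can be checked from a throwaway pod; a sketch using busybox:1.28, whose nslookup behaves reliably:

# Resolve the kubernetes service through the cluster DNS
kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default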

Create the dashboard login token; the token printed below is used to log in to the kubernetes dashboard;

kubectl create sa dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
echo ${DASHBOARD_LOGIN_TOKEN}