The stock Linux 3.10 kernel shipped with CentOS 7 is unstable in Kubernetes clusters, so upgrade the kernel to 4.19 or later.
#Check the current kernel version
uname -sr
#Update all packages except the kernel
yum update -y --exclude=kernel*
#Configure the Aliyun ELRepo mirror repository
vim /etc/yum.repos.d/elrepo.repo
[elrepo]
name=elrepo
baseurl=https://mirrors.aliyun.com/elrepo/archive/kernel/el7/x86_64
enabled=1
gpgcheck=1
gpgkey=https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
#Clean and rebuild the yum metadata cache
yum clean all && yum makecache
#List the available kernel packages
yum list --showduplicates kernel*
#Install the long-term-support (lt) kernel
yum install -y kernel-lt-5.4.278
#List the GRUB boot menu entries
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
#Regenerate the GRUB configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
#Set the kernel used by default at boot
vi /etc/default/grub
GRUB_DEFAULT=0 (0 is the index of the GRUB menu entry to boot, counting from 0)
#Regenerate the GRUB configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
#Reboot to apply
reboot
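After the reboot, it is worth confirming the node actually came up on the new kernel before continuing; a quick check (the version string assumes the kernel-lt-5.4.278 package installed above):
#Should report the ELRepo kernel, e.g. 5.4.278-1.el7.elrepo.x86_64
uname -r
#Shows the kernel GRUB will boot by default (grubby ships with CentOS 7)
grubby --default-kernel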
Environment configuration
1. Basic setup
//Check the current hostname and make sure every node has a unique one
hostnamectl status
//If not, set a distinct hostname on each node
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-master2
hostnamectl set-hostname k8s-master3
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
hostnamectl set-hostname k8s-node3
//Configure hostname resolution on all nodes
vi /etc/hosts
192.168.159.130 k8s-master1
192.168.159.134 k8s-master2
192.168.159.135 k8s-master3
192.168.159.136 k8s-node1
192.168.159.137 k8s-node2
192.168.159.138 k8s-node3
192.168.159.250 k8s-master-lb
//Disable SELinux on all nodes to avoid conflicts with Kubernetes traffic handling and to ensure containers can access the host filesystem
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
setenforce 0
//Disable swap on all nodes. Kubernetes requires swap to be off for performance and stability: with swap enabled, memory and I/O performance degrade and the memory limits enforced through cgroups no longer work reliably
swapoff -a && sysctl -w vm.swappiness=0
sed -ri 's/.*swap.*/#&/' /etc/fstab
//Raise the open-file limit on all nodes (current session only)
ulimit -SHn 65535
//Make the limits permanent on all nodes
vi /etc/security/limits.conf
//Append at the end of the file (the hard limit must not be lower than the soft limit)
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
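limits.conf is applied by PAM at login, so the new limits only take effect in fresh sessions; after logging in again, a quick check:
#Both should report 655360 for the open-file limit
ulimit -Sn
ulimit -Hn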
//Install base tool packages on all nodes
yum install wget git jq psmisc net-tools yum-utils device-mapper-persistent-data lvm2 -y
//Install the ipvs management tools on all nodes
yum install ipvsadm ipset sysstat conntrack libseccomp -y
//Load the ipvs modules on all nodes; on kernel 4.19+ use nf_conntrack, on kernels older than 4.19 use nf_conntrack_ipv4
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
//Persist the module list on all nodes
vi /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
#Run on all nodes
systemctl enable --now systemd-modules-load.service
#Check that the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack
#Apply kernel parameters on all nodes; most values can be tuned to the machine, but net.ipv4.ip_forward, net.bridge.bridge-nf-call-iptables and net.bridge.bridge-nf-call-ip6tables are required
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
net.ipv4.conf.all.route_localnet = 1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16768
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16768
EOF
#Apply
sysctl --system
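If sysctl complains that the net.bridge.* keys do not exist, the br_netfilter module is most likely not loaded (it is not in the ipvs.conf list above); a minimal fix, with the .conf file name being an arbitrary choice:
#Load the bridge netfilter module now
modprobe br_netfilter
#Persist it across reboots
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
#Re-apply the sysctl settings
sysctl --system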
reboot
2. Docker installation
//Remove any existing Docker packages
yum remove docker*
//Configure the Docker yum repository
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
//Install
yum install -y docker-ce-19.03.9 docker-ce-cli-19.03.9 containerd.io-1.4.4
//Registry mirrors and cgroup driver
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors":[
"https://docker.1ms.run",
"https://hub.rat.dev",
"https://docker.1panel.live",
"https://6gkewrfd.mirror.aliyuncs.com"
]
}
EOF
//Enable and start on boot
systemctl daemon-reload && systemctl enable --now docker
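Before moving on it helps to confirm Docker is running and really using the systemd cgroup driver set in daemon.json above (kubelet is configured with cgroupDriver: systemd later on):
#Should print "Cgroup Driver: systemd"
docker info | grep -i "cgroup driver"
#Confirms the client and daemon are both up
docker version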
Initialize the certificate authority (root CA)
curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64 -o cfssl
chmod +x cfssl
curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64 -o cfssljson
chmod +x cfssljson
curl -L https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl-certinfo_1.5.0_linux_amd64 -o cfssl-certinfo
chmod +x cfssl-certinfo
mv cfssl* /usr/bin
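A quick sanity check that the cfssl toolchain is installed and on PATH (the version assumes the 1.5.0 binaries downloaded above):
#Should report version 1.5.0
cfssl version
#Confirm the helper binaries are found
which cfssljson cfssl-certinfo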
Create a JSON configuration file that defines the signing profiles the CA will use
mkdir -p /etc/kubernetes/pki
cd /etc/kubernetes/pki
vi ca-config.json
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"server": {
"usages": [
"signing",
"key encipherment",
"server auth"
],
"expiry": "87600h"
},
"client": {
"usages": [
"signing",
"key encipherment",
"client auth"
],
"expiry": "87600h"
},
"peer": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
},
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
},
"etcd": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
Create the CA certificate signing request (CSR) file
vi /etc/kubernetes/pki/ca-csr.json
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names":[{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes"
}],
"ca": {
"expiry": "87600h"
}
}
- CN: Common Name, required; for a website certificate this is usually the domain name
- O: Organization, required; usually the registered company name
- OU: Organizational Unit (department), optional
- L: Locality, the city where the organization is located
- ST: State or province where the organization is located
- C: Country code
Generate the CA certificate and private key
#Produces ca.csr, ca.pem (certificate) and ca-key.pem (private key)
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
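Optionally inspect the generated CA to confirm the subject and the 10-year (87600h) validity before signing anything with it, using the cfssl-certinfo binary installed earlier:
#Dump the CA certificate details (subject, not_before, not_after, ...)
cfssl-certinfo -cert /etc/kubernetes/pki/ca.pem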
For etcd cluster sizing, follow the official etcd hardware recommendations.
Watch out for firewall rules and open ports (2379 and 2380 must be reachable between the etcd nodes)
systemctl stop firewalld
systemctl disable firewalld
Generate the etcd root CA
vi /etc/kubernetes/pki/etcd-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names":[{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "etcd"
}],
"ca": {
"expiry": "87600h"
}
}
mkdir /etc/kubernetes/pki/etcd -p
cfssl gencert -initca /etc/kubernetes/pki/etcd-csr.json | cfssljson -bare /etc/kubernetes/pki/etcd/ca -
Issue the etcd certificate for the cluster nodes (the hosts field lists the hostnames and IPs the certificate will be valid for)
vi /etc/kubernetes/pki/etcd-cluster-csr.json
{
"CN": "etcd-cluster",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"k8s-master1",
"k8s-master2",
"k8s-master3",
"127.0.0.1",
"192.168.159.130",
"192.168.159.134",
"192.168.159.135"
],
"names":[{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "etcd",
"OU": "System"
}]
}
cfssl gencert \
-ca=/etc/kubernetes/pki/etcd/ca.pem \
-ca-key=/etc/kubernetes/pki/etcd/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=etcd \
etcd-cluster-csr.json | cfssljson -bare /etc/kubernetes/pki/etcd/etcd
Copy the generated etcd certificates to the other etcd nodes
scp -r /etc/kubernetes/pki/etcd root@k8s-master2:/etc/kubernetes/pki
scp -r /etc/kubernetes/pki/etcd root@k8s-master3:/etc/kubernetes/pki
//Download the etcd release
wget https://github.com/etcd-io/etcd/releases/download/v3.4.34/etcd-v3.4.34-linux-amd64.tar.gz
//Extract the etcd and etcdctl binaries into /usr/local/bin
tar -zxvf etcd-v3.4.34-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.4.34-linux-amd64/etcd{,ctl}
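A quick check that the binaries landed in /usr/local/bin and match the downloaded release:
#Both should report version 3.4.34
etcd --version
etcdctl version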
//On each etcd node, create the config file below and adjust the node-specific values (name and IP addresses)
mkdir -p /etc/etcd
vi /etc/etcd/etcd.yaml
name: 'etcd-master1'
#Data directory
data-dir: /var/lib/etcd
#WAL (write-ahead log) directory
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
#Local IP + port 2380, the peer (cluster) communication address
listen-peer-urls: 'https://192.168.159.130:2380'
listen-client-urls: 'https://192.168.159.130:2379,http://127.0.0.1:2379'
max-snapshots: 5
max-wals: 5
cors:
#Local IP
initial-advertise-peer-urls: 'https://192.168.159.130:2380'
advertise-client-urls: 'https://192.168.159.130:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
#Addresses of all members of the etcd cluster
initial-cluster: 'etcd-master1=https://192.168.159.130:2380,etcd-master2=https://192.168.159.134:2380,etcd-master3=https://192.168.159.135:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
#TLS certificate configuration
client-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.pem'
auto-tls: true
debug: false
logger: zap
log-outputs: [stderr]
force-new-cluster: false
auto-compaction-mode: periodic
auto-compaction-retention: "1"
Enable etcd to start on boot on all etcd nodes
vi /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd service
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.yaml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
systemctl daemon-reload
systemctl enable --now etcd
Check the etcd cluster status
etcdctl --endpoints="192.168.159.130:2379,192.168.159.134:2379,192.168.159.135:2379" --cacert=/etc/kubernetes/pki/etcd/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
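In addition to endpoint status, an endpoint health check against the same endpoints quickly shows whether every member answers and quorum is formed:
#Every endpoint should report "is healthy"
etcdctl --endpoints="192.168.159.130:2379,192.168.159.134:2379,192.168.159.135:2379" \
  --cacert=/etc/kubernetes/pki/etcd/ca.pem \
  --cert=/etc/kubernetes/pki/etcd/etcd.pem \
  --key=/etc/kubernetes/pki/etcd/etcd-key.pem \
  endpoint health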
//Download the Kubernetes server binaries
wget https://dl.k8s.io/v1.21.14/kubernetes-server-linux-amd64.tar.gz
//Extract on the master nodes
tar -xvf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
//Worker nodes only need kubelet and kube-proxy
tar -xvf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,-proxy}
Generate the apiserver certificate
vi /etc/kubernetes/pki/apiserver-csr.json
{
"CN": "kube-apiserver",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"10.96.0.1",
"127.0.0.1",
"192.168.159.250",
"192.168.159.130",
"192.168.159.134",
"192.168.159.135",
"192.168.159.136",
"192.168.159.137",
"192.168.159.138",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"names":[{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes"
}]
}
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=/etc/kubernetes/pki/ca-config.json -profile=kubernetes /etc/kubernetes/pki/apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
10.96.0.1 is the first IP of the planned service CIDR (10.96.0.0/16)
192.168.159.250 is the planned load-balancer (VIP) address
front-proxy certificates
front-proxy is the apiserver aggregation layer, which allows custom API extensions: when a manifest uses a kind the apiserver does not recognize as built-in (for example kind: Test), the request is handed over to the aggregation layer.
Generate the front-proxy root CA
vi /etc/kubernetes/pki/front-proxy-ca-csr.json
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
}
}
cfssl gencert -initca /etc/kubernetes/pki/front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
Issue the front-proxy-client certificate
vi /etc/kubernetes/pki/front-proxy-client-csr.json
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=/etc/kubernetes/pki/ca-config.json -profile=kubernetes /etc/kubernetes/pki/front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
controller-manager certificate
vi /etc/kubernetes/pki/controller-manager-csr.json
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names":[{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "Kubernetes"
}]
}
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=/etc/kubernetes/pki/ca-config.json -profile=kubernetes /etc/kubernetes/pki/controller-manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
Generate the controller-manager.conf kubeconfig
#If you are not using a load balancer, use the IP of master1 instead of the VIP
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.159.250:6443 \
--kubeconfig=/etc/kubernetes/pki/controller-manager.conf
#Set the context entry
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/pki/controller-manager.conf
#Set the credentials entry
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/pki/controller-manager.conf
#Set the default context
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/pki/controller-manager.conf
scheduler certificate
vi /etc/kubernetes/pki/scheduler-csr.json
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names":[{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "Kubernetes"
}]
}
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=/etc/kubernetes/pki/ca-config.json -profile=kubernetes /etc/kubernetes/pki/scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
Generate the scheduler.conf kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.159.250:6443 \
--kubeconfig=/etc/kubernetes/pki/scheduler.conf
#Set the credentials entry
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/pki/scheduler.conf
#Set the context entry
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/pki/scheduler.conf
#Set the default context
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/pki/scheduler.conf
admin certificate
vi /etc/kubernetes/pki/admin-csr.json
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names":[{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "Kubernetes"
}]
}
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=/etc/kubernetes/pki/ca-config.json -profile=kubernetes /etc/kubernetes/pki/admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
Generate the admin.conf kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.159.250:6443 \
--kubeconfig=/etc/kubernetes/pki/admin.conf
#Set the credentials entry
kubectl config set-credentials kubernetes-admin \
--client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/pki/admin.conf
#Set the context entry
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=/etc/kubernetes/pki/admin.conf
#Set the default context
kubectl config use-context kubernetes-admin@kubernetes \
--kubeconfig=/etc/kubernetes/pki/admin.conf
Generate the ServiceAccount signing key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
Copy the certificates to the other master nodes
scp -r /etc/kubernetes/pki root@k8s-master2:/etc/kubernetes/
scp -r /etc/kubernetes/pki root@k8s-master3:/etc/kubernetes/
Load-balancer options for the apiserver
- nginx upstream
- keepalived for high availability
- haproxy
- a cloud load-balancer product (Alibaba Cloud SLB, Tencent Cloud ELB, ...)
Run the control-plane components as systemd services on the master nodes
- apiserver
//Create the required directories
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
//systemd unit for kube-apiserver
vi /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=K8s API Server
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--logtostderr=true \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--insecure-port=0 \
--advertise-address=192.168.159.130 \
--service-cluster-ip-range=10.96.0.0/16 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://192.168.159.130:2379,https://192.168.159.134:2379,https://192.168.159.135:2379 \
--etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem \
--etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem \
--etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator,front-proxy-client \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
Enable on boot
systemctl daemon-reload && systemctl enable --now kube-apiserver
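Once kube-apiserver is running, a health probe against the secure port confirms it is serving; a minimal check using the admin client certificate generated earlier:
#Expect the response "ok"
curl --cacert /etc/kubernetes/pki/ca.pem \
  --cert /etc/kubernetes/pki/admin.pem \
  --key /etc/kubernetes/pki/admin-key.pem \
  https://127.0.0.1:6443/healthz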
- controller-manager
The Pod network CIDR is defined as 196.16.0.0/16
vi /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=K8s Controller Manager
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--v=2 \
--logtostderr=true \
--address=127.0.0.1 \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--kubeconfig=/etc/kubernetes/pki/controller-manager.conf \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--pod-eviction-timeout=2m0s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--cluster-cidr=196.16.0.0/16 \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--node-cidr-mask-size=24
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
Enable on boot
systemctl daemon-reload && systemctl enable --now kube-controller-manager
- scheduler
vi /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=K8s Scheduler
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--v=2 \
--logtostderr=true \
--address=127.0.0.1 \
--leader-elect=true \
--kubeconfig=/etc/kubernetes/pki/scheduler.conf
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
Enable on boot
systemctl daemon-reload && systemctl enable --now kube-scheduler
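At this point all three control-plane services should be active on each master; a quick sketch of verifying them:
#All three should print "active"
systemctl is-active kube-apiserver kube-controller-manager kube-scheduler
#Recent logs in case one of them is restarting
journalctl -u kube-controller-manager -u kube-scheduler -n 20 --no-pager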
- Kubelet TLS bootstrapping (node certificates are issued automatically)
Create the bootstrap-kubelet.conf file
#Generate a 16-character random hex string, e.g. b2b5b205a69fcba9
head -c 8 /dev/urandom | od -An -t x | tr -d ' '
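The bootstrap token used below has the form <token-id>.<token-secret> (6 plus 16 lowercase hex characters, here 07401b.b2b5b205a69fcba9). A small sketch of generating a fresh pair instead of reusing the example values (the TOKEN_ID/TOKEN_SECRET variable names are arbitrary); if you do, substitute the new values in the set-credentials command below and in bootstrap.secret.yaml:
#6-character token id and 16-character token secret
TOKEN_ID=$(openssl rand -hex 3)
TOKEN_SECRET=$(openssl rand -hex 8)
echo "${TOKEN_ID}.${TOKEN_SECRET}"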
#Set the cluster entry
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.159.250:6443 \
--kubeconfig=/etc/kubernetes/pki/bootstrap-kubelet.conf
#Set the credentials entry
kubectl config set-credentials tls-bootstrap-token-user \
--token=07401b.b2b5b205a69fcba9 \
--kubeconfig=/etc/kubernetes/pki/bootstrap-kubelet.conf
#Set the context entry
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/pki/bootstrap-kubelet.conf
#Set the default context
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/pki/bootstrap-kubelet.conf
Copy bootstrap-kubelet.conf into the pki directory on every node
mkdir -p /etc/kubernetes/pki
scp /etc/kubernetes/pki/bootstrap-kubelet.conf root@k8s-master2:/etc/kubernetes/pki
Grant the kubectl command access to the cluster
mkdir -p /root/.kube
cp /etc/kubernetes/pki/admin.conf /root/.kube/config
#Verify
kubectl get nodes
Create the cluster bootstrap RBAC YAML
vi /etc/kubernetes/bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-token-07401b
namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
description: "The default bootstrap token generated by kubelet"
token-id: 07401b
token-secret: b2b5b205a69fcba9
usage-bootstrap-authentication: "true"
usage-bootstrap-signing: "true"
auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-bootstrapper
subjects:
- kind: Group
name: system:bootstrappers:default-node-token
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- kind: Group
name: system:bootstrappers:default-node-token
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-certificate-rotation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- kind: Group
name: system:nodes
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- kind: User
name: kube-apiserver
apiGroup: rbac.authorization.k8s.io
kubectl create -f /etc/kubernetes/bootstrap.secret.yaml
Configure kubelet on all nodes
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
vi /usr/lib/systemd/system/kubelet.service
[Unit]
Description=k8s Kubelet
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
vi /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/pki/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yaml --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.4.1"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
Create the kubelet-conf.yaml file
vi /etc/kubernetes/kubelet-conf.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
#Must be inside the service CIDR defined earlier (10.96.0.0/16)
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 1m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
Enable on boot
systemctl daemon-reload && systemctl enable --now kubelet
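With the bootstrap token in place, each kubelet should request a certificate automatically and register its node; a minimal check from a master (nodes remain NotReady until the Calico CNI is installed below):
#CSRs should show up as Approved,Issued
kubectl get csr
#Nodes appear once the kubelet has its certificate; NotReady is expected at this stage
kubectl get nodes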
Configure kube-proxy
//Create the kube-proxy ServiceAccount
kubectl -n kube-system create serviceaccount kube-proxy
//Bind it to the system:node-proxier cluster role
kubectl create clusterrolebinding system:kube-proxy \
--clusterrole system:node-proxier \
--serviceaccount kube-system:kube-proxy
//Export helper variables
SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}' | base64 -d)
PKI_DIR=/etc/kubernetes/pki
K8S_DIR=/etc/kubernetes
//Set the cluster entry
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.159.250:6443 \
--kubeconfig=/etc/kubernetes/kube-proxy.conf
#Set the credentials entry
kubectl config set-credentials kubernetes \
--token=${JWT_TOKEN} \
--kubeconfig=/etc/kubernetes/kube-proxy.conf
#Set the context entry
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.conf
#Set the default context
kubectl config use-context kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.conf
Copy kube-proxy.conf to every node
scp /etc/kubernetes/kube-proxy.conf root@k8s-master2:/etc/kubernetes/kube-proxy.conf
Create the systemd unit on all nodes
vi /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=K8s Kube Proxy
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
Create kube-proxy.yaml on all nodes
vi /etc/kubernetes/kube-proxy.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: /etc/kubernetes/kube-proxy.conf
qps: 5
#The Pod network CIDR
clusterCIDR: 196.16.0.0/16
configSyncPeriod: 15m0s
conntrack:
max: null
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
masqueradeAll: true
minSyncPeriod: 5s
scheduler: "rr"
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
systemctl daemon-reload && systemctl enable --now kube-proxy
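Since kube-proxy runs in ipvs mode here, the virtual servers it programs can be inspected directly with ipvsadm (installed earlier); shortly after startup the kubernetes service VIP 10.96.0.1 should be listed:
#kube-proxy should be active and ipvs should list the service virtual servers
systemctl is-active kube-proxy
ipvsadm -Ln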
Install the Calico network plugin
calico-etcd.yaml (the manifest below; save it as /etc/kubernetes/calico.yaml so the sed commands that follow apply to it)
---
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
---
# Source: calico/templates/calico-kube-controllers.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: calico-etcd-secrets
namespace: kube-system
data:
# Populate the following with etcd TLS configuration if desired, but leave blank if
# not using TLS for etcd.
# The keys below should be uncommented and the values populated with the base64
# encoded contents of each file that would be associated with the TLS data.
# Example command for encoding a file contents: cat <file> | base64 -w 0
# etcd-key: null
# etcd-cert: null
# etcd-ca: null
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "" # "/calico-secrets/etcd-ca"
etcd_cert: "" # "/calico-secrets/etcd-cert"
etcd_key: "" # "/calico-secrets/etcd-key"
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Pods are monitored for changing labels.
# The node controller monitors Kubernetes nodes.
# Namespace and serviceaccount labels are used for policy.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
- serviceaccounts
verbs:
- watch
- list
- get
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# Used for creating service account tokens to be used by the CNI plugin
- apiGroups: [""]
resources:
- serviceaccounts/token
resourceNames:
- calico-node
verbs:
- create
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-node-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.25.0
imagePullPolicy: IfNotPresent
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
securityContext:
privileged: true
# This init container mounts the necessary filesystems needed by the BPF data plane
# i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
# in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
- name: "mount-bpffs"
image: docker.io/calico/node:v3.25.0
imagePullPolicy: IfNotPresent
command: ["calico-node", "-init", "-best-effort"]
volumeMounts:
- mountPath: /sys/fs
name: sys-fs
# Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host
# so that it outlives the init container.
mountPropagation: Bidirectional
- mountPath: /var/run/calico
name: var-run-calico
# Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host
# so that it outlives the init container.
mountPropagation: Bidirectional
# Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary,
# executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly.
- mountPath: /nodeproc
name: nodeproc
readOnly: true
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.25.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Never"
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Never"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
lifecycle:
preStop:
exec:
command:
- /bin/calico-node
- -shutdown
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: bpffs
mountPath: /sys/fs/bpf
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sys-fs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
- name: bpffs
hostPath:
path: /sys/fs/bpf
type: Directory
# mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs.
- name: nodeproc
hostPath:
path: /proc
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.25.0
imagePullPolicy: IfNotPresent
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,namespace,serviceaccount,workloadendpoint,node
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
volumes:
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0440
//Point Calico at the etcd cluster
sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://192.168.159.130:2379,https://192.168.159.134:2379,https://192.168.159.135:2379"#g' /etc/kubernetes/calico.yaml
//Base64-encode the etcd certificates
ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.pem | base64 -w 0`
ETCD_CERT=`cat /etc/kubernetes/pki/etcd/etcd.pem | base64 -w 0`
ETCD_KEY=`cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 -w 0`
//Fill the encoded certificates into the calico-etcd-secrets Secret
sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" /etc/kubernetes/calico.yaml
//Enable the etcd TLS file paths in the ConfigMap
sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' /etc/kubernetes/calico.yaml
//Set the Pod CIDR to the one defined for this cluster
sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@# value: "192.168.0.0/16"@ value: "196.16.0.0/16"@g' /etc/kubernetes/calico.yaml
//Verify; if the sed did not take effect, edit the value by hand
grep "CALICO_IPV4POOL_CIDR" /etc/kubernetes/calico.yaml -A 1
//Apply the manifest
kubectl apply -f /etc/kubernetes/calico.yaml
The sed edits can break the YAML formatting; if anything looks off, open the file in an editor and fix it by hand.
Check that the Calico pods reach Running
kubectl get pod -A -owide | grep calico
Install CoreDNS (cluster DNS)
git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes
//Use the 10th IP of the service CIDR (10.96.0.10) as the cluster DNS address
./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
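Once the coredns deployment is Running, cluster DNS can be verified end to end by resolving the kubernetes service from a throwaway pod; a minimal sketch (the pod name dns-test is arbitrary, and busybox:1.28 is chosen because newer busybox images have known nslookup quirks):
#Should resolve kubernetes.default to 10.96.0.1 via the 10.96.0.10 resolver
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default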
Label and taint the master nodes
With a binary installation the master nodes carry no taints by default, so Pods can be scheduled onto them.
//Add the NoSchedule taint
kubectl taint nodes k8s-master1 node-role.kubernetes.io/master=:NoSchedule
kubectl taint nodes k8s-master2 node-role.kubernetes.io/master=:NoSchedule
kubectl taint nodes k8s-master3 node-role.kubernetes.io/master=:NoSchedule
//Add the master role label
kubectl label node k8s-master1 node-role.kubernetes.io/master=''
kubectl label node k8s-master2 node-role.kubernetes.io/master=''
kubectl label node k8s-master3 node-role.kubernetes.io/master=''
Label the worker nodes
kubectl label node k8s-node1 node-role.kubernetes.io/worker=''
kubectl label node k8s-node2 node-role.kubernetes.io/worker=''
kubectl label node k8s-node3 node-role.kubernetes.io/worker=''
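As a final check, the nodes should now show their roles and report Ready, and the kube-system pods (calico, coredns) should all be Running:
#ROLES should show master/worker and STATUS should be Ready
kubectl get nodes -o wide
kubectl get pods -n kube-system -o wide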