Deploying a k8s Cluster

I. Base environment preparation

Create the NFS backend storage:

root@ha1:~# apt install nfs-server

root@ha1:~# mkdir -p /data/volumes

root@ha1:~# vim /etc/exports    # add the following line

/data/volumes *(rw,no_root_squash)      # "*" allows clients from any network to mount; specify a subnet instead (e.g. 172.16.232.0/24) to block mounts from other networks

root@ha1:~# exportfs -av  # re-export the NFS shares. This is preferred over restarting nfs-server, which would disrupt running clients; to restart anyway: systemctl restart nfs-server.service

Mount from the other hosts:

root@master1:~# showmount -e 172.16.232.109  # list the exports; seeing them does not guarantee the mount will succeed, e.g. if network restrictions apply

root@master1:~# mount -t nfs 172.16.232.109:/data/volumes /mnt
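To make the mount persistent across reboots (optional; a sketch assuming the same export path), add an fstab entry and verify:

root@master1:~# echo '172.16.232.109:/data/volumes /mnt nfs defaults,_netdev 0 0' >> /etc/fstab    # _netdev: wait for the network before mounting
root@master1:~# mount -a && df -h /mnt    # remount everything from fstab and confirm the NFS share is visible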

Lab host credentials: users root/wys, password magedu.

On all hosts: disable swap, stop iptables, disable SELinux (where applicable), and tune the kernel and resource-limit parameters (a sketch follows).
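A minimal sketch of these preparation steps on Ubuntu hosts (the limit values are illustrative assumptions; tune them to your hardware):

root@master1:~# swapoff -a && sed -ri 's/^([^#].*\sswap\s)/#\1/' /etc/fstab    # turn swap off now and keep it off after reboot
root@master1:~# systemctl disable --now ufw    # Ubuntu's iptables front end; SELinux does not apply on Ubuntu
root@master1:~# cat >> /etc/security/limits.conf <<'EOF'
*  soft  nofile  1000000
*  hard  nofile  1000000
EOF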

The master and node hosts use containerd as the container runtime; the harbor host runs docker.

Hostname     IP               VIP / Notes

master1      172.16.232.101   VIP 172.16.232.188
master2      172.16.232.102   VIP 172.16.232.188
master3      172.16.232.103   VIP 172.16.232.188
harbor1      172.16.232.104   deployed first as the only harbor node
harbor2      172.16.232.105   not yet deployed
etcd1        172.16.232.106
etcd2        172.16.232.107
etcd3        172.16.232.108
ha1          172.16.232.109
ha2-deploy   172.16.232.110   k8s deploy node (ansible)
node1        172.16.232.111
node2        172.16.232.112

II. Cluster deployment

High-availability load balancing:

1. Configure /etc/sysctl.conf

root@ha1:~#  vim /etc/sysctl.conf

net.ipv4.ip_forward=1

net.bridge.bridge-nf-call-iptables=1

net.bridge.bridge-nf-call-ip6tables=1

net.bridge.bridge-nf-call-arptables=1

net.ipv4.tcp_tw_reuse=0

net.core.somaxconn=32768

net.netfilter.nf_conntrack_max=1000000

vm.swappiness=0

vm.max_map_count=655360

fs.file-max=6553600

net.ipv4.ip_nonlocal_bind = 1  # allow binding to a non-local IP; without this, haproxy on the BACKUP node cannot start because it cannot bind the VIP it does not yet hold

Apply the configuration:

root@ha1:~#  sysctl -p

2. Install keepalived and haproxy

keepalived

root@ha1:~#  apt install keepalived haproxy -y

root@ha1:~#  cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf

root@ha1:~#  vim /etc/keepalived/keepalived.conf

vrrp_instance VI_1 {

    state MASTER

    interface ens33

    garp_master_delay 10

    smtp_alert

    virtual_router_id 51

    priority 100

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass 123456

    }

    virtual_ipaddress {

        172.16.232.188 dev ens33 label ens33:0

        172.16.232.189 dev ens33 label ens33:1

        172.16.232.190 dev ens33 label ens33:2

        172.16.232.191 dev ens33 label ens33:3

    }

}

root@ha1:~#systemctl restart keepalived.service

root@ha1:~#systemctl enable keepalived

*The ha2 configuration is the same as ha1, except that priority 100 is changed to a lower value (and typically state MASTER to BACKUP); then test that the VIP fails over.
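A quick VIP failover test (a sketch):

root@ha1:~# ip addr show ens33    # the VIPs 172.16.232.188-191 are on the MASTER
root@ha1:~# systemctl stop keepalived    # simulate a failure on ha1
root@ha2:~# ip addr show ens33    # within a few seconds the VIPs appear on ha2
root@ha1:~# systemctl start keepalived    # with default preemption the VIPs move back to ha1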

haproxy

root@ha1:~#vim /etc/haproxy/haproxy.cfg

listen stats

  mode http

  bind 0.0.0.0:9999

  stats enable

  log global

  stats uri  /haproxy-status

  stats auth  haadmin:1q2w3e4r5t

listen k8s-apiserver-6443

  bind 172.16.232.188:6443

  mode tcp

  #balance source

  server 172.16.232.201 172.16.232.101:6443 check inter 2s fall 3 rise 5

  server 172.16.232.202 172.16.232.102:6443 check inter 2s fall 3 rise 5

  server 172.16.232.203 172.16.232.103:6443 check inter 2s fall 3 rise 5

root@ha1:~#systemctl restart haproxy

root@ha1:~#systemctl enable haproxy

Check the listening ports: root@ha1:~# ss -ntl
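The stats page can also be checked over HTTP (the apiserver backends stay DOWN until the masters are deployed later):

root@ha1:~# curl -su haadmin:1q2w3e4r5t http://172.16.232.109:9999/haproxy-status | head    # expect the HAProxy statistics HTML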

1. Install the harbor server

1.1 Install docker:

Download the docker offline installation package and upload it to the server.

root@harbor1:/usr/local/src# tar xvf docker-20.10.17-binary-install.tar.gz

root@harbor1:/usr/local/src# bash docker-install.sh

root@harbor1:/usr/local/src# docker version

1.2 Install harbor

Note: for production, request and download a certificate from a commercial CA; certificates issued by a public CA are trusted by clients out of the box (e.g. https://yundun.console.aliyun.com/?p=cas#/certExtend/buy, free certificates at https://yundun.console.aliyun.com/?spm=5176.12818093.top-nav.37.4eab16d0ITgzBO&p=cas#/certExtend/free). This walkthrough uses a self-signed CA instead.

root@harbor1:/usr/local/src# mkdir /apps

root@harbor1:/usr/local/src# cd /apps/

root@harbor1:/apps# tar xvf harbor-offline-installer-v2.5.3.tgz

root@harbor1:/apps# cd harbor/

root@harbor1:/apps/harbor# mkdir certs

root@harbor1:/apps/harbor# cd certs/

root@harbor1:/apps/harbor/certs# openssl genrsa -out ca.key 4096  # private CA key

root@harbor1:/apps/harbor/certs# openssl req -x509 -new -nodes -sha512 -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=magedu.com"  -key ca.key  -out ca.crt      # self-signed CA certificate

root@harbor1:/apps/harbor/certs# touch /root/.rnd    # random-seed file some openssl versions require when signing

root@harbor1:/apps/harbor/certs# openssl genrsa -out magedu.net.key 4096    # harbor server private key

root@harbor1:/apps/harbor/certs# openssl req -sha512 -new -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=magedu.net"  -key magedu.net.key  -out magedu.net.csr    # harbor server CSR

root@harbor1:/apps/harbor/certs# vim v3.ext    # SAN extension file used when signing the certificate

authorityKeyIdentifier=keyid,issuer

basicConstraints=CA:FALSE

keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment

extendedKeyUsage = serverAuth

subjectAltName = @alt_names

[alt_names]

DNS.1=magedu.com

DNS.2=harbor.magedu.net

DNS.3=harbor.magedu.local

root@harbor1:/apps/harbor/certs# openssl x509 -req -sha512 -days 3650 -extfile v3.ext -CA ca.crt -CAkey ca.key -CAcreateserial -in magedu.net.csr -out magedu.net.crt    # sign the harbor server certificate with the private CA
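Before using the certificate, its validity and SANs can be sanity-checked:

root@harbor1:/apps/harbor/certs# openssl x509 -in magedu.net.crt -noout -subject -dates
root@harbor1:/apps/harbor/certs# openssl x509 -in magedu.net.crt -noout -text | grep -A1 'Subject Alternative Name'    # the three DNS entries from v3.ext should be listed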

Deploy harbor:

root@harbor1:/apps/harbor/certs# cd ..

root@harbor1:/apps/harbor# cp harbor.yml.tmpl harbor.yml

root@harbor1:/apps/harbor# vim harbor.yml

root@harbor1:/apps/harbor# grep -v "#" harbor.yml | grep -v "^$"

hostname: harbor.magedu.net
http:
  port: 80
https:
  port: 443
  certificate: /apps/harbor/certs/magedu.net.crt
  private_key: /apps/harbor/certs/magedu.net.key

root@harbor1:/apps/harbor# ./install.sh --help

root@harbor1:/apps/harbor# ./install.sh --with-trivy  --with-chartmuseum
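After install.sh finishes, the harbor containers can be checked (assuming docker-compose was installed by the docker bundle above):

root@harbor1:/apps/harbor# docker-compose ps    # all harbor services should be Up (healthy)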

Open a browser and log in to the harbor UI to verify.

1.3 Install docker on the deploy node and distribute the harbor certificate:

Download the docker offline installation package and upload it to the server.

root@ha2-deploy:/usr/local/src# tar xvf docker-20.10.17-binary-install.tar.gz

root@ha2-deploy:/usr/local/src# bash docker-install.sh

root@ha2-deploy:/usr/local/src# docker version

root@ha2-deploy:~# mkdir /etc/docker/certs.d/harbor.magedu.net -p  # create the certificate directory on the client

root@harbor1:/apps/harbor/certs# pwd

/apps/harbor/certs

root@harbor1:/apps/harbor/certs# scp magedu.net.crt 172.16.232.110:/etc/docker/certs.d/harbor.magedu.net

root@ha2-deploy:/etc/docker/certs.d/harbor.magedu.net# vim /etc/hosts

172.16.232.104 harbor.magedu.net

root@ha2-deploy:/etc/docker/certs.d/harbor.magedu.net# systemctl restart docker  # restart docker to pick up the certificate

Log in to harbor from the client:

root@ha2-deploy:/etc/docker/certs.d/harbor.magedu.net# cd

root@ha2-deploy:~# docker login harbor.magedu.net

Username: admin

Password:

Test pushing an image to harbor:

root@ha2-deploy:~# docker pull alpine

root@ha2-deploy:~# docker tag alpine harbor.magedu.net/library/alpine:latest

root@ha2-deploy:~# docker push harbor.magedu.net/library/alpine:latest

2. Deploy with ansible (kubeasz):

2.1 Base environment preparation:

Distribute the SSH key:

root@ha2-deploy:~# apt install ansible

root@ha2-deploy:~# ssh-keygen

root@ha2-deploy:~# apt install sshpass        # sshpass is used to push the public key to all k8s servers

root@ha2-deploy:~# cat key.sh

#!/bin/bash

# target host list

IP="

172.16.232.101

172.16.232.102

172.16.232.103

172.16.232.106

172.16.232.107

172.16.232.108

172.16.232.111

172.16.232.112

"

for node in ${IP};do

    sshpass -p magedu ssh-copy-id ${node} -o StrictHostKeyChecking=no
    echo "${node} key copied"
    ssh ${node} ln -sv /usr/bin/python3 /usr/bin/python
    echo "${node} /usr/bin/python symlink created"

done

root@ha2-deploy:~# ln -sv /usr/bin/python3  /usr/bin/python

root@ha2-deploy:~# bash key.sh  # run the script to distribute the key

Verify passwordless login to the other servers:

root@ha2-deploy:~# ssh 172.16.232.112

2.2 Download the kubeasz project and its components

root@ha2-deploy:~# mkdir /etc/kubeasz/


root@ha2-deploy:~# export release=3.3.1

root@ha2-deploy:~# wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown

root@ha2-deploy:~# vim ezdown # customize component versions if needed

root@ha2-deploy:~#chmod +x ./ezdown

root@ha2-deploy:~#./ezdown -D

root@ha2-deploy:~#ll /etc/kubeasz/

2.3 Generate and customize the hosts file

root@ha2-deploy:/etc/kubeasz# ./ezctl new k8s-cluster1

1. Edit the hosts file:

Specify the etcd nodes, master nodes, worker nodes, VIP, container runtime, network plugin, service and pod IP ranges, and other settings.

root@ha2-deploy:/etc/kubeasz/clusters/k8s-cluster1# vim hosts

# 'etcd' cluster should have odd member(s) (1,3,5,...)

[etcd]

172.16.232.106

172.16.232.107

172.16.232.108

# master node(s)

[kube_master]

172.16.232.101

172.16.232.102

# work node(s)

[kube_node]

172.16.232.111

172.16.232.112

# [optional] harbor server, a private docker registry

# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one

[harbor]

#172.16.232.8 NEW_INSTALL=false

# [optional] loadbalance for accessing k8s from outside

[ex_lb]

#172.16.232.6 LB_ROLE=backup EX_APISERVER_VIP=172.16.232.250 EX_APISERVER_PORT=8443

#172.16.232.7 LB_ROLE=master EX_APISERVER_VIP=172.16.232.250 EX_APISERVER_PORT=8443

# [optional] ntp server for the cluster

[chrony]

#172.16.232.1

[all:vars]

# --------- Main Variables ---------------

# Secure port for apiservers

SECURE_PORT="6443"

# Cluster container-runtime supported: docker, containerd

# if k8s version >= 1.24, docker is not supported

CONTAINER_RUNTIME="containerd"

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn

CLUSTER_NETWORK="calico"

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'

PROXY_MODE="ipvs"

# K8S Service CIDR, not overlap with node(host) networking

SERVICE_CIDR="10.100.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking

CLUSTER_CIDR="10.200.0.0/16"

# NodePort Range

NODE_PORT_RANGE="30000-32767"

# Cluster DNS Domain

CLUSTER_DNS_DOMAIN="magedu.local"

# -------- Additional Variables (don't change the default value right now) ---

# Binaries Directory

bin_dir="/usr/local/bin"

# Deploy Directory (kubeasz workspace)

base_dir="/etc/kubeasz"

# Directory for a specific cluster

cluster_dir="{{ base_dir }}/clusters/k8s-cluster1"

# CA and other components cert/key Directory

ca_dir="/etc/kubernetes/ssl"

2. Edit the config.yml file:

Push the pause:3.7 image to harbor first:

root@ha2-deploy:/etc/kubeasz# docker login harbor.magedu.net

root@ha2-deploy:/etc/kubeasz# docker tag easzlab/pause:3.7 harbor.magedu.net/baseimages/pause:3.7

root@ha2-deploy:/etc/kubeasz# docker push harbor.magedu.net/baseimages/pause:3.7

root@ha2-deploy:/etc/kubeasz# vim clusters/k8s-cluster1/config.yml

root@ha2-deploy:/etc/kubeasz# cat clusters/k8s-cluster1/config.yml

# prepare

############################

# optionally install system packages offline (offline|online)

INSTALL_SOURCE: "online"

# optional OS security hardening, see github.com/dev-sec/ansible-collection-hardening

OS_HARDEN: false

############################

# role:deploy

############################

# default: ca will expire in 100 years

# default: certs issued by the ca will expire in 50 years

CA_EXPIRY: "876000h"

CERT_EXPIRY: "438000h"

# kubeconfig parameters

CLUSTER_NAME: "cluster1"

CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"

# k8s version

K8S_VER: "1.24.2"

############################

# role:etcd

############################

# a separate wal directory avoids disk io contention and improves performance

ETCD_DATA_DIR: "/var/lib/etcd"

ETCD_WAL_DIR: ""

############################

# role:runtime [containerd,docker]

############################

# ------------------------------------------- containerd

# enable container registry mirrors

ENABLE_MIRROR_REGISTRY: true

# [containerd] sandbox (pause) image

SANDBOX_IMAGE: "harbor.magedu.net/baseimages/pause:3.7"

# [containerd] container storage directory

CONTAINERD_STORAGE_DIR: "/var/lib/containerd"

# ------------------------------------------- docker

# [docker] container storage directory

DOCKER_STORAGE_DIR: "/var/lib/docker"

# [docker] enable the remote REST API

ENABLE_REMOTE_API: false

# [docker] trusted insecure (HTTP) registries

INSECURE_REG: '["http://easzlab.io.local:5000"]'

############################

# role:kube-master

############################

# extra SANs for the k8s master certificates; more IPs and domains (e.g. a public IP or domain) can be added

MASTER_CERT_HOSTS:

  - "172.16.232.188"

  - "api.myserver.com"

  #- "www.test.com"

# pod subnet mask length on each node (determines the maximum pod IPs per node)

# if flannel uses --kube-subnet-mgr, it reads this value to assign each node's pod subnet

# https://github.com/coreos/flannel/issues/847

NODE_CIDR_LEN: 24

############################

# role:kube-node

############################

# kubelet root directory

KUBELET_ROOT_DIR: "/var/lib/kubelet"

# maximum pods per node

MAX_PODS: 500

# resources reserved for kube components (kubelet, kube-proxy, dockerd, etc.)

# see templates/kubelet-config.yaml.j2 for the values

KUBE_RESERVED_ENABLED: "no"

# upstream k8s advises against enabling system-reserved casually, unless long-term monitoring shows the system's actual resource usage;

# the reservation may need to grow as the system runs longer, see templates/kubelet-config.yaml.j2

# the defaults assume a 4c/8g VM with a minimal set of system services; increase them on high-end physical machines

# apiserver and other components briefly use a lot of resources during cluster installation, so reserve at least 1g of memory

SYS_RESERVED_ENABLED: "no"

############################

# role:network [flannel,calico,cilium,kube-ovn,kube-router]

############################

# ------------------------------------------- flannel

# [flannel] backend: "host-gw", "vxlan", etc.

FLANNEL_BACKEND: "vxlan"

DIRECT_ROUTING: false

# [flannel] flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64"

flannelVer: "v0.15.1"

flanneld_image: "easzlab.io.local:5000/easzlab/flannel:{{ flannelVer }}"

# ------------------------------------------- calico

# [calico] CALICO_IPV4POOL_IPIP="off" improves network performance; see docs/setup/calico.md for the prerequisites

CALICO_IPV4POOL_IPIP: "Always"

# [calico] host IP used by calico-node; BGP peering uses this address; set it manually or auto-detect

IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"

# [calico] networking backend: brid, vxlan, none

CALICO_NETWORKING_BACKEND: "brid"

# [calico] whether to use route reflectors

# recommended for clusters with more than ~50 nodes

CALICO_RR_ENABLED: false

# CALICO_RR_NODES: the route reflector nodes; defaults to the cluster master nodes if unset

# CALICO_RR_NODES: ["192.168.1.1", "192.168.1.2"]

CALICO_RR_NODES: []

# [calico] supported calico versions: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]

calico_ver: "v3.19.4"

# [calico] calico major.minor version

calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"

# ------------------------------------------- cilium

# [cilium] image version

cilium_ver: "1.11.6"

cilium_connectivity_check: true

cilium_hubble_enabled: false

cilium_hubble_ui_enabled: false

# ------------------------------------------- kube-ovn

# [kube-ovn] node for the OVN DB and OVN control plane; defaults to the first master node

OVN_DB_NODE: "{{ groups['kube_master'][0] }}"

# [kube-ovn] offline image tarball version

kube_ovn_ver: "v1.5.3"

# ------------------------------------------- kube-router

# [kube-router] public clouds usually require ipinip always on; self-hosted environments can use "subnet"

OVERLAY_TYPE: "full"

# [kube-router] NetworkPolicy support switch

FIREWALL_ENABLE: true

# [kube-router] image version

kube_router_ver: "v0.3.1"

busybox_ver: "1.28.4"

############################

# role:cluster-addon

############################

# install coredns automatically

dns_install: "no"

corednsVer: "1.9.3"

ENABLE_LOCAL_DNS_CACHE: false

dnsNodeCacheVer: "1.21.1"

# local dns cache address

LOCAL_DNS_CACHE: "169.254.20.10"

# install metrics-server automatically

metricsserver_install: "no"

metricsVer: "v0.5.2"

# install dashboard automatically

dashboard_install: "no"

dashboardVer: "v2.5.1"

dashboardMetricsScraperVer: "v1.0.8"

# install prometheus automatically

prom_install: "no"

prom_namespace: "monitor"

prom_chart_ver: "35.5.1"

# install nfs-provisioner automatically

nfs_provisioner_install: "no"

nfs_provisioner_namespace: "kube-system"

nfs_provisioner_ver: "v4.0.2"

nfs_storage_class: "managed-nfs-storage"

nfs_server: "192.168.1.10"

nfs_path: "/data/nfs"

# install network-check automatically

network_check_enabled: false

network_check_schedule: "*/5 * * * *"

############################

# role:harbor

############################

# harbor version (full version string)

HARBOR_VER: "v2.1.3"

HARBOR_DOMAIN: "harbor.easzlab.io.local"

HARBOR_TLS_PORT: 8443

# if set 'false', you need to put certs named harbor.pem and harbor-key.pem in directory 'down'

HARBOR_SELF_SIGNED_CERT: true

# install extra component

HARBOR_WITH_NOTARY: false

HARBOR_WITH_TRIVY: false

HARBOR_WITH_CLAIR: false

HARBOR_WITH_CHARTMUSEUM: true

2.4 Deploy the k8s cluster

2.4.1 Initialize the environment

root@ha2-deploy:/etc/kubeasz# ./ezctl setup help

root@ha2-deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 01    # step 01: CA setup and base system preparation

2.4.2 Deploy the etcd cluster:

The startup script path, version, and other settings can be customized first.

root@ha2-deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 02  # step 02: deploy the etcd cluster

Verify the etcd service:

root@etcd1:~# export NODE_IPS="172.16.232.106 172.16.232.107 172.16.232.108"    # define the node list used by the loop

root@etcd1:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done

https://172.16.232.106:2379 is healthy: successfully committed proposal: took = 10.718839ms

https://172.16.232.107:2379 is healthy: successfully committed proposal: took = 12.104588ms

https://172.16.232.108:2379 is healthy: successfully committed proposal: took = 12.340504ms

Note: the output above means the etcd cluster is healthy; anything else indicates a problem.

2.4.3 Deploy the container runtime:

Master and node hosts all need a container runtime (containerd or docker). It can be installed by the deployment tool, with yum/apt, or manually from binaries, so this step is optional when a runtime is already in place.

# verify the sandbox (pause) image setting

root@ha2-deploy:/etc/kubeasz# grep SANDBOX_IMAGE ./clusters/* -R

./clusters/k8s-cluster1/config.yml:SANDBOX_IMAGE: "harbor.magedu.net/baseimages/pause:3.7"

./clusters/k8s-cluster2/config.yml:SANDBOX_IMAGE: "easzlab.io.local:5000/easzlab/pause:3.7"

root@ha2-deploy:/etc/kubeasz# vim roles/containerd/templates/config.toml.j2

{% if ENABLE_MIRROR_REGISTRY %}

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]

          endpoint = ["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]

          endpoint = ["https://gcr.mirrors.ustc.edu.cn"]

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]

          endpoint = ["https://gcr.mirrors.ustc.edu.cn/google-containers/"]

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]

          endpoint = ["https://quay.mirrors.ustc.edu.cn"]

{% endif %}

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.magedu.net"]

          endpoint = ["https://harbor.magedu.net"]

        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.magedu.net".tls]

          insecure_skip_verify = true

        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.magedu.net".auth]

          username = "admin"

          password = "123456"

Note: add the harbor.magedu.net registry section above so that nodes deployed by ansible authenticate to harbor automatically; it can also be added manually to this config file on individual node servers.

Deploy the runtime:

root@ha2-deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 03

2.4.4 Deploy the masters:

Startup parameters, paths, and other settings can be customized:

root@ha2-deploy:/etc/kubeasz# vim roles/kube-master/tasks/main.yml    # customizable settings

root@ha2-deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 04

2.4.5 Deploy the nodes:

root@ha2-deploy:/etc/kubeasz# vim roles/kube-node/tasks/main.yml # customizable settings

root@ha2-deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 05

Verify the nodes:

root@ha2-deploy:/etc/kubeasz# kubectl get node

NAME            STATUS                    ROLES    AGE    VERSION

172.16.232.101  Ready,SchedulingDisabled  master  13m    v1.24.2

172.16.232.102  Ready,SchedulingDisabled  master  13m    v1.24.2

172.16.232.111  Ready                      node    100s  v1.24.2

172.16.232.112  Ready                      node    100s  v1.24.2

2.4.6 Deploy the calico network service:

root@ha2-deploy:/etc/kubeasz# vim ./clusters/k8s-cluster1/config.yml    # check the calico version in the cluster config

# [calico] supported calico versions: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]

calico_ver: "v3.19.4"

Check the calico image references:

root@ha2-deploy:/etc/kubeasz# grep image roles/calico/templates/calico-v3.19.yaml.j2

image: easzlab.io.local:5000/calico/cni:{{ calico_ver }}

          image: easzlab.io.local:5000/calico/pod2daemon-flexvol:{{ calico_ver }}

          image: easzlab.io.local:5000/calico/node:{{ calico_ver }}

          image: easzlab.io.local:5000/calico/kube-controllers:{{ calico_ver }}

Verify the calico images exist locally:

root@ha2-deploy:/etc/kubeasz# docker images |grep calico

Push the images to the local harbor:

root@ha2-deploy:/etc/kubeasz# docker tag calico/node:v3.19.4 harbor.magedu.net/baseimages/calico-node:v3.19.4

root@ha2-deploy:/etc/kubeasz# docker push harbor.magedu.net/baseimages/calico-node:v3.19.4

root@ha2-deploy:/etc/kubeasz# docker tag calico/pod2daemon-flexvol:v3.19.4 harbor.magedu.net/baseimages/calico-pod2daemon-flexvol:v3.19.4

root@ha2-deploy:/etc/kubeasz# docker push harbor.magedu.net/baseimages/calico-pod2daemon-flexvol:v3.19.4

root@ha2-deploy:/etc/kubeasz# docker tag calico/cni:v3.19.4 harbor.magedu.net/baseimages/calico-cni:v3.19.4

root@ha2-deploy:/etc/kubeasz# docker push harbor.magedu.net/baseimages/calico-cni:v3.19.4

root@ha2-deploy:/etc/kubeasz# docker tag calico/kube-controllers:v3.19.4 harbor.magedu.net/baseimages/calico-kube-controllers:v3.19.4

root@ha2-deploy:/etc/kubeasz# docker push harbor.magedu.net/baseimages/calico-kube-controllers:v3.19.4

Change the image references in the yaml template:

root@ha2-deploy:/etc/kubeasz# vim roles/calico/templates/calico-v3.19.yaml.j2

root@ha2-deploy:/etc/kubeasz# grep image roles/calico/templates/calico-v3.19.yaml.j2

  image: harbor.magedu.net/baseimages/calico-cni:v3.19.4

          image: harbor.magedu.net/baseimages/calico-pod2daemon-flexvol:v3.19.4

          image: harbor.magedu.net/baseimages/calico-node:v3.19.4

          image: harbor.magedu.net/baseimages/calico-kube-controllers:v3.19.4

root@ha2-deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 06

Verify calico:

root@master1:~# calicoctl node status


Verify the network:

Create test pods and check that cross-host pod-to-pod communication works (a sketch of the check follows the two kubectl run commands):

root@master1:~# kubectl run net-test1 --image=alpine sleep 360000

root@master1:~# kubectl run net-test2 --image=alpine sleep 360000
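A sketch of the check (the pod IP below is a placeholder; take the real one from the -o wide output):

root@master1:~# kubectl get pod -o wide    # note each pod's IP and which node it landed on
root@master1:~# kubectl exec -it net-test1 -- ping -c2 <IP-of-net-test2>    # cross-host pod-to-pod traffic
root@master1:~# kubectl exec -it net-test1 -- ping -c2 223.6.6.6    # external connectivity by IP (DNS is not deployed yet)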

2.5 Cluster node scaling:

Cluster management mainly covers adding and removing master and node hosts, plus monitoring.

2.5.1 Add a master node:

root@ha2-deploy:/etc/kubeasz# ./ezctl help

root@ha2-deploy:/etc/kubeasz# ./ezctl add-master k8s-cluster1 172.16.232.103

2.5.2 Add a worker node:

root@ha2-deploy:/etc/kubeasz# ./ezctl help

root@ha2-deploy:/etc/kubeasz# ./ezctl add-node k8s-cluster1 172.16.232.113

Verify the current nodes:

root@ha2-deploy:/etc/kubeasz# kubectl get node

Verify calico status:

root@master3:~# calicoctl node status

Verify the routes on the node:

root@master3:~# route -n

2.6 Cluster upgrade

Download the upgrade binary packages and upload them to the deploy server.

Unpack the packages:

root@ha2-deploy:/usr/local/src# tar xf kubernetes-v1.24.3-client-linux-amd64.tar.gz

root@ha2-deploy:/usr/local/src# tar xf kubernetes-v1.24.3-node-linux-amd64.tar.gz

root@ha2-deploy:/usr/local/src# tar xf kubernetes-v1.24.3-server-linux-amd64.tar.gz

root@ha2-deploy:/usr/local/src# tar xf  kubernetes-v1.24.3.tar.gz

root@ha2-deploy:/usr/local/src# ll kubernetes/cluster/addons/    # this directory contains official addon yaml files that can be adapted and used directly

1. Upgrade the masters

Upgrade one master at a time. Taking master1 (172.16.232.101) as the example: first remove 101 from the kube-lb config on every node:

root@node1:~# vim /etc/kube-lb/conf/kube-lb.conf    # comment out the server line for 172.16.232.101
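kube-lb is the nginx-based stream proxy that kubeasz installs on each node; the edit looks roughly like this (a sketch, the exact file layout may differ slightly between kubeasz versions):

stream {
    upstream backend {
        #server 172.16.232.101:6443    max_fails=2 fail_timeout=3s;    # commented out while master1 is upgraded
        server 172.16.232.102:6443    max_fails=2 fail_timeout=3s;
    }
    server {
        listen 127.0.0.1:6443;
        proxy_pass backend;
    }
}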


root@node1:~# systemctl reload kube-lb.service      # reload the configuration without dropping existing connections

Stop the services on master1:

root@master1:~# systemctl stop kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet

Copy the new binaries to master1:

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# scp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl 172.16.232.101:/usr/local/bin/

Verify:

root@master1:~# /usr/local/bin/kube-apiserver --version

Kubernetes v1.24.3

Start the services:

root@master1:~# systemctl start kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# kubectl get node


Add master1 back to the kube-lb config on every node:

root@node1:~# vim /etc/kube-lb/conf/kube-lb.conf    # uncomment the server line for 172.16.232.101, then reload kube-lb again


Upgrade the remaining master nodes following the same steps.

Verify after the master upgrade is complete:
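For example:

root@ha2-deploy:/etc/kubeasz# kubectl get node    # every master should now report VERSION v1.24.3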


2. Upgrade the nodes

Drain each node before upgrading so that its pods are evicted to other nodes.

Upgrade one node at a time; node1 is the example:

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# kubectl cordon 172.16.232.111

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# kubectl drain 172.16.232.111 --ignore-daemonsets --force

root@node1:~# systemctl stop kubelet kube-proxy.service  # stop the services on the node

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# scp kubelet kube-proxy kubectl 172.16.232.111:/usr/local/bin/  # copy the new binaries to the node

root@node1:~# systemctl start kubelet kube-proxy.service  # start the services

Verify:
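For example:

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# kubectl get node    # 172.16.232.111 now reports v1.24.3 (still SchedulingDisabled until uncordoned)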


Uncordon the node:

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# kubectl uncordon 172.16.232.111


The node upgrade is complete.

Replace the binaries under /etc/kubeasz/bin/ with the new versions so that nodes added later use them:

root@ha2-deploy:/usr/local/src/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl /etc/kubeasz/bin/

*Automated upgrade (business risk: it may interrupt running workloads):

root@ha2-deploy:/etc/kubeasz# ./ezctl upgrade k8s-cluster1

3. Upgrade containerd

The normal procedure: drain the pods first, then stop the services (or reboot the server), then replace the binaries and start the services.

root@ha2-deploy:/usr/local/src# wget https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz

root@ha2-deploy:/usr/local/src# tar xvf containerd-1.6.6-linux-amd64.tar.gz

root@ha2-deploy:/usr/local/src/bin# \cp ./* /etc/kubeasz/bin/containerd-bin/

On the node, disable the services and reboot, then copy the binaries over:

root@node1:~# systemctl disable kubelet kube-proxy containerd.service

root@node1:~# reboot

root@ha2-deploy:/usr/local/src/bin# scp ./* 172.16.232.111:/usr/local/bin/

root@node1:~# systemctl start kubelet.service kube-proxy.service containerd.service

root@node1:~# systemctl enable kubelet.service kube-proxy.service containerd.service

Upgrade the remaining nodes following the same steps, then verify:
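A minimal check (version numbers assume the 1.6.6 upgrade above):

root@node1:~# containerd --version    # should report v1.6.6
root@ha2-deploy:/etc/kubeasz# kubectl get node -o wide    # the CONTAINER-RUNTIME column should show containerd://1.6.6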


Enable kubectl command auto-completion:

root@ha2-deploy:~# source <(kubectl completion bash)

root@ha2-deploy:~# vim /etc/profile

Add the line: source <(kubectl completion bash)

3. DNS service

1. Deploy CoreDNS:

https://github.com/coredns/coredns

https://coredns.io/

https://github.com/coredns/deployment/tree/master/kubernetes # deployment manifests

root@ha2-deploy:~/yaml/20230808#  cd /usr/local/src/kubernetes/cluster/addons/dns/coredns/

root@ha2-deploy:/usr/local/src/kubernetes/cluster/addons/dns/coredns# cp coredns.yaml.base /root/yaml/20230808/

root@ha2-deploy:~/yaml/20230808#  mv coredns.yaml.base coredns-v1.9.3.yaml

root@ha2-deploy:~/yaml/20230808# vim coredns-v1.9.3.yaml


  Corefile: |

    .:53 {

        errors

        health {

            lameduck 5s

        }

        ready

        kubernetes magedu.local in-addr.arpa ip6.arpa {            # the cluster domain; must match CLUSTER_DNS_DOMAIN in /etc/kubeasz/clusters/k8s-cluster1/hosts

            pods insecure

            fallthrough in-addr.arpa ip6.arpa

            ttl 30

        }

        prometheus :9153

        forward . /etc/resolv.conf {

            max_concurrent 1000

        }

        cache 30

        loop

        reload

        loadbalance

    }

---

      nodeSelector:

        kubernetes.io/os: linux

      containers:

      - name: coredns

        image: harbor.magedu.net/baseimages/coredns:1.9.3      # use the local harbor image to speed up deployment

        imagePullPolicy: IfNotPresent

        resources:

          limits:           

            memory: 256Mi

            cpu: 200m

          requests:

            cpu: 100m

            memory: 70Mi

        args: [ "-conf", "/etc/coredns/Corefile" ]

        volumeMounts:

        - name: config-volume

          mountPath: /etc/coredns

          readOnly: true

---

apiVersion: v1

kind: Service

metadata:

  name: kube-dns

  namespace: kube-system

  annotations:

    prometheus.io/port: "9153"

    prometheus.io/scrape: "true"

  labels:

    k8s-app: kube-dns

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "CoreDNS"

spec:

  selector:

    k8s-app: kube-dns

  clusterIP: 10.100.0.2      # must match the nameserver in each pod's /etc/resolv.conf (check with: cat /etc/resolv.conf inside a container)

  ports:

Push the coredns v1.9.3 image to harbor (see the commands below), then apply the manifest:
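The push follows the same pattern as the earlier images (a sketch assuming the upstream coredns/coredns:1.9.3 image):

root@ha2-deploy:~/yaml/20230808# docker pull coredns/coredns:1.9.3
root@ha2-deploy:~/yaml/20230808# docker tag coredns/coredns:1.9.3 harbor.magedu.net/baseimages/coredns:1.9.3
root@ha2-deploy:~/yaml/20230808# docker push harbor.magedu.net/baseimages/coredns:1.9.3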

root@ha2-deploy:~/yaml/20230808# kubectl apply -f coredns-v1.9.3.yaml

Verify name resolution from inside a container:

root@ha2-deploy:~/yaml# kubectl exec -it -n myserver linux70-nginx-deployment-55dc5fdcf9-qddjd -- bash

root@linux70-nginx-deployment-55dc5fdcf9-qddjd:/# ping www.baidu.com

PING www.a.shifen.com (110.242.68.4) 56(84) bytes of data.

64 bytes from 110.242.68.4 (110.242.68.4): icmp_seq=1 ttl=127 time=15.3 ms

64 bytes from 110.242.68.4 (110.242.68.4): icmp_seq=2 ttl=127 time=14.0 ms
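Cluster-internal names should resolve through CoreDNS as well (a sketch; the point is the name resolving, since ClusterIPs often do not answer ICMP):

root@linux70-nginx-deployment-55dc5fdcf9-qddjd:/# cat /etc/resolv.conf    # nameserver should be 10.100.0.2
root@linux70-nginx-deployment-55dc5fdcf9-qddjd:/# ping -c1 kubernetes.default.svc.magedu.local    # should resolve to the service IP 10.100.0.1 even if no reply comes back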

4. Dashboard

Deploy dashboard, the Kubernetes web management UI.

4.1 Deploy dashboard

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.6.0/aio/deploy/recommended.yaml

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# mv recommended.yaml dashboard-v2.6.0.yaml

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# docker pull kubernetesui/dashboard:v2.6.0

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# docker pull kubernetesui/metrics-scraper:v1.0.8

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# docker tag kubernetesui/dashboard:v2.6.0 harbor.magedu.net/baseimages/dashboard:v2.6.0

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# docker push harbor.magedu.net/baseimages/dashboard:v2.6.0

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# docker tag kubernetesui/metrics-scraper:v1.0.8 harbor.magedu.net/baseimages/metrics-scraper:v1.0.8

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# docker push harbor.magedu.net/baseimages/metrics-scraper:v1.0.8

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# vim dashboard-v2.6.0.yaml
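The usual edits in this file (a sketch; the NodePort value is an assumption):

# point both image: fields at the harbor copies pushed above:
#   image: harbor.magedu.net/baseimages/dashboard:v2.6.0
#   image: harbor.magedu.net/baseimages/metrics-scraper:v1.0.8
# expose the kubernetes-dashboard Service externally, e.g. spec.type: NodePort with nodePort: 30002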


root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# kubectl apply -f dashboard-v2.6.0.yaml

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# vim admin-user.yaml
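A minimal admin-user.yaml matching the names used below (a sketch):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard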

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# kubectl apply -f admin-user.yaml

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# vim admin-secret.yaml
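Kubernetes 1.24 no longer auto-creates ServiceAccount token secrets, so admin-secret.yaml creates one explicitly (a sketch matching the secret name queried below):

apiVersion: v1
kind: Secret
metadata:
  name: dashboard-admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token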

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# kubectl apply -f admin-secret.yaml

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# kubectl get secrets -A

root@ha2-deploy:~/yaml/20230808/dashboard-v2.6.0# kubectl describe secrets dashboard-admin-user -n kubernetes-dashboard 

token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImxBaVIzX3FzWUM1d0tKMVRZVW9wa0xoTlpSY1l2RmV5aVlRUmN2ZjN6a1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdXNlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNGRmMjU4YzYtZTU0MC00N2MyLWIyZTctMDdiMWNkY2QwOGI1Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmFkbWluLXVzZXIifQ.fsyuQMi1PAsLSzTFl8FDfPH-Z7EAGKktCFVfTz_Ra5J8ChFr8_3lnFh7iXtFiRE5QSNzTBEH7ofWo_t6734UW2_5pu8Xz2QiwCDir8zuNrh5fjZ6naVmorldsoqUkLcDqzkDXw5CSmlMhfhGHeFvLkvBvgPh6KRsVpCOw8UgpLhfngDilywpKYheevY8J4EplTh34P4IcXzjhT6ZiAgAJYFS_9b2DZoUzYB-TM8w5XzSw8BvXUI-Z9Ab2HSWyip01mApxEOvcc2x6xbJh2SL10ntPjji9c98HWx74EI4RD5CXiRZoYQ15-PJQ_nvy16w5wPTw8u3Iq_0MlX6pxhw3A

Copy the token and use it to log in to the dashboard in a browser.
