Kubernetes接入安全容器(Kata Containers)

(配图缺失:原文此处为一张图片,上传失败)

1:环境介绍

主机 IP Config 系统
kubernetes-master 10.0.0.12 2C4G CentOS 7.9
kubernetes-worker 10.0.0.13 2C2G CentOS 7.9

(配图缺失:原文此处为一张图片,上传失败)

虚拟机一定是要开启CPU虚拟化功能才可以

2:基础配置(全部节点配置)

1:主机名配置
[root@10.0.0.12 ~]# hostnamectl set-hostname kubernetes-master-1
[root@10.0.0.13 ~]# hostnamectl set-hostname kubernetes-worker-1

2:配置Hosts解析
cat << eof>> /etc/hosts
10.0.0.12   kubernetes-master-1
10.0.0.13   kubernetes-worker-1
eof

3:配置时间同步
yum install -y ntpdate    # 安装NTP
timedatectl set-timezone Asia/Shanghai     # 修改时区
ntpdate ntp.aliyun.com    # 同步时间
echo "0 */1 * * * /usr/sbin/ntpdate ntp.aliyun.com" >> /var/spool/cron/root    # 写入计划任务(crontab条目不能直接在shell中执行)

4:关闭防火墙
[root@kubernetes-master-1 ~]# systemctl disable firewalld --now

5:关闭swap分区与selinux
# 临时关闭selinux
[root@kubernetes-master-1 ~]# setenforce 0    
# 永久关闭
[root@kubernetes-master-1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
# 临时关闭
[root@kubernetes-master-1 ~]# swapoff -a
# 永久关闭
[root@kubernetes-master-1 ~]# sed -i 's/.*swap.*/#&/g' /etc/fstab

6:加载IPVS模块
[root@kubernetes-master-1 ~]# yum -y install ipset ipvsadm
[root@kubernetes-master-1 ~]# cat << eof>> /etc/sysconfig/modules/ipvs.modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
eof
# 说明:CentOS 7(内核3.10)上IPVS模式还依赖nf_conntrack_ipv4模块,
# 否则下面lsmod | grep nf_conntrack将无输出

[root@kubernetes-master-1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

3:部署Containerd

[root@kubernetes-master-1 ~]# cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

# 1.20+需要开启br_netfilter
[root@kubernetes-master-1 ~]# modprobe overlay
[root@kubernetes-master-1 ~]# modprobe br_netfilter

# 配置内核参数
[root@kubernetes-master-1 ~]# cat << eof>>/etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
eof

[root@kubernetes-master-1 ~]# sysctl --system

# 部署源并安装containerd
[root@kubernetes-master-1 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@kubernetes-master-1 ~]# yum list |grep containerd
[root@kubernetes-master-1 ~]# yum -y install containerd.io
[root@kubernetes-master-1 ~]# mkdir -p /etc/containerd
[root@kubernetes-master-1 ~]# containerd config default > /etc/containerd/config.toml

# 修改cgroup Driver为systemd(kubelet侧已配置cgroupDriver: systemd,两者需保持一致)
[root@kubernetes-master-1 ~]# sed -ri 's#systemd_cgroup = false#systemd_cgroup = true#' /etc/containerd/config.toml
# 注意:若runc运行时类型为io.containerd.runc.v2(见后文config.toml),cgroup驱动
# 由 [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] 下的
# SystemdCgroup = true 控制,上面的systemd_cgroup字段对v2 shim不生效,需一并确认

# 更改sandbox_image
[root@kubernetes-master-1 ~]# sed -ri 's#k8s.gcr.io\/pause:3.6#registry.aliyuncs.com\/google_containers\/pause:3.7#' /etc/containerd/config.toml

4:部署Kata

检查当前`虚拟机`是否支持嵌套虚拟化
# 在虚拟主机上执行,N表示不支持
[root@kubernetes-master-1 ~]# cat /sys/module/kvm_intel/parameters/nested
N
# 接下来我们来开启嵌套虚拟化
[root@kubernetes-master-1 ~]# modprobe -r kvm-intel
[root@kubernetes-master-1 ~]# modprobe kvm-intel nested=1
[root@kubernetes-master-1 ~]# cat /sys/module/kvm_intel/parameters/nested
Y

# 查看虚拟机是否支持虚拟化
[root@kubernetes-master-1 ~]# cat /proc/cpuinfo |grep vmx

# 配置kata仓库及安装kata
# 前提是我们需要安装了  yum-utils
[root@kubernetes-master-1 ~]# yum-config-manager --add-repo http://download.opensuse.org/repositories/home:/katacontainers:/releases:/x86_64:/stable-1.11/CentOS_7/home:katacontainers:releases:x86_64:stable-1.11.repo

# 安装
[root@kubernetes-master-1 ~]# yum -y install kata-runtime kata-proxy kata-shim --nogpgcheck

# 检查kata是否安装成功
[root@kubernetes-master-1 ~]# kata-runtime version
kata-runtime  : 1.11.5
   commit   : 3fc5e06c2e50a265e97aae4b730e21e04969633e
   OCI specs: 1.0.1-dev


# 配置containerd动态使用kata(默认runc)
[root@kubernetes-master-1 ~]# cat /etc/containerd/config.toml 
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
plugin_dir = ""
disabled_plugins = []
required_plugins = []
oom_score = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  tcp_address = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[ttrpc]
  address = ""
  uid = 0
  gid = 0

[debug]
  address = ""
  uid = 0
  gid = 0
  level = ""

[metrics]
  address = ""
  grpc_histogram = false

[cgroup]
  path = ""

[timeouts]
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[plugins]
  [plugins."io.containerd.gc.v1.scheduler"]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"
  [plugins."io.containerd.grpc.v1.cri"]
    disable_tcp_service = true
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    stream_idle_timeout = "4h0m0s"
    enable_selinux = false
    selinux_category_range = 1024
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"
    stats_collect_period = 10
    systemd_cgroup = false
    enable_tls_streaming = false
    max_container_log_line_size = 16384
    disable_cgroup = false
    disable_apparmor = false
    restrict_oom_score_adj = false
    max_concurrent_downloads = 3
    disable_proc_mount = false
    unset_seccomp_profile = ""
    tolerate_missing_hugetlb_controller = true
    disable_hugetlb_controller = true
    ignore_image_defined_volumes = false
    [plugins."io.containerd.grpc.v1.cri".containerd]
      snapshotter = "overlayfs"
      default_runtime_name = "runc"
      no_pivot = false
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        runtime_type = "io.containerd.runtime.v1.linux"
      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        runtime_type = "io.containerd.runtime.v1.linux"
        runtime_engine = "/usr/bin/kata-runtime"
        runtime_root = ""
        privileged_without_host_devices = false
        base_runtime_spec = ""
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          runtime_engine = ""
          runtime_root = ""
          privileged_without_host_devices = false
          base_runtime_spec = ""
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
         [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
         runtime_type = "io.containerd.kata.v2"
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.katacli]
         runtime_type = "io.containerd.runc.v1"
         [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.katacli.options]
           NoPivotRoot = false
           NoNewKeyring = false
           ShimCgroup = ""
           IoUid = 0
           IoGid = 0
           BinaryName = "/usr/bin/kata-runtime"
           Root = ""
           CriuPath = ""
           SystemdCgroup = false
    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      max_conf_num = 1
      conf_template = ""
    [plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://6ze43vnb.mirror.aliyuncs.com"]
    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = ""
    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""
  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"
  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"
  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"
  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false
  [plugins."io.containerd.runtime.v1.linux"]
    shim = "containerd-shim"
    runtime = "runc"
    runtime_root = ""
    no_shim = false
    shim_debug = false
  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]
  [plugins."io.containerd.snapshotter.v1.devmapper"]
    root_path = ""
    pool_name = ""
    base_image_size = ""
    async_remove = false

[root@kubernetes-master-1 ~]# systemctl daemon-reload
[root@kubernetes-master-1 ~]# systemctl restart containerd.service
[root@kubernetes-master-1 ~]# systemctl status containerd.service 

5:部署Kubernetes

# 添加源地址
[root@kubernetes-master-1 ~]# cat << eof>> /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
eof

# 安装kubeadm  kubectl  kubelet
[root@kubernetes-master-1 ~]# yum -y install kubeadm-1.24.0-0 kubelet-1.24.0-0 kubectl-1.24.0-0

# 设置crictl
[root@kubernetes-master-1 ~]# cat << EOF >> /etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 10 
debug: false
EOF

# 生成配置
[root@kubernetes-master-1 ~]# kubeadm config print init-defaults > kubeadm-init.yaml

[root@kubernetes-master-1 ~]# cat kubeadm-init.yaml 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.0.12
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: kubernetes-master-1
  taints:
  - effect: "NoSchedule"
    key: "node-role.kubernetes.io/master"
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.24.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 200.1.0.0/16
  podSubnet: 100.1.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

# 查看所需镜像列表
[root@kubernetes-master-1 ~]# kubeadm config images list --config kubeadm-init.yaml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.24.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.24.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.24.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.24.0
registry.aliyuncs.com/google_containers/pause:3.7
registry.aliyuncs.com/google_containers/etcd:3.5.3-0
registry.aliyuncs.com/google_containers/coredns:v1.8.6

# 预拉取镜像
[root@kubernetes-master-1 ~]# kubeadm config images pull --config kubeadm-init.yaml
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.7
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.3-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.8.6

# 初始化集群
[root@kubernetes-master-1 ~]# kubeadm init --config=kubeadm-init.yaml

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
  echo "source <(kubectl completion bash)" >> ~/.bashrc
  source ~/.bashrc
  
Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.12:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:6fe3166a52d72deeb6b90e77c86caa75dce2ee13656f1ce238a25c59af6e2503 

# 部署网络插件
[root@kubernetes-master-1 ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml

6:验证Kata的可用性

1:创建runtimeClass

[root@kubernetes-master-1 ~]# cat kata.yaml 
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
  name: kata-containers
handler: kata

2:创建3个Nginx测试容器并使用Kata
[root@kubernetes-master-1 ~]# cat nginx-1.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-1
  template:
    metadata:
      labels:
        app: nginx-1
    spec:
      runtimeClassName: kata-containers
      containers:
      - name: nginx-1
        image: nginx:alpine
        ports:
        - containerPort: 80
[root@kubernetes-master-1 ~]# cat nginx-2.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-2
  template:
    metadata:
      labels:
        app: nginx-2
    spec:
      runtimeClassName: kata-containers
      containers:
      - name: nginx-2
        image: nginx:alpine
        ports:
        - containerPort: 80
[root@kubernetes-master-1 ~]# cat nginx-3.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-3
  annotations:
    # 注意:此注解写在Deployment的metadata上不会传递到Pod,containerd无法据此
    # 将其识别为untrusted workload;如需生效应写在spec.template.metadata.annotations。
    # 因此nginx-3实际仍由runc运行(见下文内核版本对比,它显示的是宿主机内核)
    io.kubernetes.cri.untrusted-workload: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-3
  template:
    metadata:
      labels:
        app: nginx-3
    spec:
      containers:
      - name: nginx-3
        image: nginx:alpine
        ports:
        - containerPort: 80


[root@kubernetes-master-1 ~]# kubectl get pod
NAME                       READY   STATUS    RESTARTS   AGE
nginx-1-6cd7c88fff-x89vv   1/1     Running   0          3m58s
nginx-2-5d89b8b869-dpprr   1/1     Running   0          3m56s
nginx-3-78957bd984-pv4qv   1/1     Running   0          3m52s

# 去worker的Kata查看容器
[root@kubernetes-worker-1 ~]# kata-runtime list
ID                                                                 PID         STATUS      BUNDLE                                                                                                                  CREATED                          OWNER
3d847b07ec3fbf228abb90557affd53e90ac2b8483860515a953e5f365e50194   -1          running     /run/containerd/io.containerd.runtime.v2.task/k8s.io/3d847b07ec3fbf228abb90557affd53e90ac2b8483860515a953e5f365e50194   2022-07-31T06:03:40.341752158Z   #0
912a2806949ff0f46d26fedf0c51664126d7dd9429b888b3a73e186790993daa   -1          running     /run/containerd/io.containerd.runtime.v2.task/k8s.io/912a2806949ff0f46d26fedf0c51664126d7dd9429b888b3a73e186790993daa   2022-07-31T06:03:40.172305331Z   #0
04fdccc4d59256191ab98dbbdf1d432320f640f8e26c7434bdcd48d1950341b1   -1          running     /run/containerd/io.containerd.runtime.v2.task/k8s.io/04fdccc4d59256191ab98dbbdf1d432320f640f8e26c7434bdcd48d1950341b1   2022-07-31T06:03:38.209287414Z   #0
6cae70f9863b788accb7a128961c0938d0054835eef37aae9990f47aa6f688f9   -1          running     /run/containerd/io.containerd.runtime.v2.task/k8s.io/6cae70f9863b788accb7a128961c0938d0054835eef37aae9990f47aa6f688f9   2022-07-31T06:03:38.409381969Z   #0


# 再次启动一个Nginx容器
[root@kubernetes-master-1 ~]# kubectl exec -it pods/nginx-1-6cd7c88fff-x89vv -- uname -r
5.4.32-11.2.container
[root@kubernetes-master-1 ~]# kubectl exec -it pods/nginx-2-5d89b8b869-dpprr -- uname -r
5.4.32-11.2.container
[root@kubernetes-master-1 ~]# kubectl exec -it pods/nginx-3-78957bd984-pv4qv -- uname -r
5.4.208-1.el7.elrepo.x86_64

很明显的看到前面两个内核和第三个就不一样了
这就和我们的内核隔离开了
©著作权归作者所有,转载或内容合作请联系作者