Getting Started with Kubernetes, Week 5

1. Running a Redis Cluster with a StatefulSet Controller


1.1 Build the Redis image

# Write the Dockerfile
# cat Dockerfile 
FROM harbor.zhao.net/baseimages/zhao-centos-base:7.9.2009 

MAINTAINER jwzhao "jwzhao@zhao.net"

ADD redis-4.0.14.tar.gz /usr/local/src
RUN ln -sv /usr/local/src/redis-4.0.14 /usr/local/redis && cd /usr/local/redis && make && cp src/redis-cli /usr/sbin/ && cp src/redis-server  /usr/sbin/ && mkdir -pv /data/redis-data 
ADD redis.conf /usr/local/redis/redis.conf 
ADD run_redis.sh /usr/local/redis/run_redis.sh
RUN chmod a+x /usr/local/redis/run_redis.sh

EXPOSE 6379

CMD ["/usr/local/redis/run_redis.sh"]
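
The Dockerfile references run_redis.sh, which these notes do not otherwise show. A minimal sketch consistent with the image layout (and assuming redis.conf sets daemonize yes plus requirepass 123456, per the auth test in 1.2):

# cat run_redis.sh 
#!/bin/bash
# Start redis in the background ("daemonize yes" assumed in redis.conf),
# then tail a file so PID 1 stays in the foreground
/usr/sbin/redis-server /usr/local/redis/redis.conf
tail -f /etc/hosts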

# Build the image and push it to the local registry
# cat build-command.sh 
#!/bin/bash
TAG=$1
nerdctl build -t harbor.zhao.net/zhao/redis:${TAG} .
nerdctl push  harbor.zhao.net/zhao/redis:${TAG}

# bash build-command.sh v4.0.14

1.2 Test the Redis image

# Start a container
# nerdctl run -it -p 6379:6379 harbor.zhao.net/zhao/redis:v4.0.14

# Exec into the container and test
root@k8s-master2:~# nerdctl exec -it 7dcbcd6645e2 bash
[root@7dcbcd6645e2 /]# redis-cli                 
127.0.0.1:6379> auth
(error) ERR wrong number of arguments for 'auth' command
127.0.0.1:6379> auth 123456
OK
127.0.0.1:6379> set key1 value1
OK
127.0.0.1:6379> get key1
"value1"

1.3 Prepare PVs

Use the NFS server installed earlier:

# mkdir /data/k8sdata/web/redis{0,1,2,3,4,5}

Create the PVs:

# cat redis-cluster-pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv0
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.20.20.81
    path: /data/k8sdata/web/redis0 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv1
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.20.20.81
    path: /data/k8sdata/web/redis1 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv2
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.20.20.81
    path: /data/k8sdata/web/redis2 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv3
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.20.20.81
    path: /data/k8sdata/web/redis3 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv4
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.20.20.81
    path: /data/k8sdata/web/redis4 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv5
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.20.20.81
    path: /data/k8sdata/web/redis5

# kubectl apply -f redis-cluster-pv.yaml
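
The six PV definitions above differ only in the name and path suffix, so the same manifest could equally be generated with a short loop (a sketch; appends all six documents to the file):

# for i in 0 1 2 3 4 5; do
cat >> redis-cluster-pv.yaml <<EOF
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv${i}
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.20.20.81
    path: /data/k8sdata/web/redis${i}
EOF
done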

Verify:

# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                                 STORAGECLASS          REASON   AGE
redis-cluster-pv0                          5Gi        RWO            Retain           Available                                                                        7s
redis-cluster-pv1                          5Gi        RWO            Retain           Available                                                                        7s
redis-cluster-pv2                          5Gi        RWO            Retain           Available                                                                        7s
redis-cluster-pv3                          5Gi        RWO            Retain           Available                                                                        7s
redis-cluster-pv4                          5Gi        RWO            Retain           Available                                                                        7s
redis-cluster-pv5                          5Gi        RWO            Retain           Available                                                                        7s

1.4 Deploy the Redis cluster

1.4.1 Prepare the Redis configuration file

# cat redis.conf 
appendonly yes
cluster-enabled yes
cluster-config-file /var/lib/redis/nodes.conf
cluster-node-timeout 5000
dir /var/lib/redis
port 6379

# Create the ConfigMap
# kubectl create configmap redis-conf --from-file=redis.conf -n web
# kubectl describe cm redis-conf -n web
Name:         redis-conf
Namespace:    web
Labels:       <none>
Annotations:  <none>

Data
====
redis.conf:
----
appendonly yes
cluster-enabled yes
cluster-config-file /var/lib/redis/nodes.conf
cluster-node-timeout 5000
dir /var/lib/redis
port 6379


BinaryData
====

Events:  <none>

1.4.2 Create the Redis cluster

# Write the YAML manifest (note: the StatefulSet uses the stock redis:4.0.14 image together with the ConfigMap above, not the custom image built in 1.1)
# cat redis.yaml 
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: web
  labels:
    app: redis
spec:
  selector:
    app: redis
    appCluster: redis-cluster
  ports:
  - name: redis
    port: 6379
  clusterIP: None
  
---
apiVersion: v1
kind: Service
metadata:
  name: redis-access
  namespace: web
  labels:
    app: redis
spec:
  selector:
    app: redis
    appCluster: redis-cluster
  ports:
  - name: redis-access
    protocol: TCP
    port: 6379
    targetPort: 6379

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis
  namespace: web
spec:
  serviceName: redis
  replicas: 6
  selector:
    matchLabels:
      app: redis
      appCluster: redis-cluster
  template:
    metadata:
      labels:
        app: redis
        appCluster: redis-cluster
    spec:
      #terminationGracePeriodSeconds: 20
      #affinity:
      #  podAntiAffinity:
      #    preferredDuringSchedulingIgnoredDuringExecution:
      #    - weight: 100
      #      podAffinityTerm:
      #        labelSelector:
      #          matchExpressions:
      #          - key: app
      #            operator: In
      #            values:
      #            - redis
      #        topologyKey: kubernetes.io/hostname
      containers:
      - name: redis
        image: redis:4.0.14
        command:
          - "redis-server"
        args:
          - "/etc/redis/redis.conf"
          - "--protected-mode"
          - "no"
        resources:
          requests:
            cpu: "500m"
            memory: "500Mi"
        ports:
        - containerPort: 6379
          name: redis
          protocol: TCP
        - containerPort: 16379
          name: cluster
          protocol: TCP
        volumeMounts:
        - name: conf
          mountPath: /etc/redis
        - name: data
          mountPath: /var/lib/redis
      volumes:
      - name: conf
        configMap:
          name: redis-conf
          items:
          - key: redis.conf
            path: redis.conf
  volumeClaimTemplates:
  - metadata:
      name: data
      namespace: web
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5Gi

# kubectl apply -f redis.yaml 

Verify:

# kubectl get pod -n web
NAME                                          READY   STATUS    RESTARTS   AGE
redis-0                                       1/1     Running   0          3m16s
redis-1                                       1/1     Running   0          3m2s
redis-2                                       1/1     Running   0          2m15s
redis-3                                       1/1     Running   0          103s
redis-4                                       1/1     Running   0          100s
redis-5                                       1/1     Running   0          96s

# Persistent storage: each node has its own dedicated volume. Note that PVC-to-PV binding is first-available rather than name-matched (here pv2 bound to data-redis-3 and pv3 to data-redis-2).
# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS        CLAIM                                 STORAGECLASS          REASON   AGE
redis-cluster-pv0                          5Gi        RWO            Retain           Bound         web/data-redis-0                                                     11h
redis-cluster-pv1                          5Gi        RWO            Retain           Bound         web/data-redis-1                                                     11h
redis-cluster-pv2                          5Gi        RWO            Retain           Bound         web/data-redis-3                                                     11h
redis-cluster-pv3                          5Gi        RWO            Retain           Bound         web/data-redis-2                                                     11h
redis-cluster-pv4                          5Gi        RWO            Retain           Bound         web/data-redis-4                                                     11h
redis-cluster-pv5                          5Gi        RWO            Retain           Bound         web/data-redis-5                                                     11h
# kubectl get pvc -n web
NAME                      STATUS   VOLUME                    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data-redis-0              Bound    redis-cluster-pv0         5Gi        RWO                           10h
data-redis-1              Bound    redis-cluster-pv1         5Gi        RWO                           10h
data-redis-2              Bound    redis-cluster-pv3         5Gi        RWO                           10h
data-redis-3              Bound    redis-cluster-pv2         5Gi        RWO                           10h
data-redis-4              Bound    redis-cluster-pv4         5Gi        RWO                           10h
data-redis-5              Bound    redis-cluster-pv5         5Gi        RWO                           10h

1.5 Initialize the Redis cluster

Initialization is only needed once. Redis 4 and earlier use the redis-trib tool; from Redis 5 on, redis-cli can create the cluster directly.
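
For reference, from Redis 5 the whole cluster (three masters, three slaves) can be created in a single step with redis-cli instead of redis-trib; a sketch using the same pod DNS names as below:

# redis-cli --cluster create \
`dig +short redis-0.redis.web.svc.zhao.local`:6379 \
`dig +short redis-1.redis.web.svc.zhao.local`:6379 \
`dig +short redis-2.redis.web.svc.zhao.local`:6379 \
`dig +short redis-3.redis.web.svc.zhao.local`:6379 \
`dig +short redis-4.redis.web.svc.zhao.local`:6379 \
`dig +short redis-5.redis.web.svc.zhao.local`:6379 \
--cluster-replicas 1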

# Launch a pod for initialization and install the cluster tooling
# kubectl run -it ubuntu1804 --image=ubuntu:18.04 --restart=Never -n web -- bash
root@ubuntu1804:/# apt update
root@ubuntu1804:/# apt install  python2.7 python-pip redis-tools dnsutils iputils-ping net-tools
root@ubuntu1804:/# pip install --upgrade pip
root@ubuntu1804:/# pip install redis-trib==0.5.1

# Create the cluster (the master nodes)
# redis-trib.py create \
`dig +short redis-0.redis.web.svc.zhao.local`:6379 \
`dig +short redis-1.redis.web.svc.zhao.local`:6379 \
`dig +short redis-2.redis.web.svc.zhao.local`:6379

# Add the slave nodes
# redis-trib.py replicate \
--master-addr `dig +short redis-0.redis.web.svc.zhao.local`:6379 \
--slave-addr `dig +short redis-3.redis.web.svc.zhao.local`:6379

# redis-trib.py replicate \
--master-addr `dig +short redis-1.redis.web.svc.zhao.local`:6379 \
--slave-addr `dig +short redis-4.redis.web.svc.zhao.local`:6379

# redis-trib.py replicate \
--master-addr `dig +short redis-2.redis.web.svc.zhao.local`:6379 \
--slave-addr `dig +short redis-5.redis.web.svc.zhao.local`:6379

# DNS name format: $(podname).$(service name).$(namespace).svc.cluster.local
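
For example, from inside the helper pod:

# dig +short redis-0.redis.web.svc.zhao.local    # prints redis-0's pod IP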

1.6 Verify the Redis cluster

# Cluster status
# kubectl exec -it redis-1 bash -n web
root@redis-1:/data# redis-cli
127.0.0.1:6379> CLUSTER INFO
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:5
cluster_my_epoch:2
cluster_stats_messages_ping_sent:446
cluster_stats_messages_pong_sent:460
cluster_stats_messages_meet_sent:4
cluster_stats_messages_sent:910
cluster_stats_messages_ping_received:459
cluster_stats_messages_pong_received:450
cluster_stats_messages_meet_received:1
cluster_stats_messages_received:910

# Cluster node information
127.0.0.1:6379> CLUSTER NODES
c63d746e4a906166690620d164375fd70e05d24b 10.200.36.106:6379@16379 myself,master - 0 1692858663000 2 connected 5462-10922
8e99a3a1f612282c1c7b03220057f6a0d538b971 10.200.107.212:6379@16379 slave d0783b29740f3656586b09c6a7bc19d85e5945f4 0 1692858664595 5 connected
d0783b29740f3656586b09c6a7bc19d85e5945f4 10.200.107.211:6379@16379 master - 0 1692858663000 1 connected 0-5461
8e9f8f3dc05816939eb555e14e70dabc9dbde1e6 10.200.169.163:6379@16379 slave ebe218e82adaf297a210365b370a7a5b73caac83 0 1692858663993 3 connected
ebe218e82adaf297a210365b370a7a5b73caac83 10.200.169.162:6379@16379 master - 0 1692858664000 0 connected 10923-16383
85b589683bb6ac742d5439f35c4863900ea44780 10.200.36.107:6379@16379 slave c63d746e4a906166690620d164375fd70e05d24b 0 1692858662584 4 connected

# Test writing data
root@redis-2:/data# redis-cli 
127.0.0.1:6379> SET testkey testvalue
OK
127.0.0.1:6379> SET testkey1 testvalue1
OK

# Verify on a slave
root@redis-5:/data# redis-cli 
127.0.0.1:6379> keys *
1) "testkey1"
2) "testkey"

2. Running MySQL (One Master, Multiple Slaves) with a StatefulSet Controller


2.1 Prepare the base images

# xtrabackup image
# nerdctl pull registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0
# nerdctl tag registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0 harbor.zhao.net/baseimages/xtrabackup:1.0
# nerdctl push harbor.zhao.net/baseimages/xtrabackup:1.0

# mysql image
# nerdctl pull mysql:5.7.36
# nerdctl tag mysql:5.7.36 harbor.zhao.net/baseimages/mysql:5.7.36
# nerdctl push harbor.zhao.net/baseimages/mysql:5.7.36

2.2 Create PVs

PVCs are created automatically (from the StatefulSet's volumeClaimTemplates) and bind to available PVs, so enough PVs just need to exist in advance. The PV count caps how many mysql pods can start: six PVs are created here, so at most six mysql pods can run.

2.2.1 Create the data directories

# Create the data directories on the NFS server
# mkdir -p /data/k8sdata/web/mysql-datadir-{1,2,3,4,5,6}

2.2.2 Create the PVs

# cat mysql-persistentvolume.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-1
  namespace: web
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-1 
    server: 172.20.20.81
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-2
  namespace: web
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-2
    server: 172.20.20.81
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-3
  namespace: web
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-3
    server: 172.20.20.81
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-4
  namespace: web
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-4
    server: 172.20.20.81
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-5
  namespace: web
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-5
    server: 172.20.20.81

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-6
  namespace: web
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-6
    server: 172.20.20.81

# kubectl apply -f mysql-persistentvolume.yaml

Verify (note that PVs are cluster-scoped; the namespace field in their metadata is ignored):

# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                                 STORAGECLASS          REASON   AGE
mysql-datadir-1                            50Gi       RWO            Retain           Available                                                                        6s
mysql-datadir-2                            50Gi       RWO            Retain           Available                                                                        6s
mysql-datadir-3                            50Gi       RWO            Retain           Available                                                                        5s
mysql-datadir-4                            50Gi       RWO            Retain           Available                                                                        5s
mysql-datadir-5                            50Gi       RWO            Retain           Available                                                                        5s
mysql-datadir-6                            50Gi       RWO            Retain           Available                                                                        5s

2.3 Run the MySQL service

2.3.1 Create the ConfigMap

# cat mysql-configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  namespace: web
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
    log_bin_trust_function_creators=1
    lower_case_table_names=1
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
    log_bin_trust_function_creators=1

# kubectl apply -f mysql-configmap.yaml

Verify:

# kubectl get configmaps -n web
NAME               DATA   AGE
mysql              2      105s
# kubectl describe cm mysql -n web
Name:         mysql
Namespace:    web
Labels:       app=mysql
Annotations:  <none>

Data
====
master.cnf:
----
# Apply this config only on the master.
[mysqld]
log-bin
log_bin_trust_function_creators=1
lower_case_table_names=1

slave.cnf:
----
# Apply this config only on slaves.
[mysqld]
super-read-only
log_bin_trust_function_creators=1


BinaryData
====

Events:  <none>

2.3.2 Create the Services

# cat mysql-services.yaml 
# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  namespace: web
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the master: mysql-0.mysql.
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  namespace: web
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql

# kubectl apply -f mysql-services.yaml

Verify:

# kubectl get svc -n web
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
mysql                     ClusterIP   None             <none>        3306/TCP                                       17s
mysql-read                ClusterIP   10.100.74.27     <none>        3306/TCP                                       17s
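
Once the StatefulSet from 2.3.3 is running, you can watch mysql-read balance reads across the instances by looping a query against it (a sketch adapted from the upstream Kubernetes StatefulSet tutorial, using the image pushed in 2.1):

# kubectl run mysql-client-loop -n web --image=harbor.zhao.net/baseimages/mysql:5.7.36 -it --rm --restart=Never -- \
bash -ic "while sleep 1; do mysql -h mysql-read -e 'SELECT @@server_id,NOW()'; done"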

2.3.3 Create the StatefulSet-based MySQL service

# cat mysql-statefulset.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: web
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: harbor.zhao.net/baseimages/mysql:5.7.36
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: harbor.zhao.net/baseimages/xtrabackup:1.0 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: harbor.zhao.net/baseimages/mysql:5.7.36
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 200m
            memory: 500Mi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: harbor.zhao.net/baseimages/xtrabackup:1.0 
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi
          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi

# kubectl apply -f mysql-statefulset.yaml

Verify:

# kubectl get pod -n web
NAME                                          READY   STATUS      RESTARTS        AGE
mysql-0                                       2/2     Running     0               3m14s
mysql-1                                       2/2     Running     0               2m8s
mysql-2                                       2/2     Running     0               66s
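
Because section 2.2 prepared six PVs, the StatefulSet can be scaled up with no extra storage work; each new pod gets its own PVC bound to a free PV:

# kubectl scale statefulset mysql --replicas=5 -n web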

2.4 Verify the MySQL cluster

2.4.1 Verify master-slave replication

# kubectl exec -it mysql-0 bash -n web
root@mysql-0:/# mysql
mysql> create database zhao;
Query OK, 1 row affected (0.01 sec)

mysql> show databases;
+------------------------+
| Database               |
+------------------------+
| information_schema     |
| mysql                  |
| performance_schema     |
| sys                    |
| xtrabackup_backupfiles |
| zhao                   |
+------------------------+
6 rows in set (0.03 sec)

# Verify on a slave node
# kubectl exec -it mysql-1 bash -n web
root@mysql-1:/# mysql
mysql> show databases;
+------------------------+
| Database               |
+------------------------+
| information_schema     |
| mysql                  |
| performance_schema     |
| sys                    |
| xtrabackup_backupfiles |
| zhao                   |
+------------------------+
6 rows in set (0.03 sec)

mysql> show slave status\G;    # check the slave's replication status
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: mysql-0.mysql
                  Master_User: root
                  Master_Port: 3306
                Connect_Retry: 10
              Master_Log_File: mysql-0-bin.000003
          Read_Master_Log_Pos: 313
               Relay_Log_File: mysql-1-relay-bin.000002
                Relay_Log_Pos: 481
        Relay_Master_Log_File: mysql-0-bin.000003
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB: 
          Replicate_Ignore_DB: 
           Replicate_Do_Table: 
       Replicate_Ignore_Table: 
      Replicate_Wild_Do_Table: 
  Replicate_Wild_Ignore_Table: 
                   Last_Errno: 0
                   Last_Error: 
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 313
              Relay_Log_Space: 690
              Until_Condition: None
               Until_Log_File: 
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File: 
           Master_SSL_CA_Path: 
              Master_SSL_Cert: 
            Master_SSL_Cipher: 
               Master_SSL_Key: 
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error: 
               Last_SQL_Errno: 0
               Last_SQL_Error: 
  Replicate_Ignore_Server_Ids: 
             Master_Server_Id: 100
                  Master_UUID: 0b00f7a1-4258-11ee-9972-32b3d00c71c1
             Master_Info_File: /var/lib/mysql/master.info
                    SQL_Delay: 0
          SQL_Remaining_Delay: NULL
      Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates
           Master_Retry_Count: 86400
                  Master_Bind: 
      Last_IO_Error_Timestamp: 
     Last_SQL_Error_Timestamp: 
               Master_SSL_Crl: 
           Master_SSL_Crlpath: 
           Retrieved_Gtid_Set: 
            Executed_Gtid_Set: 
                Auto_Position: 0
         Replicate_Rewrite_DB: 
                 Channel_Name: 
           Master_TLS_Version: 
1 row in set (0.00 sec)

ERROR: 
No query specified
# The "No query specified" error is harmless; it comes from the stray ";" after "\G".

2.4.2 Verify MySQL high availability

# kubectl delete pod mysql-0 -n web     # delete the MySQL master
# kubectl exec -it mysql-0 bash -n web  # once the master pod has been recreated automatically, exec in and check the data
root@mysql-0:/# mysql
mysql> show databases;
+------------------------+
| Database               |
+------------------------+
| information_schema     |
| mysql                  |
| performance_schema     |
| sys                    |
| xtrabackup_backupfiles |
| zhao                   |
+------------------------+
6 rows in set (0.06 sec)

mysql> create database zhao1;
Query OK, 1 row affected (0.01 sec)

Log in to a slave node again to check whether the new database replicated:

# kubectl exec -it mysql-1 bash -n web
root@mysql-1:/# mysql
mysql> show databases;
+------------------------+
| Database               |
+------------------------+
| information_schema     |
| mysql                  |
| performance_schema     |
| sys                    |
| xtrabackup_backupfiles |
| zhao                   |
| zhao1                  |
+------------------------+
7 rows in set (0.01 sec)

3. Running LNMP in a Single Multi-Container Pod, with the MySQL Pods from Section 2 as the Database Server

LNMP: a WordPress blog site built on nginx + PHP.



3.1 Prepare the images

3.1.1 nginx image

# Build the nginx base image

# Prepare the Dockerfile
# cat Dockerfile 
#Nginx Base Image
FROM harbor.zhao.net/baseimages/zhao-centos-base:7.9.2009 

MAINTAINER  jwzhao@zhao.net

RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop
ADD nginx-1.22.1.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.22.1 && ./configure --prefix=/apps/nginx  && make && make install && ln -sv  /apps/nginx/sbin/nginx /usr/sbin/nginx  && rm -rf /usr/local/src/nginx-1.22.1.tar.gz

# Build the image
# cat build-command.sh 
#!/bin/bash
nerdctl build -t harbor.zhao.net/pub-images/nginx-base-wordpress:v1.22.1  .
sleep 1
nerdctl push  harbor.zhao.net/pub-images/nginx-base-wordpress:v1.22.1
# bash build-command.sh

# Now build the nginx application image

# Prepare the nginx configuration file
# cat nginx.conf 
user  nginx nginx;
worker_processes  auto;

events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;

    sendfile        on;
    keepalive_timeout  65;
    client_max_body_size 10M;
    client_body_buffer_size 16k;
    client_body_temp_path  /apps/nginx/tmp   1 2 2;
    gzip  on;


    server {
        listen       80;
        server_name  blogs.magedu.net;

        location / {
            root    /home/nginx/wordpress;
            index   index.php index.html index.htm;
        }

        location ~ \.php$ {
            root           /home/nginx/wordpress;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
            include        fastcgi_params;
        }

        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

# Startup script (the tail keeps the container's PID 1 in the foreground)
# cat run_nginx.sh 
#!/bin/bash
/apps/nginx/sbin/nginx
tail -f /etc/hosts

# Write the Dockerfile
# cat Dockerfile 
FROM harbor.zhao.net/pub-images/nginx-base-wordpress:v1.22.1 

ADD nginx.conf /apps/nginx/conf/nginx.conf
ADD run_nginx.sh /apps/nginx/sbin/run_nginx.sh
RUN mkdir -pv /home/nginx/wordpress
RUN chown nginx.nginx /home/nginx/wordpress/ -R

EXPOSE 80 443

CMD ["/apps/nginx/sbin/run_nginx.sh"] 

# Build the image
# cat build-command.sh 
#!/bin/bash
TAG=$1
nerdctl build -t harbor.zhao.net/zhao/wordpress-nginx:${TAG} .
echo "Image build complete, pushing to the Harbor server"
sleep 1
nerdctl push  harbor.zhao.net/zhao/wordpress-nginx:${TAG}
echo "Image push complete"

# bash build-command.sh v1

3.1.2 PHP image

# PHP-FPM configuration file
# cat www.conf |egrep -v '^;|^\s*$'
[www]
user = nginx
group = nginx
listen = 0.0.0.0:9000
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 5
pm.max_spare_servers = 35
slowlog = /opt/remi/php56/root/var/log/php-fpm/www-slow.log
php_admin_value[error_log] = /opt/remi/php56/root/var/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
php_value[session.save_handler] = files
php_value[session.save_path]    = /opt/remi/php56/root/var/lib/php/session
php_value[soap.wsdl_cache_dir]  = /opt/remi/php56/root/var/lib/php/wsdlcache

# Startup script
# cat run_php.sh 
#!/bin/bash
/opt/remi/php56/root/usr/sbin/php-fpm
tail -f /etc/hosts

# Write the Dockerfile
# cat Dockerfile 
#PHP Base Image
FROM harbor.zhao.net/baseimages/zhao-centos-base:7.9.2009 

MAINTAINER  jwzhao@zhao.net

RUN yum install -y  https://mirrors.tuna.tsinghua.edu.cn/remi/enterprise/remi-release-7.rpm && yum install  php56-php-fpm php56-php-mysql -y 
ADD www.conf /opt/remi/php56/root/etc/php-fpm.d/www.conf
ADD run_php.sh /usr/local/bin/run_php.sh
EXPOSE 9000

CMD ["/usr/local/bin/run_php.sh"] 

# Build the image
# cat build-command.sh 
#!/bin/bash
TAG=$1
nerdctl build -t harbor.zhao.net/zhao/wordpress-php-5.6:${TAG} .
echo "Image build complete, pushing to the Harbor server"
sleep 1
nerdctl push harbor.zhao.net/zhao/wordpress-php-5.6:${TAG}
echo "Image push complete"

# bash build-command.sh v1

3.2 Deploy the WordPress service

3.2.1 Prepare WordPress

# Prepare the data directory on the NFS server
# mkdir /data/k8sdata/web/wordpress

# Download and extract
# cd /data/k8sdata/web/wordpress/
# wget https://cn.wordpress.org/wordpress-5.0.19-zh_CN.tar.gz
# tar xvf wordpress-5.0.19-zh_CN.tar.gz

# Set ownership; the files' UID/GID must match the nginx user's UID inside the image
# chown 2088.2088  /data/k8sdata/web/wordpress -R
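
To double-check the UID, inspect the nginx user inside the freshly built image (2088 is assumed to come from the base image's useradd):

# nerdctl run --rm harbor.zhao.net/zhao/wordpress-nginx:v1 id nginx    # should report uid=2088(nginx)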

3.2.2 Prepare the YAML manifest

# cat wordpress.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wordpress-app
  template:
    metadata:
      labels:
        app: wordpress-app
    spec:
      containers:
      - name: wordpress-app-nginx
        image: harbor.zhao.net/zhao/wordpress-nginx:v1 
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      - name: wordpress-app-php
        image: harbor.zhao.net/zhao/wordpress-php-5.6:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 9000
          protocol: TCP
          name: http
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      volumes:
      - name: wordpress
        nfs:
          server: 172.20.20.81
          path: /data/k8sdata/web/wordpress 


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-spec
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30031
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30033
  selector:
    app: wordpress-app

# kubectl apply -f wordpress.yaml

Verify:

# kubectl get pod -n web
NAME                                          READY   STATUS      RESTARTS        AGE
mysql-0                                       2/2     Running     0               3h38m
mysql-1                                       2/2     Running     0               3h53m
mysql-2                                       2/2     Running     0               3h52m
wordpress-app-deployment-77b8c777b6-hnw9z     2/2     Running     0               17s
# kubectl get svc -n web
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
mysql                     ClusterIP   None             <none>        3306/TCP                                       9h
mysql-read                ClusterIP   10.100.74.27     <none>        3306/TCP                                       9h
wordpress-app-spec        NodePort    10.100.185.237   <none>        80:30031/TCP,443:30033/TCP                     3h58m
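
A quick smoke test against the NodePort (replace <node-ip> with any worker node's address):

# curl -I http://<node-ip>:30031/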

3.2.3 Access the PHP test page

#cat /data/k8sdata/web/wordpress/index.php 
<?php
    phpinfo();
?>

Access it in a browser; the phpinfo() page confirms nginx is handing .php requests to php-fpm.



3.3 Initialize WordPress

3.3.1 Prepare the MySQL database

WordPress uses the MySQL cluster built in section 2.

# Create the WordPress database
# kubectl exec -it mysql-0 bash -n web
root@mysql-0:/# mysql
mysql> create database wordpress;
Query OK, 1 row affected (0.01 sec)

mysql> grant all privileges on wordpress.* to "wordpress"@"%" identified by "wordpress";
Query OK, 0 rows affected, 1 warning (0.02 sec)

mysql> flush privileges;
Query OK, 0 rows affected (0.02 sec)

mysql> exit
Bye
root@mysql-0:/# mysql -uwordpress -p
Enter password: 
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| wordpress          |
+--------------------+
2 rows in set (0.01 sec)

3.3.2 Initialize WordPress (the setup screenshot was not saved at the time)


On the setup screen, fill in the database details created above; for the database host, enter mysql-0.mysql.web.svc.zhao.local (the master, since WordPress needs write access).
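
Before submitting the form, you can confirm that host and the new credentials work from any pod with a mysql client, for example mysql-1:

# kubectl exec -it mysql-1 -c mysql -n web -- mysql -h mysql-0.mysql.web.svc.zhao.local -uwordpress -pwordpress -e 'SELECT 1'
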
After setup completes, the values are written to wp-config.php in the site directory:

# cat wp-config.php
// ** MySQL settings - You can get this info from your web host ** //
/** The name of the database for WordPress */
define('DB_NAME', 'wordpress');

/** MySQL database username */
define('DB_USER', 'wordpress');

/** MySQL database password */
define('DB_PASSWORD', 'wordpress');

/** MySQL hostname */
define('DB_HOST', 'mysql-0.mysql.web.svc.zhao.local');

/** Database Charset to use in creating database tables. */
define('DB_CHARSET', 'utf8mb4');

/** The Database Collate type. Don't change this if in doubt. */
define('DB_COLLATE', '');

After finishing the installer, log in; the login address used here is fronted by haproxy.



4. Dynamic Microservice Registration and Discovery with Zookeeper


4.1 Run the provider

4.1.1 Prepare the image

# Prepare dubbo-demo-provider
# Point it at the actual zookeeper addresses
# vim dubbo-demo-provider-2.1.5/conf/dubbo.properties
...
dubbo.registry.address=zookeeper://zookeeper1.web.svc.zhao.local:2181 | zookeeper://zookeeper2.web.svc.zhao.local:2181 | zookeeper://zookeeper3.web.svc.zhao.local:2181
...

# Startup script
# cat run_java.sh 
#!/bin/bash
su - nginx -c "/apps/dubbo/provider/bin/start.sh"
tail -f /etc/hosts

# Write the Dockerfile
# cat Dockerfile 
#Dubbo provider
FROM harbor.zhao.net/pub-images/jdk-base:v8.212 

MAINTAINER jwzhao "jwzhao@zhao.net"

RUN yum install file nc -y
RUN mkdir -p /apps/dubbo/provider
ADD dubbo-demo-provider-2.1.5/  /apps/dubbo/provider
ADD run_java.sh /apps/dubbo/provider/bin 
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/provider/bin/*.sh

CMD ["/apps/dubbo/provider/bin/run_java.sh"]

# Build the image
# cat build-command.sh 
#!/bin/bash
nerdctl build -t harbor.zhao.net/zhao/dubbo-demo-provider:v1  .
sleep 3
nerdctl push harbor.zhao.net/zhao/dubbo-demo-provider:v1

# bash build-command.sh

4.1.2 Run the provider service

# Prepare the YAML manifest
# cat provider.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-provider
  name: web-provider-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-provider
  template:
    metadata:
      labels:
        app: web-provider
    spec:
      containers:
      - name: web-provider-container
        image: harbor.zhao.net/zhao/dubbo-demo-provider:v1 
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-provider
  name: web-provider-spec
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: web-provider

# Create
# kubectl apply -f provider.yaml

Verify:

# kubectl get pod -n web
NAME                                          READY   STATUS      RESTARTS      AGE
web-provider-deployment-5f57975f56-2kkzj      1/1     Running     0             81s
zookeeper1-6c8bf4dc47-s5njl                   1/1     Running     1 (19h ago)   6d17h
zookeeper2-795697b464-j4csr                   1/1     Running     1 (19h ago)   6d17h
zookeeper3-ccbd549d5-nbbkv                    1/1     Running     1 (19h ago)   6d17h
# kubectl get svc -n web
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
web-provider-spec         NodePort    10.100.187.13    <none>        80:37172/TCP                                   96s
zookeeper                 ClusterIP   10.100.176.138   <none>        2181/TCP                                       6d21h
zookeeper1                NodePort    10.100.169.221   <none>        2181:32181/TCP,2888:54176/TCP,3888:41454/TCP   6d21h
zookeeper2                NodePort    10.100.196.57    <none>        2181:32182/TCP,2888:36924/TCP,3888:37919/TCP   6d21h
zookeeper3                NodePort    10.100.214.231   <none>        2181:32183/TCP,2888:54926/TCP,3888:46586/TCP   6d21h
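
The registration can also be checked from the command line before reaching for a GUI; a sketch, assuming the zookeeper image ships zkCli.sh (the path depends on how that image was built):

# kubectl exec -it zookeeper1-6c8bf4dc47-s5njl -n web -- /zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /dubbo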

4.1.3 View the provider registration with a GUI tool

(screenshot not preserved)

4.2 Run the consumer

4.2.1 Prepare the image

# Prepare the dubbo-demo-consumer-2.1.5 files
# Point them at the zookeeper addresses
# vim dubbo-demo-consumer-2.1.5/conf/dubbo.properties
...
dubbo.registry.address=zookeeper://zookeeper1.web.svc.zhao.local:2181 | zookeeper://zookeeper2.web.svc.zhao.local:2181 | zookeeper://zookeeper3.web.svc.zhao.local:2181
...

# Startup script
# cat run_java.sh 
#!/bin/bash
su - nginx -c "/apps/dubbo/consumer/bin/start.sh"
tail -f /etc/hosts

# Write the Dockerfile
# cat Dockerfile 
#Dubbo consumer
FROM harbor.zhao.net/pub-images/jdk-base:v8.212 

MAINTAINER jwzhao "jwzhao@zhao.net"

RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer 
ADD dubbo-demo-consumer-2.1.5  /apps/dubbo/consumer
ADD run_java.sh /apps/dubbo/consumer/bin 
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh

CMD ["/apps/dubbo/consumer/bin/run_java.sh"]

# Build the image
# cat build-command.sh 
#!/bin/bash
nerdctl build -t harbor.zhao.net/zhao/dubbo-demo-consumer:v1  .
sleep 3
nerdctl push harbor.zhao.net/zhao/dubbo-demo-consumer:v1

# bash build-command.sh

4.2.2 Run the consumer service

# Prepare the YAML manifest
# cat consumer.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-consumer
  name: web-consumer-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-consumer
  template:
    metadata:
      labels:
        app: web-consumer
    spec:
      containers:
      - name: web-consumer-container
        image: harbor.zhao.net/zhao/dubbo-demo-consumer:v1 
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-consumer
  name: web-consumer-server
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    #nodePort: 30001
  selector:
    app: web-consumer

# Start
# kubectl apply -f consumer.yaml

Verify:

# kubectl get pod -n web
NAME                                          READY   STATUS        RESTARTS      AGE
web-consumer-deployment-657447cd85-wwxgk      1/1     Running       0             9s
web-provider-deployment-5f57975f56-2kkzj      1/1     Running       0             13m
zookeeper1-6c8bf4dc47-s5njl                   1/1     Running       1 (23h ago)   6d21h
zookeeper2-795697b464-j4csr                   1/1     Running       1 (23h ago)   6d21h
zookeeper3-ccbd549d5-nbbkv                    1/1     Running       1 (23h ago)   6d21h
# kubectl get svc -n web
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
web-consumer-server       NodePort    10.100.32.214    <none>        80:57814/TCP                                   5s
web-provider-spec         NodePort    10.100.187.13    <none>        80:37172/TCP                                   13m
zookeeper                 ClusterIP   10.100.176.138   <none>        2181/TCP                                       6d21h
zookeeper1                NodePort    10.100.169.221   <none>        2181:32181/TCP,2888:54176/TCP,3888:41454/TCP   6d21h
zookeeper2                NodePort    10.100.196.57    <none>        2181:32182/TCP,2888:36924/TCP,3888:37919/TCP   6d21h
zookeeper3                NodePort    10.100.214.231   <none>        2181:32183/TCP,2888:54926/TCP,3888:46586/TCP   6d21h
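
To confirm the consumer is actually receiving RPC responses, tail its application log inside the pod; note that kubectl logs only shows the tail of /etc/hosts from run_java.sh. The log path below assumes the stock dubbo demo start.sh, which writes logs/stdout.log:

# kubectl exec -it web-consumer-deployment-657447cd85-wwxgk -n web -- tail -f /apps/dubbo/consumer/logs/stdout.log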

4.2.3 View with a GUI tool

(screenshot not preserved)

4.3 Run dubboadmin

4.3.1 Prepare the image

# Prepare dubboadmin
# Set the zookeeper address in its config file
# vim dubboadmin/WEB-INF/dubbo.properties
dubbo.registry.address=zookeeper://zookeeper1.web.svc.zhao.local:2181
dubbo.admin.root.password=root
dubbo.admin.guest.password=guest

# Re-zip the files
# zip -r dubboadmin.zip dubboadmin

# Prepare the Dockerfile
# cat Dockerfile 
#Dubbo dubboadmin
FROM harbor.zhao.net/pub-images/tomcat-base:v8.5.43

MAINTAINER jwzhao "jwzhao@zhao.net"

RUN yum install unzip -y  
ADD server.xml /apps/tomcat/conf/server.xml
ADD logging.properties /apps/tomcat/conf/logging.properties
ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD dubboadmin.zip  /data/tomcat/webapps/dubboadmin.zip
RUN cd /data/tomcat/webapps && unzip dubboadmin.zip && rm -rf dubboadmin.zip && chown -R nginx.nginx /data /apps

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]

# Build
# cat build-command.sh 
#!/bin/bash
TAG=$1
nerdctl build -t harbor.zhao.net/zhao/dubboadmin:${TAG}  .
sleep 3
nerdctl push  harbor.zhao.net/zhao/dubboadmin:${TAG}

# bash build-command.sh v3

4.3.2 Run dubboadmin

# Prepare the YAML manifest
# cat dubboadmin.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-dubboadmin
  name: web-dubboadmin-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-dubboadmin
  template:
    metadata:
      labels:
        app: web-dubboadmin
    spec:
      containers:
      - name: web-dubboadmin-container
        image: harbor.zhao.net/zhao/dubboadmin:v3
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-dubboadmin
  name: web-dubboadmin-service
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30180
  selector:
    app: web-dubboadmin

# Start
# kubectl apply -f dubboadmin.yaml

Verify:

# kubectl get pod -n web
NAME                                          READY   STATUS      RESTARTS      AGE
web-consumer-deployment-657447cd85-wwxgk      1/1     Running     0             14m
web-dubboadmin-deployment-6fc7b9d447-b6rjj    1/1     Running     0             28s
web-provider-deployment-5f57975f56-2kkzj      1/1     Running     0             27m
zookeeper1-6c8bf4dc47-s5njl                   1/1     Running     1 (23h ago)   6d21h
zookeeper2-795697b464-j4csr                   1/1     Running     1 (23h ago)   6d21h
zookeeper3-ccbd549d5-nbbkv                    1/1     Running     1 (23h ago)   6d21h
root@k8s-master2:~/k8s-data/yaml/magedu/dubbo/dubboadmin# kubectl get svc -n web
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
web-consumer-server       NodePort    10.100.32.214    <none>        80:57814/TCP                                   14m
web-dubboadmin-service    NodePort    10.100.245.113   <none>        80:30180/TCP                                   36s
web-provider-spec         NodePort    10.100.187.13    <none>        80:37172/TCP                                   27m
zookeeper                 ClusterIP   10.100.176.138   <none>        2181/TCP                                       6d21h
zookeeper1                NodePort    10.100.169.221   <none>        2181:32181/TCP,2888:54176/TCP,3888:41454/TCP   6d21h
zookeeper2                NodePort    10.100.196.57    <none>        2181:32182/TCP,2888:36924/TCP,3888:37919/TCP   6d21h
zookeeper3                NodePort    10.100.214.231   <none>        2181:32183/TCP,2888:54926/TCP,3888:46586/TCP   6d21h

4.3.3 Verify the provider and consumer

  • Browse to port 30180 on any k8s node.

Username: root   Password: root


Log in with the credentials above; the dubboadmin console then shows the registered providers, consumers, and services.