一.基于StatefulSet控制器运行Redis Cluster
- 创建共享目录
[root@deploy redis-cluster]# mkdir -pv /data/k8sdata/magedu/redis{0,1,2,3}
- 创建pv
[root@deploy pv]# pwd
/k8s-data/yaml/magedu/redis-cluster/pv
[root@deploy pv]# kubectl apply -f redis-cluster-pv.yaml
[root@deploy pv]# kubectl get pv -n magedu
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
redis-cluster-pv0 5Gi RWO Retain Available 58s
redis-cluster-pv1 5Gi RWO Retain Available 58s
redis-cluster-pv2 5Gi RWO Retain Available 58s
redis-cluster-pv3 5Gi RWO Retain Available 58s
- 准备redis配置文件
[root@deploy redis-cluster]# vim redis.conf
appendonly yes
cluster-enabled yes
cluster-config-file /var/lib/redis/nodes.conf
cluster-node-timeout 5000
dir /var/lib/redis
port 6379
基于配置文件创建 configmap:
[root@deploy redis-cluster]# pwd
/k8s-data/yaml/magedu/redis-cluster
[root@deploy redis-cluster]# kubectl create configmap redis-conf --from-file=redis.conf -n magedu
configmap/redis-conf created
[root@deploy redis-cluster]# kubectl get configmaps -n magedu
NAME DATA AGE
kube-root-ca.crt 1 5h43m
redis-conf 1 24s
- 创建 redis-cluster
[root@deploy redis-cluster]# pwd
/k8s-data/yaml/magedu/redis-cluster
[root@deploy redis-cluster]# kubectl apply -f redis.yaml
- 初始化集群
[root@deploy redis-cluster]# kubectl run -it ubuntu1804 --image=ubuntu:18.04 --restart=Never -n magedu -- bash
root@ubuntu1804:/# apt update
root@ubuntu1804:/# apt install python2.7 python-pip redis-tools dnsutils iputils-ping net-tools
root@ubuntu1804:/# pip install --upgrade pip
root@ubuntu1804:/# pip install redis-trib==0.5.1
root@ubuntu1804:/# ping redis-0.redis.magedu.svc.qj.local
PING redis-0.redis.magedu.svc.qj.local (10.200.104.14) 56(84) bytes of data.
64 bytes from redis-0.redis.magedu.svc.qj.local (10.200.104.14): icmp_seq=1 ttl=62 time=0.536 ms
root@ubuntu1804:/# redis-trib.py create \
> `dig +short redis-0.redis.magedu.svc.qj.local`:6379 \
> `dig +short redis-1.redis.magedu.svc.qj.local`:6379
Redis-trib 0.5.1 Copyright (c) HunanTV Platform developers
INFO:root:Instance at 10.200.166.138:6379 checked
INFO:root:Instance at 10.200.104.14:6379 checked
INFO:root:Add 8192 slots to 10.200.166.138:6379
INFO:root:Add 8192 slots to 10.200.104.14:6379
将 redis-2 加入 redis-0:
root@ubuntu1804:/# redis-trib.py replicate --master-addr `dig +short redis-0.redis.magedu.svc.qj.local`:6379 --slave-addr `dig +short redis-2.redis.magedu.svc.qj.local`:6379
Redis-trib 0.5.1 Copyright (c) HunanTV Platform developers
INFO:root:Instance at 10.200.135.6:6379 has joined 10.200.104.14:6379; now set replica
INFO:root:Instance at 10.200.135.6:6379 set as replica to b02385e87d0942edbedd71077b99648c68ec6590
将 redis-3 加入 redis-1:
root@ubuntu1804:/# redis-trib.py replicate --master-addr `dig +short redis-1.redis.magedu.svc.qj.local`:6379 --slave-addr `dig +short redis-3.redis.magedu.svc.qj.local`:6379
Redis-trib 0.5.1 Copyright (c) HunanTV Platform developers
INFO:root:Instance at 10.200.104.15:6379 has joined 10.200.166.138:6379; now set replica
INFO:root:Instance at 10.200.104.15:6379 set as replica to 5075edc38b01803800bb6543a7d06168c76d984f
- 验证 redis cluster 状态
root@redis-0:/data# redis-cli
127.0.0.1:6379> CLUSTER INFO
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:4
cluster_size:2
cluster_current_epoch:2
cluster_my_epoch:2
cluster_stats_messages_ping_sent:645
cluster_stats_messages_pong_sent:657
cluster_stats_messages_meet_sent:3
cluster_stats_messages_sent:1305
cluster_stats_messages_ping_received:656
cluster_stats_messages_pong_received:648
cluster_stats_messages_meet_received:1
cluster_stats_messages_received:1305
127.0.0.1:6379> CLUSTER NODES
3731fec54f4a2b48721f528d1c14eff98aa9f0e2 10.200.104.15:6379@16379 slave 5075edc38b01803800bb6543a7d06168c76d984f 0 1679971681837 1 connected
5075edc38b01803800bb6543a7d06168c76d984f 10.200.166.138:6379@16379 master - 0 1679971682844 1 connected 0-8191
e4d9eeb215ebd412c76b96496bbdd08532c12acb 10.200.135.6:6379@16379 slave b02385e87d0942edbedd71077b99648c68ec6590 0 1679971682000 2 connected
b02385e87d0942edbedd71077b99648c68ec6590 10.200.104.14:6379@16379 myself,master - 0 1679971680000 2 connected 8192-16383
测试在 master 写入数据:
[root@master1 ~]# kubectl exec -it -n magedu redis-0 bash
root@redis-0:/data# redis-cli
127.0.0.1:6379> set key1 value1
OK
127.0.0.1:6379> set key2 value2
(error) MOVED 4998 10.200.166.138:6379 # 无法写入,应该在10.200.166.138:6379节点执行写入
在 slave 验证数据:
[root@master1 ~]# kubectl exec -it -n magedu redis-2 bash
root@redis-2:/data# redis-cli
127.0.0.1:6379> keys *
1) "key1"
[root@master1 ~]# kubectl exec -it -n magedu redis-1 bash
root@redis-1:/data# redis-cli
127.0.0.1:6379> keys * # master0不会有master1的数据(否则集群是有问题的)
(empty list or set)
二.基于StatefulSet控制器运行MySQL一主多从
- 镜像准备
[root@deploy ~]# docker pull registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0
[root@deploy ~]# docker tag registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0 qj.harbor.com/mysql-images/xtrabackup:1.0
[root@deploy ~]# docker push qj.harbor.com/mysql-images/xtrabackup:1.0
[root@deploy ~]# docker pull mysql:5.7.36
[root@deploy ~]# docker tag mysql:5.7.36 qj.harbor.com/mysql-images/mysql:5.7.36
[root@deploy ~]# docker push qj.harbor.com/mysql-images/mysql:5.7.36
- 创建共享目录
[root@deploy ~]# mkdir -p /data/k8sdata/magedu/mysql-datadir-{1..4}
- 创建pv
[root@deploy pv]# pwd
/k8s-data/yaml/magedu/mysql/pv
[root@deploy pv]# kubectl apply -f mysql-persistentvolume.yaml
persistentvolume/mysql-datadir-1 created
persistentvolume/mysql-datadir-2 created
persistentvolume/mysql-datadir-3 created
persistentvolume/mysql-datadir-4 created
[root@deploy pv]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
mysql-datadir-1 50Gi RWO Retain Available 4s
mysql-datadir-2 50Gi RWO Retain Available 4s
mysql-datadir-3 50Gi RWO Retain Available 4s
mysql-datadir-4 50Gi RWO Retain Available 4s
- 创建configmap
[root@deploy mysql]# pwd
/k8s-data/yaml/magedu/mysql
[root@deploy mysql]# kubectl apply -f mysql-configmap.yaml
[root@deploy mysql]# kubectl get configmaps -n magedu
NAME DATA AGE
kube-root-ca.crt 1 23h
mysql 2 15s
redis-conf 1 17h
[root@deploy mysql]# kubectl describe configmaps -n magedu mysql
Name: mysql
Namespace: magedu
Labels: app=mysql
Annotations: <none>
Data
====
master.cnf:
----
# Apply this config only on the master.
[mysqld]
log-bin
log_bin_trust_function_creators=1
lower_case_table_names=1
slave.cnf:
----
# Apply this config only on slaves.
[mysqld]
super-read-only
log_bin_trust_function_creators=1
BinaryData
====
Events: <none>
- 创建service定义mysql访问方式
[root@deploy mysql]# pwd
/k8s-data/yaml/magedu/mysql
[root@deploy mysql]# kubectl apply -f mysql-services.yaml
- 运⾏mysql服务
[root@deploy mysql]# pwd
/k8s-data/yaml/magedu/mysql
[root@deploy mysql]# kubectl apply -f mysql-statefulset.yaml
[root@deploy mysql]# kubectl logs -f -n magedu mysql-0
- 验证MySQL Pod状态
[root@deploy data]# kubectl get pod -n magedu | grep mysql
mysql-0 2/2 Running 0 3h19m
mysql-1 2/2 Running 0 3h19m
- 验证MySQL主从同步是否正常
[root@deploy mysql]# kubectl -n magedu exec -it pods/mysql-0 bash
root@mysql-0:/# mysql
mysql> create database qj;
Query OK, 1 row affected (0.01 sec)
mysql> show databases;
+------------------------+
| Database |
+------------------------+
| information_schema |
| mysql |
| performance_schema |
| qj |
| sys |
| xtrabackup_backupfiles |
+------------------------+
6 rows in set (0.06 sec)
[root@deploy mysql]# kubectl -n magedu exec -it pods/mysql-1 bash
root@mysql-1:/# mysql
mysql> show databases;
+------------------------+
| Database |
+------------------------+
| information_schema |
| mysql |
| performance_schema |
| qj |
| sys |
| xtrabackup_backupfiles |
+------------------------+
6 rows in set (0.04 sec)
mysql> show slave status;
*************************** 1. row ***************************
Slave_IO_State: Waiting for master to send event
Master_Host: mysql-0.mysql
Master_User: root
Master_Port: 3306
Connect_Retry: 10
Master_Log_File: mysql-0-bin.000003
Read_Master_Log_Pos: 307
Relay_Log_File: mysql-1-relay-bin.000002
Relay_Log_Pos: 475
Relay_Master_Log_File: mysql-0-bin.000003
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
三.实现单Pod多容器并实现LNMP且上一步骤的MySQL pod作为SQL服务器
- 制作php镜像
[root@deploy php]# pwd
/k8s-data/dockerfile/web/magedu/wordpress/php
[root@deploy php]# ls
build-command.sh Dockerfile run_php.sh www.conf
[root@deploy php]# sh build-command.sh v1
- 制作nginx镜像
[root@deploy nginx]# pwd
/k8s-data/dockerfile/web/magedu/wordpress/nginx
[root@deploy nginx]# ls
build-command.sh Dockerfile index.html nginx.conf run_nginx.sh
[root@deploy nginx]# sh build-command.sh v1
- 创建共享目录
[root@deploy wordpress]# mkdir /data/k8sdata/magedu/wordpress -p
- 制作yaml文件
[root@deploy wordpress]# pwd
/k8s-data/yaml/magedu/wordpress
[root@deploy wordpress]# vim wordpress.yaml
[root@deploy wordpress]# cat wordpress.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
app: wordpress-app
name: wordpress-app-deployment
namespace: magedu
spec:
replicas: 1
selector:
matchLabels:
app: wordpress-app
template:
metadata:
labels:
app: wordpress-app
spec:
containers:
- name: wordpress-app-nginx
image: qj.harbor.com/magedu/wordpress-nginx:v1
imagePullPolicy: Always
ports:
- containerPort: 80
protocol: TCP
name: http
- containerPort: 443
protocol: TCP
name: https
volumeMounts:
- name: wordpress
mountPath: /home/nginx/wordpress
readOnly: false
- name: wordpress-app-php
image: qj.harbor.com/magedu/wordpress-php-5.6:v1
imagePullPolicy: Always
ports:
- containerPort: 9000
protocol: TCP
name: http
volumeMounts:
- name: wordpress
mountPath: /home/nginx/wordpress
readOnly: false
volumes:
- name: wordpress
nfs:
server: 10.20.0.10
path: /data/k8sdata/magedu/wordpress
---
kind: Service
apiVersion: v1
metadata:
labels:
app: wordpress-app
name: wordpress-app-spec
namespace: magedu
spec:
type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
nodePort: 30031
- name: https
port: 443
protocol: TCP
targetPort: 443
nodePort: 30033
selector:
app: wordpress-app
- 创建容器
[root@deploy wordpress]# kubectl apply -f wordpress.yaml
[root@deploy wordpress]# kubectl get pod -A | grep wordpress
magedu wordpress-app-deployment-5bc785c5c8-777nm 2/2 Running 0 70s
- 测试nginx php
[root@deploy wordpress]# echo "wordpress" >> /data/k8sdata/magedu/wordpress/index.html
[root@deploy wordpress]# curl 10.20.0.31:30031
wordpress
[root@deploy wordpress]# cat /data/k8sdata/magedu/wordpress/test.php
<?php
phpinfo();
?>
image.png
image.png
- 初始化 WordPress 站点
[root@deploy wordpress]# wget https://cn.wordpress.org/wordpress-5.7.8-zh_CN.tar.gz
[root@deploy wordpress]# tar -xf wordpress-5.7.8-zh_CN.tar.gz
[root@deploy wordpress]# mv wordpress/* ./
[root@deploy wordpress]# chown 2088.2088 ./ -R  # 注意:wordpress/* 已移至当前目录,需对当前目录递归授权,而非已清空的 wordpress/
-
浏览器访问
image.png - k8s 中 MySQL 创建数据库
[root@deploy wordpress]# kubectl exec -it -n magedu mysql-0 bash
root@mysql-0:/# mysql
mysql> CREATE DATABASE wordpress;
Query OK, 1 row affected (0.01 sec)
mysql> GRANT ALL PRIVILEGES ON wordpress.* TO "wordpress"@"%" IDENTIFIED BY "123456";
Query OK, 0 rows affected, 1 warning (0.02 sec)
mysql> flush privileges;
Query OK, 0 rows affected (0.07 sec)
[root@wordpress-app-deployment-5bc785c5c8-jv978 /]# ping mysql
PING mysql.magedu.svc.qj.local (10.200.225.199) 56(84) bytes of data.
64 bytes from mysql-0.mysql.magedu.svc.qj.local (10.200.225.199): icmp_seq=1 ttl=62 time=0.329 ms
64 bytes from mysql-0.mysql.magedu.svc.qj.local (10.200.225.199): icmp_seq=2 ttl=62 time=0.366 ms
image.png
image.png
四.基于Zookeeper案例实现微服务动态注册和发现案例
1.创建生产者 provider
- 准备镜像
[root@deploy provider]# pwd
/k8s-data/dockerfile/web/magedu/dubbo/provider
[root@deploy provider]# ls
build-command.sh Dockerfile dubbo-demo-provider-2.1.5 dubbo-demo-provider-2.1.5-assembly.tar.gz run_java.sh
[root@deploy provider]# chmod +x *.sh
[root@deploy provider]# vim dubbo-demo-provider-2.1.5/conf/dubbo.properties #修改zookeeper地址
[root@deploy provider]# vim dubbo-demo-provider-2.1.5/conf/dubbo.properties
dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.qj.local:2181|zookeeper://zookeeper2.magedu.svc.qj.local:2181|zookeeper://zookeeper3.magedu.svc.qj.local:2181
[root@deploy provider]# sh build-command.sh #构建provider镜像
- 编写yaml文件,运⾏provider服务
[root@deploy provider]# pwd
/k8s-data/yaml/magedu/dubbo/provider
[root@deploy provider]# kubectl apply -f provider.yaml
[root@deploy provider]# kubectl get pod -A | grep provider
magedu magedu-provider-deployment-5974b6b86b-rpphd 1/1 Running 0 77s
[root@deploy provider]# kubectl logs -f -n magedu magedu-provider-deployment-5974b6b86b-rpphd
Ncat: Idle timeout expired (1000 ms).
Ncat: Idle timeout expired (1000 ms). #表示已启动,目前是空闲的,无人连接
2.创建运行消费者 consumer
- 准备镜像
[root@deploy consumer]# pwd
/k8s-data/dockerfile/web/magedu/dubbo/consumer
[root@deploy consumer]# chmod +x *.sh
[root@deploy consumer]# vim dubbo-demo-consumer-2.1.5/conf/dubbo.properties 修改zookeeper地址
dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.qj.local:2181|zookeeper://zookeeper2.magedu.svc.qj.local:2181|zookeeper://zookeeper3.magedu.svc.qj.local:2181
[root@deploy consumer]# sh build-command.sh
- 编写yaml文件,运⾏consumer服务
[root@deploy consumer]# pwd
/k8s-data/yaml/magedu/dubbo/consumer
[root@deploy consumer]# ls
consumer.yaml
[root@deploy consumer]# kubectl apply -f consumer.yaml
[root@deploy consumer]# kubectl get pod -A | grep consumer
magedu magedu-consumer-deployment-6d76cc864f-cznl2 1/1 Running 0 3m40s
[root@magedu-provider-deployment-7cf4d65cbc-pbnwf logs]# tail -f stdout.log
[16:58:41] Hello world22, request from consumer: /10.200.104.21:49246
[16:58:43] Hello world23, request from consumer: /10.200.104.21:49246
[16:58:45] Hello world24, request from consumer: /10.200.104.21:49246
[16:58:47] Hello world25, request from consumer: /10.200.104.21:49246
[16:58:49] Hello world26, request from consumer: /10.200.104.21:49246
[16:58:51] Hello world27, request from consumer: /10.200.104.21:49246
[16:58:53] Hello world28, request from consumer: /10.200.104.21:49246
[16:58:55] Hello world29, request from consumer: /10.200.104.21:49246
[16:58:57] Hello world30, request from consumer: /10.200.104.21:49246
[16:58:59] Hello world31, request from consumer: /10.200.104.21:49246
3.控制台 dubboadmin
[root@deploy dubboadmin]# pwd
/k8s-data/dockerfile/web/magedu/dubbo/dubboadmin
[root@deploy dubboadmin]# ls
build-command.sh catalina.sh Dockerfile dubboadmin dubboadmin.war dubboadmin.war.bak logging.properties run_tomcat.sh server.xml
[root@deploy dubboadmin]# chmod +x *.sh
[root@deploy dubboadmin]# vim dubboadmin/WEB-INF/dubbo.properties # 修改注册中心地址
dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.qj.local:2181
dubbo.admin.root.password=root # 登录用户 密码
dubbo.admin.guest.password=guest
[root@deploy dubboadmin]# sh build-command.sh v1
[root@deploy dubboadmin]# pwd
/k8s-data/yaml/magedu/dubbo/dubboadmin
[root@deploy dubboadmin]# kubectl apply -f dubboadmin.yaml
image.png