When to use OpenStack and when to use k8s:
For public cloud (elastic compute) offerings, use OpenStack, because the customers themselves log in to the cluster and use the cloud hosts one by one. For businesses like Meituan, JD, or Taobao that only expose an application to the outside, nobody logs in to the cluster itself; users only connect to the app's port, so use k8s.
Prerequisite: a dynamic StorageClass must already be deployed on k8s; an NFS storage class is recommended because it is the easiest to set up.
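A minimal sketch of that prerequisite (assumptions: the nfs-subdir-external-provisioner is already running and registers the provisioner name k8s-sigs.io/nfs-subdir-external-provisioner; adjust to whatever your provisioner actually uses):
kubectl create namespace gpmall
# nfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storageclass  # referenced by every volumeClaimTemplate below
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner  # assumption: must match your provisioner
parameters:
  archiveOnDelete: "false"  # do not keep an archive copy when a PVC is released
kubectl apply -f nfs-storageclass.yaml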
The components needed for the gpmall service: mariadb, zookeeper, kafka, redis, nginx, and the jar packages.
If resources allow, mariadb can be fronted by MyCAT for read/write splitting.
mariadb:
Create a data directory, give it full permissions, and export it as NFS shared storage:
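A sketch of the NFS export on the master node (assumptions: nfs-utils is installed and the cluster nodes sit in 10.0.0.0/24; substitute your own subnet):
mkdir -p /data
chmod 777 /data
echo '/data 10.0.0.0/24(rw,sync,no_root_squash)' >> /etc/exports
systemctl enable --now nfs-server
exportfs -r             # reload the export table
showmount -e localhost  # verify /data is exported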
mariadb-config.yaml:
cat mariadb-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mariadb-config
  namespace: gpmall
  labels:
    app: mariadb
data:
  my.cnf: |
    #!includedir /etc/my.cnf.d
    [mysqld]
    init_connect='SET collation_connection = utf8_unicode_ci'
    init_connect='SET NAMES utf8'
    character-set-server=utf8
    collation-server=utf8_unicode_ci
    skip-character-set-client-handshake
kubectl get configmaps -n gpmall
mariadb-secret:
kubectl explain pods.spec.containers.env.valueFrom
vim mariadb-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: mariadb-secret
  namespace: gpmall
type: Opaque
data:
  password: MTIzNDU2  # base64 of 123456
kubectl describe secrets -n gpmall mariadb-secret
Or create it from the command line:
kubectl create secret generic mariadb-root-password --from-literal=password=123456 -n gpmall
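The data field of a Secret must be base64-encoded; a quick round-trip check:
echo -n 123456 | base64    # MTIzNDU2
echo MTIzNDU2 | base64 -d  # 123456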
cat mariadb.statefulset.yaml
apiVersion: v1
kind: Service
metadata:
  name: mariadb-svc
  namespace: gpmall
  labels:
    app: mariadb
spec:
  ports:
  - port: 3306
    name: mariadb
    targetPort: 3306
  selector:
    app: mariadb
  clusterIP: 10.107.93.218
---
apiVersion: v1
kind: Service
metadata:
  name: mariadb-headless
  namespace: gpmall
  labels:
    app: mariadb
spec:
  ports:
  - port: 3306
    name: mariadb
  clusterIP: None
  selector:
    app: mariadb
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mariadb
  namespace: gpmall
  labels:
    app: mariadb
spec:
  selector:
    matchLabels:
      app: mariadb
  serviceName: mariadb-headless
  replicas: 1
  template:  # pod template
    metadata:
      labels:
        app: mariadb
    spec:
      restartPolicy: Always
      initContainers:
      - name: init-mariadb
        image: busybox
        command:
        - sh
        - "-c"
        - |
          set -ex
          cat > /docker-entrypoint-initdb.d/mysql-gpmall-init.sql << EOF
          grant all privileges on *.* to root@localhost identified by '123456' with grant option;
          grant all privileges on *.* to root@"%" identified by '123456' with grant option;
          create database gpmall;
          use gpmall;
          source /data/gpmall.sql
          EOF
        volumeMounts:
        - name: init-mariadb
          mountPath: /docker-entrypoint-initdb.d
      containers:
      - name: mariadb
        imagePullPolicy: IfNotPresent
        env:
        - name: MARIADB_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mariadb-secret
              key: password
        image: mariadb:10.3
        ports:
        - containerPort: 3306
        volumeMounts:
        - name: mariadb-config
          mountPath: /etc/my.cnf  # careful: the path must be exactly /etc/my.cnf
          subPath: my.cnf         # mount a single file, not a directory
        - name: init-mariadb
          mountPath: /docker-entrypoint-initdb.d
        - name: mariadb-data
          mountPath: /data
        - name: mariadb-nfs-storageclass
          mountPath: /var/lib/mysql  # where mysql stores its data
        resources:
          requests:
            memory: "512Mi"
            cpu: "0.5"
          limits:
            memory: "2Gi"
            cpu: "2"
      volumes:
      - name: mariadb-config
        configMap:
          name: mariadb-config
      - name: init-mariadb
        emptyDir: {}
      - name: mariadb-data
        nfs:
          server: k8s-gpmall-master
          path: /data
  volumeClaimTemplates:
  - metadata:
      name: mariadb-nfs-storageclass
    spec:
      accessModes:
      - "ReadWriteOnce"
      storageClassName: "nfs-storageclass"
      resources:
        requests:
          storage: 3Gi
After creation:
kubectl exec -it mariadb-0 -n gpmall -- bash
Enter mysql and verify that gpmall.sql was imported.
cat /etc/my.cnf (if it is missing, check whether the yaml actually references the ConfigMap); also check /etc/mysql/my.cnf
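A quick import check from inside the pod, using the root password from the Secret:
mysql -uroot -p123456 -e 'show databases; use gpmall; show tables;'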
zookeeper + kafka:
mkdir /zookeeper
mkdir /kafka
cat zk.statefulSet.yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  namespace: gpmall
  labels:
    app: zookeeper
spec:
  ports:
  - port: 2888
    name: zk-server
  - port: 3888
    name: zk-leader-election
  clusterIP: None
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-svc
  namespace: gpmall
  labels:
    app: zookeeper
spec:
  ports:
  - port: 2181
    name: zookeeper-client
  clusterIP: 10.105.101.24
  selector:
    app: zookeeper
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget  # protects availability during voluntary disruptions (anything except node failure); set a PDB when the app must not be interrupted
metadata:
  name: zookeeper-pod
  namespace: gpmall
spec:
  selector:
    matchLabels:
      app: zookeeper
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
  namespace: gpmall
spec:
  selector:
    matchLabels:
      app: zookeeper
  serviceName: zookeeper-headless
  replicas: 2
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zookeeper
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: zookeeper
        imagePullPolicy: IfNotPresent
        image: "registry.cn-hangzhou.aliyuncs.com/k8s-image01/kubernetes-zookeeper:1.0-3.4.10"
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
          limits:
            memory: "2Gi"
            cpu: "1"
        ports:
        - containerPort: 2181
          name: zk-client
        - containerPort: 2888
          name: zk-server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=2 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: data
          mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "nfs-storageclass"
      resources:
        requests:
          storage: 3Gi
kubectl get svc -n gpmall
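Health check: reuse the same zookeeper-ready helper the probes call (it ships in this image):
for i in 0 1; do kubectl exec -n gpmall zookeeper-$i -- zookeeper-ready 2181; done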
cat kafka-cluster.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-headless
  namespace: gpmall
  labels:
    app: kafka
spec:
  type: ClusterIP
  clusterIP: None  # headless service; create an extra Service yourself if the port must be exposed externally
  ports:
  - name: kafka
    port: 9092
    targetPort: kafka
  selector:
    app: kafka
---
apiVersion: v1
kind: Service
metadata:
  name: kafka
  namespace: gpmall
  labels:
    app: kafka
spec:
  type: ClusterIP
  clusterIP: 10.96.53.51
  ports:
  - name: kafka
    port: 9092
    targetPort: kafka
  selector:
    app: kafka
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: gpmall
  labels:
    app: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-headless
  replicas: 2
  updateStrategy:
    type: "RollingUpdate"
  template:
    metadata:
      name: "kafka"
      labels:
        app: kafka
    spec:
      affinity:
        podAntiAffinity:  # pod anti-affinity: spread kafka brokers across nodes
          requiredDuringSchedulingIgnoredDuringExecution:  # hard requirement
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - kafka
            topologyKey: "kubernetes.io/hostname"
        podAffinity:  # co-locate each kafka pod with a zookeeper pod
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - zookeeper  # must match the label the zookeeper pods actually carry (app: zookeeper)
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kafka
        image: "docker.io/bitnami/kafka:2.3.0-debian-9-r4"
        imagePullPolicy: "IfNotPresent"
        env:
        - name: MY_POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: KAFKA_CFG_ZOOKEEPER_CONNECT
          value: "zookeeper-headless.gpmall:2181"  # connect through the headless service
        - name: KAFKA_PORT_NUMBER
          value: "9092"
        - name: KAFKA_CFG_LISTENERS
          value: "PLAINTEXT://$(MY_POD_IP):$(KAFKA_PORT_NUMBER)"
        - name: KAFKA_CFG_ADVERTISED_LISTENERS
          value: 'PLAINTEXT://$(MY_POD_IP):$(KAFKA_PORT_NUMBER)'
        - name: ALLOW_PLAINTEXT_LISTENER
          value: "yes"
        - name: KAFKA_HEAP_OPTS
          value: "-Xmx512m -Xms512m"
        - name: KAFKA_CFG_LOGS_DIRS
          value: /opt/bitnami/kafka/data
        ports:
        - name: kafka
          containerPort: 9092
        livenessProbe:
          tcpSocket:
            port: kafka
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 2
        readinessProbe:
          tcpSocket:
            port: kafka
          initialDelaySeconds: 5
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 6
        volumeMounts:
        - name: data
          mountPath: /bitnami/kafka
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      storageClassName: nfs-storageclass  # the StorageClass created earlier
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          storage: 3Gi
Exec into a kafka pod and create a test topic:
./kafka-topics.sh --create --zookeeper zookeeper-0.zookeeper-headless.gpmall:2181 --replication-factor 1 --partitions 1 --topic test
List the topics to verify:
./kafka-topics.sh --list --zookeeper zookeeper-0.zookeeper-headless.gpmall:2181
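An end-to-end test with the console producer/consumer from the same bin directory (kafka.gpmall:9092 is the ClusterIP service declared above):
./kafka-console-producer.sh --broker-list kafka.gpmall:9092 --topic test
./kafka-console-consumer.sh --bootstrap-server kafka.gpmall:9092 --topic test --from-beginning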
redis:
mkdir /redis
cat redis-deployment.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-config
  namespace: gpmall
  labels:
    app: redis
data:
  redis.conf: |
    bind 0.0.0.0
    protected-mode yes
    port 6379
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: gpmall
spec:
  replicas: 1
  selector:
    matchLabels:
      run: redis
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        run: redis
    spec:
      containers:
      - name: redis
        image: redis:5.0.13
        command: ["redis-server", "/etc/redis/redis.conf"]  # start with the mounted config; without this the ConfigMap is ignored
        ports:
        - containerPort: 6379
          name: client
        volumeMounts:
        - name: config
          mountPath: /etc/redis/
      volumes:
      - name: config
        configMap:
          name: redis-config
---
apiVersion: v1
kind: Service
metadata:
  name: redis-svc
  namespace: gpmall
spec:
  selector:
    run: redis  # must match the pod label, not the service name
  type: ClusterIP
  clusterIP: 10.110.8.71
  ports:
  - port: 6379
    targetPort: 6379
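Quick liveness check (redis-cli ships in the redis image):
kubectl exec -n gpmall deploy/redis -- redis-cli ping   # expect PONG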
Jar packages and host mapping:
Why deploy mysql, redis, kafka, and zk first?
Because the jar services run on top of them: the jars depend on all four services at startup, and they reach those services through a host list (hostname-to-address mappings).
Distributed services: a monolith is split into individual services, and the services connect to each other through Services and StatefulSet-based hostnames.
jar:
Dockerfile:
Put the jar packages in the same directory as the Dockerfile.
cat Dockerfile
FROM centos:7
LABEL maintainer="2769718603@qq.com"
RUN yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
COPY shopping-provider-0.0.1-SNAPSHOT.jar \
user-provider-0.0.1-SNAPSHOT.jar \
gpmall-shopping-0.0.1-SNAPSHOT.jar \
gpmall-user-0.0.1-SNAPSHOT.jar \
start.sh /
RUN chmod +x /start.sh
EXPOSE 8081
EXPOSE 8082
EXPOSE 8083
CMD nohup sh -c "/start.sh && java -jar /gpmall-user-0.0.1-SNAPSHOT.jar"
cat start.sh
#!/bin/bash
nohup java -jar shopping-provider-0.0.1-SNAPSHOT.jar &
sleep 10
nohup java -jar user-provider-0.0.1-SNAPSHOT.jar &
sleep 10
nohup java -jar gpmall-shopping-0.0.1-SNAPSHOT.jar &
sleep 10
docker build -t jar:v1.0 .
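Optionally smoke-test the image locally before the Deployment references it (the jars will keep retrying until the backing services are reachable, so only the startup log is meaningful here):
docker run -d --name jar-test jar:v1.0
docker logs -f jar-test
docker rm -f jar-test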
Host address mappings are needed; hostAliases provides them (the entries are written into the pod's /etc/hosts):
kubectl explain deployment.spec.template.spec.hostAliases
=====================
apiVersion: v1
kind: Service
metadata:
  name: jar-svc
  namespace: gpmall
  labels:
    app: jar
spec:
  selector:
    run: jar  # the label of the pods this service selects
  type: ClusterIP  # cluster-internal
  clusterIP: 10.100.122.89  # pinned by hand so the IP survives accidental deletion; omitted, it is auto-assigned
  ports:
  - name: shopping
    port: 8081  # declared service port
    targetPort: 8081
  - name: user
    port: 8082  # declared service port
    targetPort: 8082
  - name: cashier
    port: 8083  # declared service port
    targetPort: 8083
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jar
  namespace: gpmall
spec:
  replicas: 1
  selector:
    matchLabels:
      run: jar
  template:
    metadata:
      labels:
        run: jar
    spec:
      nodeName: k8s-gpmall-node2
      hostAliases:  # each IP must equal the pinned ClusterIP of the matching Service above
      - ip: 10.96.53.51
        hostnames:
        - kafka.mall
      - ip: 10.107.93.218
        hostnames:
        - mysql.mall
      - ip: 10.110.8.71
        hostnames:
        - redis.mall
      - ip: 10.105.101.24
        hostnames:
        - zookeeper.mall
      containers:
      - name: jar
        image: jar:v1.0
        #command:
        #- sh
        #- -c
        #- sleep 3600
        ports:
        - name: shopping
          containerPort: 8081
        - name: user
          containerPort: 8082
        - name: cashier
          containerPort: 8083
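Once the pod is Running, confirm the three ports answer (any HTTP status, even 404, proves the listener is up; 10.100.122.89 is the jar-svc ClusterIP pinned above):
kubectl get endpoints -n gpmall jar-svc
for p in 8081 8082 8083; do curl -s -o /dev/null -w "$p -> %{http_code}\n" http://10.100.122.89:$p; done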
Import the dist files (the built frontend static assets used by the nginx image below).
NGINX:
cat nginx-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
  namespace: gpmall
  labels:
    run: nginx
data:
  default.conf: |
    server {
        listen       80;
        listen  [::]:80;
        server_name  localhost;
        #access_log  /var/log/nginx/host.access.log  main;
        location / {
            root   /usr/share/nginx/html;
            index  index.html index.htm;
        }
        location /user {
            proxy_pass http://10.100.122.89:8082;
        }
        location /shopping {
            proxy_pass http://10.100.122.89:8081;
        }
        location /cashier {
            proxy_pass http://10.100.122.89:8083;
        }
        #error_page  404              /404.html;
        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   /usr/share/nginx/html;
        }
        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}
        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}
        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }
cat default.conf
server {
    listen       80;
    listen  [::]:80;
    server_name  localhost;
    #access_log  /var/log/nginx/host.access.log  main;
    location / {
        root   /usr/share/nginx/html;
        index  index.html index.htm;
    }
    # the proxy targets must match the jar-svc ClusterIP (10.100.122.89)
    location /user {
        proxy_pass http://10.100.122.89:8082;
    }
    location /shopping {
        proxy_pass http://10.100.122.89:8081;
    }
    location /cashier {
        proxy_pass http://10.100.122.89:8083;
    }
    #error_page  404              /404.html;
    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }
    # proxy the PHP scripts to Apache listening on 127.0.0.1:80
    #
    #location ~ \.php$ {
    #    proxy_pass   http://127.0.0.1;
    #}
    # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
    #
    #location ~ \.php$ {
    #    root           html;
    #    fastcgi_pass   127.0.0.1:9000;
    #    fastcgi_index  index.php;
    #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
    #    include        fastcgi_params;
    #}
    # deny access to .htaccess files, if Apache's document root
    # concurs with nginx's one
    #
    #location ~ /\.ht {
    #    deny  all;
    #}
}
cat Dockerfile
FROM nginx
LABEL maintainer="2769718603@qq.com"
RUN cd /usr/share/nginx/html/ && rm -rf *
COPY default.conf /etc/nginx/conf.d/default.conf
ADD dist /usr/share/nginx/html
EXPOSE 80
CMD ["nginx","-g","daemon off;"]
cat nginx.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  namespace: gpmall
  labels:
    run: nginx
spec:
  selector:
    run: nginx
  type: NodePort
  clusterIP: 10.98.93.122
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 32080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: gpmall
  labels:
    run: nginx
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
  replicas: 1
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      name: nginx
      labels:
        run: nginx
    spec:
      # nodeName: k8s-gpmall-node1
      containers:
      - name: nginx
        image: nginx:v2
        volumeMounts:
        - name: config
          mountPath: /etc/nginx/conf.d/
        ports:
        - containerPort: 80
      volumes:
      - name: config
        configMap:
          name: nginx-config
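Final verification (32080 is the NodePort declared in nginx-svc; <node-ip> is any cluster node's address):
kubectl get pods -n gpmall -o wide
kubectl get svc -n gpmall
curl -I http://<node-ip>:32080   # then open the gpmall frontend in a browser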