1. 部署存储
此案例底层使用 NFS 存储,k8s 每个节点全部安装 nfs-utils。
# 01. 安装 nfs (install NFS utilities on every k8s node)
yum install nfs-utils -y
# 02. 创建数据目录 — create BOTH the export root and the /data/nfs/es
#     subdirectory that 04-nfs-deploy.yml mounts (NFS_PATH); the NFS mount
#     fails if the subdirectory does not exist on the server.
mkdir -p /data/nfs/es
# 03. 配置 exports (sync is explicit to avoid exportfs warnings)
cat > /etc/exports <<'EOF'
/data/nfs *(rw,sync,no_root_squash)
EOF
# 04. 启动服务 — nfs-server is the canonical unit name ("nfs" is an alias
#     on CentOS/RHEL 7); --now starts and enables in one step
systemctl enable --now nfs-server
2. 编写 yml 文件
2.1 创建 名称空间
01-ns-elasticsearch.yml
# Dedicated namespace for every Elasticsearch-related resource below.
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch
2.2 创建动态存储类
02-sc-elasticsearch.yml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-nfs-storage
# Must match the PROVISIONER_NAME env of the nfs-client-provisioner Deployment.
provisioner: yunxi
parameters:
  # On PVC deletion, move the data to an "archived-" directory instead of
  # removing it from the NFS share.
  archiveOnDelete: "true"
# Keep released PVs (and their data) instead of deleting them.
reclaimPolicy: Retain
# Canonical lowercase boolean ("True" relies on YAML 1.1 truthy parsing).
allowVolumeExpansion: true
2.3 角色授权及绑定
03-nfs-rbac.yml
# Identity the provisioner pod runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: elasticsearch
---
# Cluster-wide permissions: the provisioner creates/deletes PVs, watches
# PVCs and StorageClasses, and emits Events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: elasticsearch
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions for leader election (endpoints lock).
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: elasticsearch
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: elasticsearch
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: elasticsearch
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
2.4 部署 nfs 控制器
04-nfs-deploy.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: elasticsearch
spec:
  replicas: 1
  strategy:
    # Recreate: never run two provisioner instances against the same share.
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          # Pinned, maintained successor image. The old
          # quay.io/external_storage/nfs-client-provisioner:latest is
          # unmaintained, unpinned, and fails on Kubernetes >= 1.20
          # ("selfLink was empty"); env interface below is identical.
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must match the StorageClass "provisioner" field.
            - name: PROVISIONER_NAME
              value: yunxi
            - name: NFS_SERVER
              value: 192.168.0.191
            - name: NFS_PATH
              value: /data/nfs/es
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.0.191
            # Must already exist on the NFS server (see setup: mkdir -p).
            path: /data/nfs/es
2.5 部署 services 资源
05-es-svc.yml
# Headless governing Service (clusterIP: None). The StatefulSet's
# serviceName and its discovery.seed_hosts entries
# (es-cluster-0.es-cluster-svc, ...) rely on per-pod DNS records, which
# Kubernetes only creates for a headless Service — a NodePort Service
# alone provides none, so ES discovery would never resolve.
apiVersion: v1
kind: Service
metadata:
  name: es-cluster-svc
  namespace: elasticsearch
spec:
  clusterIP: None
  selector:
    app: es
  ports:
    - name: restful
      port: 9200
      targetPort: 9200
    - name: internal
      port: 9300
      targetPort: 9300
---
# Separate NodePort Service for external REST access on node port 32000.
apiVersion: v1
kind: Service
metadata:
  name: es-cluster-nodeport
  namespace: elasticsearch
spec:
  selector:
    app: es
  type: NodePort
  ports:
    - name: restful
      port: 9200
      targetPort: 9200
      nodePort: 32000
2.6 部署 es
06-es-sts.yml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: elasticsearch
spec:
  # References the headless governing Service so each pod gets a stable
  # DNS name (es-cluster-0.es-cluster-svc, ...).
  serviceName: es-cluster-svc
  replicas: 3
  selector:
    matchLabels:
      app: es
  template:
    metadata:
      labels:
        app: es
    spec:
      initContainers:
        # Elasticsearch requires vm.max_map_count >= 262144 on the host.
        - name: increase-vm-max-map
          image: busybox:1.32
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        # NOTE(review): this ulimit only applies to the init container's own
        # shell, not to the es container — raise nofile via the container
        # runtime / node configuration if it is actually needed.
        - name: increase-fd-ulimit
          image: busybox:1.32
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
      containers:
        - name: es-container
          image: elasticsearch:7.8.0
          ports:
            - name: restful
              containerPort: 9200
              protocol: TCP
            - name: internal
              containerPort: 9300
              protocol: TCP
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
          env:
            - name: cluster.name
              value: es-prod
            # Node name mirrors the pod name (es-cluster-0 / -1 / -2).
            - name: node.name
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # Bootstrap voter list for the very first cluster formation.
            - name: cluster.initial_master_nodes
              value: "es-cluster-0,es-cluster-1,es-cluster-2"
            # discovery.zen.minimum_master_nodes removed: deprecated and
            # ignored by Elasticsearch 7.x (quorum is managed automatically).
            - name: discovery.seed_hosts
              value: "es-cluster-0.es-cluster-svc,es-cluster-1.es-cluster-svc,es-cluster-2.es-cluster-svc"
            - name: ES_JAVA_OPTS
              value: "-Xms1g -Xmx1g"
            - name: network.host
              value: "0.0.0.0"
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app: es-volume
        # "namespace" removed here: volumeClaimTemplates always inherit the
        # StatefulSet's namespace; setting it had no effect.
      spec:
        # Each replica gets its own 10Gi RWO volume from the NFS class.
        accessModes:
          - "ReadWriteOnce"
        storageClassName: es-nfs-storage
        resources:
          requests:
            storage: 10Gi
2.7 扩展 - kibana 部署
07-kibana.yml
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: elasticsearch
  labels:
    app: kibana
spec:
  type: NodePort
  ports:
    - port: 5601
      # 35601 was outside the default NodePort range (30000-32767) and the
      # API server would reject the Service; use an in-range port.
      nodePort: 31601
      targetPort: 5601
  selector:
    app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: elasticsearch
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
        - name: kibana
          image: kibana:7.8.0
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
          env:
            # Resolves through the es-cluster-svc Service to the ES pods.
            - name: ELASTICSEARCH_HOSTS
              value: http://es-cluster-svc:9200
          ports:
            - containerPort: 5601