Persistent storage: deploying a rabbitmq-cluster with a StorageClass + operator

A StorageClass is an abstraction over storage resources: it hides the back-end details from the PVCs users submit. This frees users from having to care about storage internals and, at the same time, spares administrators from manually managing PVs, because the system creates and binds PVs automatically, providing dynamic provisioning. Dynamic provisioning based on StorageClasses is steadily becoming the standard storage configuration on cloud platforms.
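
To illustrate the user-facing side of dynamic provisioning, the sketch below (the claim name demo-claim is made up for illustration and assumes the nfs-storage class created later in this article) only names a StorageClass and a size; the cluster then creates and binds a matching PV automatically:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-claim                 # hypothetical name, for illustration only
spec:
  accessModes:
    - ReadWriteMany                # NFS volumes can be shared by multiple pods
  storageClassName: nfs-storage    # the StorageClass defined later in this article
  resources:
    requests:
      storage: 1Gi                 # the provisioner creates a PV of this size and binds it to the claim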

# Download the Kubernetes NFS provisioner
# Source: https://github.com/kubernetes-retired/external-storage/tree/master/nfs  If your servers can pull the upstream image directly, just reference it in the Deployment's image field; otherwise (e.g. in mainland China) pull it, re-tag it, and push it to your own registry, as sketched below.
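
A sketch of the pull / re-tag / push step; the upstream image path and tag are examples (check the provisioner project linked further down for the current ones), and the Aliyun repository is the same placeholder used in the Deployment later on; substitute your own registry:

docker pull registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
docker tag registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 registry.cn-xxxxx.aliyuncs.com/xxxxx/nfs-subdir-external-provisioner:v1
docker push registry.cn-xxxxx.aliyuncs.com/xxxxx/nfs-subdir-external-provisioner:v1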

# RBAC authorization
# vim rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "create", "list", "watch", "update"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

Prepare the deploy.yaml for the NFS provisioner

Project repository: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-xxxxx.aliyuncs.com/xxxxx/nfs-subdir-external-provisioner:v1 # replace with your own re-tagged image
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME # name of the provisioner
              value: storage/nfs # can be any name, but later references must match it
            - name: NFS_SERVER
              value: 192.168.0.32 # NFS server address
            - name: NFS_PATH
              value: /data/nfs # exported directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.0.32 # NFS server address
            path: /data/nfs # exported directory
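
The Deployment above assumes an NFS server at 192.168.0.32 exporting /data/nfs. A minimal sketch of that export on the NFS server side (the allowed client network 192.168.0.0/24 is an assumption; adjust it to your node network):

# /etc/exports on 192.168.0.32
/data/nfs 192.168.0.0/24(rw,sync,no_root_squash)

# re-export and verify
exportfs -arv
showmount -e localhost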

Prepare sc.yaml for the StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: storage/nfs # must match the provisioner name in the Deployment
reclaimPolicy: Delete
allowVolumeExpansion: true # allow PVCs to be expanded after creation
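
Apply the three manifests and check that the provisioner pod is running and the StorageClass is registered (file names as used above):

kubectl apply -f rbac.yaml
kubectl apply -f deploy.yaml
kubectl apply -f sc.yaml

kubectl get pods -l app=nfs-client-provisioner   # the provisioner pod should be Running
kubectl get storageclass nfs-storage             # PROVISIONER should show storage/nfs, marked (default)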

With the StorageClass in place, install RabbitMQ so that its data is dynamically provisioned on it, using the rabbitmq-cluster-operator to set up the cluster.

The rabbitmq-cluster-operator project lives at https://github.com/rabbitmq/cluster-operator/tree/0.49.0 if you want to dig into it.

# Install the operator
kubectl apply -f https://github.com/rabbitmq/cluster-operator/releases/latest/download/cluster-operator.yml
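
The official manifest creates the operator in the rabbitmq-system namespace and registers the RabbitmqCluster CRD; a quick sanity check:

kubectl get pods -n rabbitmq-system             # the rabbitmq-cluster-operator pod should be Running
kubectl get crd rabbitmqclusters.rabbitmq.com   # the CRD used by the manifest below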

# Create a rabbitmq-cluster.yaml file
apiVersion: rabbitmq.com/v1beta1
kind: RabbitmqCluster
metadata:
  name: rabbitmq
  namespace: default
spec:
  image: rabbitmq:3.10.2-management
  replicas: 3
  imagePullSecrets: [] # optional: list pull secrets here if the image comes from a private registry
  service:
    type: NodePort
  persistence:
    storageClassName: nfs-storage
    storage: 1Gi
  resources:
    requests:
      cpu: 100m
      memory: 400Mi
    limits:
      cpu: 100m
      memory: 600Mi
# Deploy the RabbitMQ cluster instance

kubectl apply -f rabbitmq-cluster.yaml

[root@master rabbitmq]# kubectl get pod|grep rabbit
rabbitmq-server-0                 2/2     Running   0              3h2m
rabbitmq-server-1                 2/2     Running   0              3h2m
rabbitmq-server-2                 2/2     Running   0              3h2m
[root@master rabbitmq]# kubectl get all|grep rabbit
pod/rabbitmq-server-0                 2/2     Running   0              3h3m
pod/rabbitmq-server-1                 2/2     Running   0              3h3m
pod/rabbitmq-server-2                 2/2     Running   0              3h3m
service/rabbitmq         NodePort    10.1.185.240   <none>        15692:30500/TCP,5672:31640/TCP,15672:32642/TCP   3h3m
service/rabbitmq-nodes   ClusterIP   None           <none>        4369/TCP,25672/TCP                               3h3m
statefulset.apps/rabbitmq-server   3/3     3h3m
rabbitmqcluster.rabbitmq.com/rabbitmq   True               True               3h3m
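
To confirm that the StorageClass dynamically provisioned the data volumes, check the PVCs and PVs; the operator's claims are typically named persistence-<cluster>-server-N:

kubectl get pvc | grep rabbitmq     # persistence-rabbitmq-server-0/1/2 should be Bound
kubectl get pv | grep nfs-storage   # three PVs created by the storage/nfs provisioner
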
# Retrieve the generated username and password
kubectl get secret rabbitmq-default-user -o jsonpath="{.data.username}" | base64 --decode  # add -n <namespace> if the cluster is not in the default namespace
kubectl get secret rabbitmq-default-user -o jsonpath="{.data.password}" | base64 --decode
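
The NodePort mapped to the management port 15672 (32642 in the output above) can also be read from the service; recent operator versions name this port "management", so fall back to a plain kubectl get svc rabbitmq if yours differs:

kubectl get svc rabbitmq -o jsonpath='{.spec.ports[?(@.name=="management")].nodePort}'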

Open the management web UI at <node IP>:32642 (the NodePort mapped to 15672) and log in with the credentials above.
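
To verify that the three replicas actually joined one cluster (this assumes the container inside the operator-managed pods is named rabbitmq, which is the operator default):

kubectl exec rabbitmq-server-0 -c rabbitmq -- rabbitmq-diagnostics cluster_status
# the Running Nodes section should list all three rabbitmq-server pods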
