1、在K8s中 部署Jenkins优点和缺点问题,简单介绍下:
1.传统Jenkins集群架构一些问题
- Master发生故障时,整个流程都不可用
- Slave集群的环境配置不一样,来完成不同语言的编译打包,但是这些差异化的配置导致管理起来不方便,维护麻烦
- 资源分配不均衡,有的slave要运行的job出现排队等待,而有的slave处于空闲状态
- 资源浪费,每台slave可能是物理机或者虚拟机,当slave处于空闲状态时,也不能完全释放掉资源
2.K8s中Jenkins集群架构优点
- 当Jenkins Master接受到Build请求后,会根据配置的Label动态创建一个运行在Pod中的Jenkins Slave并注册到Master上,当运行完Job后,这个Slave会被注销并且这个Pod也会自动删除,恢复到最初的状态(这个策略可以设置)
- 服务高可用,当Jenkins Master出现故障时,Kubernetes会自动创建一个新的Jenkins Master容器,并且将Volume分配给新创建的容器,保证数据不丢失,从而达到集群服务高可用的作用
- 动态伸缩,合理使用资源,每次运行Job时,会自动创建一个Jenkins Slave,Job完成后,Slave自动注销并删除容器,资源自动释放,并且Kubernetes会根据每个资源的使用情况,动态分配Slave到空闲的节点上创建,降低因某节点资源利用率过高而出现排队等待的情况
- 扩展性好,当Kubernetes集群的资源严重不足导致Job排队等待时,可以很容易地添加一个Kubernetes Node到集群中,从而实现扩展
2、集群环境
3、使用Deployment和StatefulSet,两个控制器方式部署Jenkins
Jenkins-Deployment 控制器方式
1)k8s-node1部署NFS服务端配置
# Install and enable NFS on every NFS server node.
yum install -y nfs-utils
systemctl enable nfs-server rpcbind --now
# Create the shared directory and open its permissions.
# NOTE: the original used `chown -R 777`, but chown takes an owner, not a
# mode; chmod is the command that sets permission bits.
mkdir -p /data/k8s
chmod -R 777 /data/k8s
# Write the export rule itself into /etc/exports. The original here-doc wrote
# a literal `echo "..." >/etc/exports` line into the file, which is not a
# valid exports entry.
cat > /etc/exports << 'EOF'
/data/k8s 192.168.56.0/24(rw,no_root_squash,sync)
EOF
# Reload the export table.
systemctl reload nfs-server
# Verify the export (the command is showmount, not "showmout"):
showmount -e 192.168.56.11
....
创建Jenkins集群所需的YAML文件
1)创建命名空间和存放Jenkins的YAML目录
# Create the namespace that will hold all Jenkins resources.
kubectl create namespace devops
# Directory for the Jenkins YAML manifests written in the steps below.
mkdir -p /opt/jenkins
2)为Jenkins数据持久化存储创建一个PV
# PV + PVC for the Jenkins home directory. Indentation restored (the original
# YAML was flush-left and therefore invalid). Reclaim policy changed from
# Delete to Retain: the in-tree NFS plugin cannot delete volumes, so a Delete
# policy would leave the PV in a Failed state once the claim is released.
cat >/opt/jenkins/jenkins_pv.yaml <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: opspv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.56.11
    path: /data/k8s
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: opspvc
  namespace: devops
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
EOF
3)创建Jenkins集群权限serviceAccount文件
# ServiceAccount + cluster-wide RBAC for the Jenkins master.
# rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes 1.22; the stable
# v1 API is used instead. A ClusterRoleBinding is cluster-scoped, so it does
# not carry a namespace. Indentation restored.
cat >/opt/jenkins/jenkins_rbac.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
  namespace: devops
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: jenkins
rules:
  - apiGroups: ["extensions", "apps"]
    resources: ["deployments"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: jenkins
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: jenkins
subjects:
  - kind: ServiceAccount
    name: jenkins
    namespace: devops
EOF
4)创建Jenkins Deployment
# Write the Jenkins Deployment (the original only displayed a file that was
# never created). extensions/v1beta1 was removed from Kubernetes long ago;
# apps/v1 additionally requires an explicit spec.selector. The here-doc
# delimiter is quoted so the shell does not command-substitute
# $(LIMITS_MEMORY) while writing the file — Kubernetes itself expands $(VAR)
# references at container start.
cat >/opt/jenkins/jenkins_deployment.yaml <<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: devops
spec:
  selector:
    matchLabels:
      app: jenkins
  template:
    metadata:
      labels:
        app: jenkins
    spec:
      terminationGracePeriodSeconds: 10
      serviceAccountName: jenkins  # "serviceAccount" is the deprecated alias
      containers:
        - name: jenkins
          image: jenkins/jenkins:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080   # web UI port
              name: web
              protocol: TCP
            - containerPort: 50000  # JNLP port used by Jenkins slave agents
              name: agent
              protocol: TCP
          resources:
            limits:
              cpu: 1000m
              memory: 1Gi
            requests:
              cpu: 500m
              memory: 512Mi
          livenessProbe:
            httpGet:
              path: /login
              port: 8080
            initialDelaySeconds: 60  # wait 60s after start before probing
            timeoutSeconds: 5
            failureThreshold: 12     # restart pod after 12 consecutive failures
          readinessProbe:
            httpGet:
              path: /login
              port: 8080
            initialDelaySeconds: 60
            timeoutSeconds: 5
            failureThreshold: 12
          volumeMounts:              # persist JENKINS_HOME
            - name: jenkinshome
              subPath: jenkins
              mountPath: /var/jenkins_home
          env:
            - name: LIMITS_MEMORY
              valueFrom:
                resourceFieldRef:
                  resource: limits.memory
                  divisor: 1Mi
            - name: JAVA_OPTS
              value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Duser.timezone=Asia/Shanghai
      securityContext:
        fsGroup: 1000
      volumes:
        - name: jenkinshome
          persistentVolumeClaim:
            claimName: opspvc
EOF
5)创建Jenkins SVC
# NodePort Service for Jenkins. The namespace must match the Deployment's
# namespace (the original said "demon", which is never created anywhere in
# this guide). Indentation restored.
cat >/opt/jenkins/jenkins_svc.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: devops
  labels:
    app: jenkins
spec:
  selector:
    app: jenkins
  type: NodePort
  ports:
    - name: web
      port: 8080
      targetPort: web
      nodePort: 30002
    - name: agent
      port: 50000
      targetPort: agent
EOF
6)依次创建
[root@k8s-node1 jenkins]# ls
jenkins_deployment.yaml jenkins_pv.yaml jenkins_rbac.yaml jenkins_svc.yaml
[root@k8s-node1 jenkins]# kubectl apply -f ./
7)查看结果
[root@k8s-node1 jenkins]# kubectl get pv,pvc,pod,svc -n devops
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/opspv 10Gi RWX Delete Bound devops/opspvc 1h
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/opspvc Bound opspv 10Gi RWX 1h
NAME READY STATUS RESTARTS AGE
pod/jenkins-6d7bc49b74-d9jxc 1/1 Running 0 1h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/jenkins NodePort 10.1.148.201 <none> 8080:30002/TCP,50000:26723/TCP 1h
8080:端口为我们jenkins访问端口
50000:端口为Jenkins Slave(agent)注册发现端口
Jenkins-StatefulSet 控制器方式
1.创建两个目录
# Directories used by the nfs-client provisioner demo and local Jenkins data.
mkdir -pv /data/nfs-client/ /data/jenkins
2.nfs服务端
# Install and enable NFS on every NFS server node.
yum install -y nfs-utils
systemctl enable nfs-server rpcbind --now
# Create the shared directory and open its permissions.
# NOTE: chmod (not chown) sets permission bits; `chown -R 777` is invalid.
mkdir -p /data/k8s && chmod -R 777 /data/k8s
# Write the export rule itself into /etc/exports. The original here-doc wrote
# a literal `echo "..." >/etc/exports` line into the file, which is not a
# valid exports entry.
cat > /etc/exports << 'EOF'
/data/k8s 192.168.56.0/24(rw,no_root_squash,sync)
EOF
# Reload the export table.
systemctl reload nfs-server
# Verify the export (the command is showmount, not "showmout"):
showmount -e 192.168.56.11
....
部署nfs-client所有认证(rbac、class、)
1.创建rbac.yaml文件
# RBAC for the nfs-client provisioner (matches the upstream manifest).
# Indentation restored: the original YAML was flush-left and therefore invalid.
cat > rbac.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF
4.创建class.yaml文件
# StorageClass consumed by the StatefulSet's volumeClaimTemplates below.
# Indentation restored; the provisioner value must match the deployment's
# PROVISIONER_NAME environment variable.
cat > class.yaml << EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name; must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
EOF
5.创建deployment.yaml
# Deployment of the nfs-client provisioner. Indentation restored.
# NOTE(review): this legacy image relies on the selfLink field that was
# removed in newer Kubernetes; on clusters >= 1.20 the maintained
# nfs-subdir-external-provisioner image should be used instead — confirm
# against your cluster version.
cat > deployment.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.1.201 # change to your NFS server IP
            - name: NFS_PATH
              value: /data/k8s # change to your exported directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.201 # change to your NFS server IP
            path: /data/k8s # change to your exported directory
EOF
创建Jenkins yaml文件
1)创建jenkins_rbac.yaml
# RBAC for Jenkins (StatefulSet variant). rbac v1beta1 was removed in
# Kubernetes 1.22 — use v1. The original declared "jenkinsClusterRole" as a
# namespaced Role but bound it with roleRef kind ClusterRole, so the binding
# could never resolve; it is now a real ClusterRole bound by a
# ClusterRoleBinding (both cluster-scoped, hence no namespace). Indentation
# restored.
cat > jenkins_rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: jenkins
  namespace: devops
rules:
  - apiGroups: ["extensions", "apps"]
    resources: ["deployments"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: jenkins
  namespace: devops
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: jenkins
subjects:
  - kind: ServiceAccount
    name: jenkins
    namespace: devops
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: jenkinsClusterRole
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: jenkinsClusterRoleBinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: jenkinsClusterRole
subjects:
  - kind: ServiceAccount
    name: jenkins
    namespace: devops
EOF
2)创建jenkins_serviceaccount.yaml
# ServiceAccount referenced by the RoleBinding/ClusterRoleBinding subjects
# above and by the StatefulSet below. Indentation restored.
cat > jenkins_serviceaccount.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
  namespace: devops
EOF
3)创建jenkins_StatefulSet.yaml
# Jenkins StatefulSet. The here-doc delimiter is quoted so the shell does NOT
# command-substitute $(LIMITS_MEMORY) while writing the file — the original
# unquoted EOF would have executed it, leaving JAVA_OPTS with an empty -Xmx
# value (Kubernetes expands $(VAR) itself at container start). Namespace
# unified to "devops", where the ServiceAccount and RBAC objects live (the
# original "devops01" is never created). Indentation restored.
cat > jenkins_StatefulSet.yaml << 'EOF'
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: jenkins
  namespace: devops
  labels:
    name: jenkins
spec:
  serviceName: jenkins
  selector:
    matchLabels:
      app: jenkins
  replicas: 1
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      name: jenkins
      labels:
        app: jenkins
    spec:
      terminationGracePeriodSeconds: 10
      serviceAccountName: jenkins
      containers:
        - name: jenkins
          image: jenkins/jenkins:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080   # web UI port
              name: web
              protocol: TCP
            - containerPort: 50000  # JNLP port used by Jenkins slave agents
              name: agent
              protocol: TCP
          resources:
            limits:
              cpu: 1000m
              memory: 1Gi
            requests:
              cpu: 500m
              memory: 512Mi
          livenessProbe:
            httpGet:
              path: /login
              port: 8080
            initialDelaySeconds: 60  # wait 60s after start before probing
            timeoutSeconds: 5
            failureThreshold: 12     # restart pod after 12 consecutive failures
          readinessProbe:
            httpGet:
              path: /login
              port: 8080
            initialDelaySeconds: 60
            timeoutSeconds: 5
            failureThreshold: 12
          volumeMounts:              # persist JENKINS_HOME
            - name: jenkins-home
              subPath: jenkins
              mountPath: /var/jenkins_home
          env:
            - name: LIMITS_MEMORY
              valueFrom:
                resourceFieldRef:
                  resource: limits.memory
                  divisor: 1Mi
            - name: JAVA_OPTS
              value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Duser.timezone=Asia/Shanghai
      securityContext:
        fsGroup: 1000
  volumeClaimTemplates:
    - metadata:
        name: jenkins-home
      spec:
        storageClassName: "managed-nfs-storage"
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 5Gi
EOF
4)创建jenkins_Service.yaml
# NodePort Service for the StatefulSet. Namespace unified to "devops", and the
# nodePort moved into the valid NodePort range (default 30000-32767 — the
# original 8081 would be rejected by the API server). Indentation restored.
cat > jenkins_Service.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: devops
  labels:
    app: jenkins
spec:
  selector:
    app: jenkins
  type: NodePort
  ports:
    - name: web
      port: 8080
      targetPort: web
      nodePort: 30002
    - name: agent
      port: 50000
      targetPort: agent
EOF
5)依次创建
[root@k8s-node1 jenkins]# ls
jenkins_rbac.yaml jenkins_Service.yaml jenkins_StatefulSet.yaml jenkins_serviceaccount.yaml
[root@k8s-node1 jenkins]# kubectl apply -f ./
6)查看结果
.....
Jenkins部署OK后,可以通过浏览器访问集群任意IP的svc端口
管理员密码路径:持久化在/data/k8s下,所以jenkins的所有配置都在这下面
cat /data/k8s/jenkins/secrets/initialAdminPassword
1)直接推荐安装即可
2)安装完成后我们进入jenkins主页面
3)Jenkins–>插件–>安装插件Kubernetes
Jenkins中配置k8s
1)系统管理->系统配置
2)配置拉到最下面找到Kubernetes插件
Name # 配置的名称
Kubernetes URL # 这里的URL是K8s内部的URL,实际上就是`svcname` = `https://kubernetes.default.svc.cluster.local`
Kubernetes Namespace k8s的命名空间 # 实际上就是Jenkins所在的命名空间
3)Jenkins URL配置
Jenkins URL # 这里的URL是jenkins的svc名称加上命名空间,实际上就是在k8s集群内部访问jenkins的一个方法,这里也不需要修改
http://jenkins.devops.svc.cluster.local:8080
4)配置添 Jenkins Slave Pod模板
Name = Pod 名称 Namespace = Pod命名空间 Labels = Pod标签
5)容器的模板配置
6)创建volume的配置
Jenkins Master收到Build请求时,会根据配置的Label动态创建一个运行在Pod中的Jenkins Slave并注册到Master上,当Job运行完,这个Slave会被注销并且这个Pod也会自动删除,恢复到最初状态
7)测试验证
新建Job选择流水线
流水线Pipeline
// Scripted pipeline: runs all stages inside a dynamically provisioned
// Jenkins slave pod created by the Kubernetes plugin's podTemplate.
def label = "jenkins-slave"
// 'kubernetes' must match the cloud name configured under
// Manage Jenkins -> System -> Clouds (the Kubernetes plugin settings above).
podTemplate(label: label, cloud: 'kubernetes')
{
// node(label) schedules the enclosed stages onto the pod created above.
node(label) {
stage('pull code') {
echo "拉取代码"
}
stage('build') {
echo "代码编译"
}
stage('SonarQube') {
echo "质量扫描"
}
}
}