CoreDNS is a DNS server that chains plugins (github.com)
一、部署CoreDNS
1、下载所需要的镜像
root@k8s-master1-etcd1:~# docker pull coredns/coredns:1.8.7
root@k8s-master1-etcd1:~# docker tag docker.io/coredns/coredns:1.8.7 harbor.host.com/base/coredns:1.8.7
root@k8s-master1-etcd1:~# docker push harbor.host.com/base/coredns:1.8.7
2、下载yaml文件
root@k8s-master1-etcd1:~/kubernetes_1.23.5# ll
total 478680
drwxr-xr-x 2 root root 4096 Apr 23 19:02 ./
drwx------ 10 root root 4096 Apr 23 19:02 ../
-rw-r--r-- 1 root root 26535838 Apr 23 19:01 kubernetes-client-darwin-amd64.tar.gz
-rw-r--r-- 1 root root 121635346 Apr 23 19:01 kubernetes-node-linux-amd64.tar.gz
-rw-r--r-- 1 root root 341423907 Apr 23 19:01 kubernetes-server-linux-amd64.tar.gz
-rw-r--r-- 1 root root 554989 Apr 23 19:01 kubernetes.tar.gz
解压完成后,在 kubernetes/cluster/addons/dns/coredns 目录下
找到 coredns 的 yaml 文件进行修改
root@k8s-master1-etcd1:~/kubernetes_1.23.5/kubernetes/cluster/addons/dns/coredns# ll
total 44
drwxr-xr-x 2 root root 4096 Mar 17 00:15 ./
drwxr-xr-x 5 root root 4096 Mar 17 00:15 ../
-rw-r--r-- 1 root root 1075 Mar 17 00:15 Makefile
-rw-r--r-- 1 root root 5060 Mar 17 00:15 coredns.yaml.base
-rw-r--r-- 1 root root 5110 Mar 17 00:15 coredns.yaml.in
-rw-r--r-- 1 root root 5112 Mar 17 00:15 coredns.yaml.sed
-rw-r--r-- 1 root root 344 Mar 17 00:15 transforms2salt.sed
-rw-r--r-- 1 root root 287 Mar 17 00:15 transforms2sed.sed
root@k8s-master1-etcd1:~/kubernetes_1.23.5/kubernetes/cluster/addons/dns/coredns# pwd
#cp coredns.yaml.base /root/yaml/coredns.yaml
3、修改部分内容如下:cat coredns.yaml
根据实际情况修改
# ConfigMap data section: the CoreDNS Corefile (indentation restored —
# the pasted version had all leading whitespace stripped and was invalid YAML).
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        # Set the zone below to the CLUSTER_DNS_DOMAIN value used when the
        # cluster was deployed; here it is cluster.local.
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
# CoreDNS Deployment (indentation restored to the canonical
# kubernetes/cluster/addons/dns/coredns/coredns.yaml.base layout —
# the pasted version had all leading whitespace stripped).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        # Change to your own private registry address.
        image: harbor.host.com/base/coredns:1.8.7
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 200Mi  # adjust resources to your environment
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
# kube-dns Service fronting the CoreDNS pods (indentation restored —
# the pasted version had all leading whitespace stripped).
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  # Must match the nameserver address written into the pods' /etc/resolv.conf
  # (the cluster DNS service IP).
  clusterIP: 10.100.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
root@k8s-master1-etcd1:~/yaml# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
4、验证
验证容器是否启动
image.png
进入容器里面进行验证
image.png
二、官方Dashboard
部署dashboard需要用到两个镜像(changelog里查对应版本)
docker pull kubernetesui/dashboard:v2.5.1
docker pull kubernetesui/metrics-scraper:v1.0.7
下载yaml文件
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml
#修改service暴露服务
# Dashboard Service, modified from recommended.yaml to expose it via NodePort
# (indentation restored — the pasted version had all leading whitespace stripped).
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort  # expose via NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30004  # exposed node port
  selector:
    k8s-app: kubernetes-dashboard
root@k8s-master1-etcd1:~/yaml# kubectl apply -f dashboard-v2.5.1.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
image.png
创建admin账户
admin-user.yml
# admin-user ServiceAccount bound to the built-in cluster-admin ClusterRole,
# used to obtain a token for Dashboard login
# (indentation restored — the pasted version had all leading whitespace stripped).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
kubectl apply -f admin-user.yml
拿到admin账户的token,测试登录
kubectl get secret -A | grep admin
root@k8s-master1-etcd1:~/yaml# kubectl describe secret -n kubernetes-dashboard admin-user-token-frzts
image.png