1. The ReplicationController Resource
Replica controller
rc:
Ensures that the specified number of pods is always running; the rc associates pods with itself via a label selector.
# Create an rc
[root@k8s-master k8s_yaml]# vim k8s_rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
spec:
  replicas: 5
  selector:
    app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80
[root@k8s-master k8s_yaml]# kubectl create -f k8s_rc.yaml
replicationcontroller "nginx" created
[root@k8s-master k8s_yaml]# kubectl get rc
NAME DESIRED CURRENT READY AGE
nginx 5 5 0 6s
[root@k8s-master k8s_yaml]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 2 15h
nginx-b2l78 1/1 Running 0 15s
nginx-gh210 1/1 Running 0 15s
nginx-gs025 1/1 Running 0 15s
nginx-k4hp5 1/1 Running 0 15s
nginx-twf7x 1/1 Running 0 15s
test 2/2 Running 4 15h
Common operations on k8s resources:
create, delete, update, and query
kubectl create -f xxx.yaml
kubectl get pod|rc
kubectl describe pod nginx
kubectl delete pod nginx, or kubectl delete -f xxx.yaml
kubectl edit pod nginx
To recover, restart kubelet.service on the node.
[root@k8s-master k8s_yaml]# kubectl edit rc nginx
spec:
  replicas: 10
[root@k8s-master k8s_yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 1/1 Running 2 16h 172.18.49.2 k8s-node2
nginx-4dht9 0/1 ContainerCreating 0 6s <none> k8s-node1
nginx-9661w 0/1 ContainerCreating 0 6s <none> k8s-node1
nginx-9ntg5 1/1 Running 0 1m 172.18.49.4 k8s-node2
nginx-b2l78 1/1 Running 0 11m 172.18.42.3 k8s-node1
nginx-gh210 1/1 Running 0 11m 172.18.49.3 k8s-node2
nginx-gs025 1/1 Running 0 11m 172.18.42.4 k8s-node1
nginx-jfg7f 0/1 ContainerCreating 0 6s <none> k8s-node2
nginx-l8l6h 0/1 ContainerCreating 0 6s <none> k8s-node1
nginx-nl4s0 1/1 Running 0 1m 172.18.49.5 k8s-node2
nginx-sld3s 0/1 ContainerCreating 0 6s <none> k8s-node2
test 2/2 Running 4 15h 172.18.42.2 k8s-node1
2. Rolling Upgrade of an rc
Create a new rc yaml for nginx 1.15
# Upload the docker_nginx1.15 image archive, tag it, and push it to the registry
wget http://192.168.37.202/linux59/docker_nginx1.15.tar.gz
docker load -i docker_nginx1.15.tar.gz
docker tag docker.io/nginx:latest 10.0.0.11:5000/nginx:1.15
docker push 10.0.0.11:5000/nginx:1.15
# Create the k8s_rc2.yaml config file
cd k8s_yaml/
mkdir rc
mv k8s_rc.yaml rc/
cd rc/
cp k8s_rc.yaml k8s_rc2.yaml
Compare the differences between k8s_rc.yaml and k8s_rc2.yaml (a sketch of the new file follows below).
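The original notes do not show the contents of k8s_rc2.yaml. A minimal sketch of what it likely contains, assuming the usual rolling-update requirements: a new rc name (nginx2), a changed selector label (app: myweb2), and the 1.15 image. The name and label are assumptions inferred from the rollback command and the Service example later in these notes:
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx2                            # must differ from the old rc name
spec:
  replicas: 5
  selector:
    app: myweb2                           # selector must differ from the old rc's
  template:
    metadata:
      labels:
        app: myweb2
    spec:
      containers:
      - name: myweb
        image: 10.0.0.11:5000/nginx:1.15  # upgraded image
        ports:
        - containerPort: 80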
# Check the currently running nginx 1.13 version
[root@k8s-master rc]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 1/1 Running 2 17h 172.18.49.2 k8s-node2
nginx-309cg 1/1 Running 0 5m 172.18.49.3 k8s-node2
nginx-nt1tr 1/1 Running 0 5m 172.18.49.4 k8s-node2
nginx-sh229 1/1 Running 0 5m 172.18.42.3 k8s-node1
nginx-w517q 1/1 Running 0 5m 172.18.42.2 k8s-node1
nginx-wkhcv 1/1 Running 0 5m 172.18.42.4 k8s-node1
test 2/2 Running 0 51m 172.18.49.6 k8s-node2
[root@k8s-master rc]# curl -I 172.18.49.4
HTTP/1.1 200 OK
Server: nginx/1.13.12
Upgrade to nginx 1.15
kubectl rolling-update nginx -f k8s_rc2.yaml --update-period=10s
Roll back to nginx 1.13
kubectl rolling-update nginx2 -f k8s_rc.yaml --update-period=1s
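To confirm that the upgrade (or rollback) actually took effect, the curl check from above can be repeated against one of the newly created pods; the pod IP placeholder below must be replaced with a real IP from your cluster:
kubectl get pods -o wide
curl -I <new-pod-IP>    # the Server header should now report nginx/1.15.x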
3. The Service Resource
A Service exposes the pods' port.
Create a Service
apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort        # default type is ClusterIP
  ports:
  - port: 80            # port on the cluster IP
    nodePort: 30000     # port exposed on every node
    targetPort: 80      # port of the pod
  selector:
    app: myweb2
Detailed configuration steps
[root@k8s-master svc]# mkdir svc
[root@k8s-master svc]# cd svc/
[root@k8s-master svc]# pwd
/root/k8s_yaml/svc
[root@k8s-master svc]# vim k8s_svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort        # default type is ClusterIP
  ports:
  - port: 80            # port on the cluster IP
    nodePort: 30000     # port exposed on every node
    targetPort: 80      # port of the pod
  selector:
    app: myweb
# Create the svc
kubectl create -f k8s_svc.yaml
# Two ways to view the svc
kubectl get svc
kubectl get service
# View all resource types
kubectl get all -o wide
# Make sure the labels match: change the selector to myweb
kubectl edit svc myweb
  selector:
    app: myweb
# Check whether the port was exposed successfully
kubectl describe svc myweb
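As an additional sanity check (not in the original notes), the Service's endpoints can be listed; if the selector matches the rc's pods, every pod IP should show up as an endpoint:
kubectl get endpoints myweb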
The service can now be reached from a browser:
10.0.0.12:30000
10.0.0.13:30000
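The same check can be done from the command line with curl against either node IP (the node IPs are the ones used above):
curl -I 10.0.0.12:30000
curl -I 10.0.0.13:30000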
4. Load Balancing
# Another way to change the replica count
[root@k8s-master svc]# kubectl scale rc nginx --replicas=3
# How to get a shell inside a container in k8s
[root@k8s-master svc]# kubectl exec -it nginx-5mf4r /bin/bash
root@nginx-5mf4r:/# echo '11111' >/usr/share/nginx/html/index.html
root@nginx-5mf4r:/# exit
[root@k8s-master svc]# kubectl exec -it nginx-ppjb3 /bin/bash
root@nginx-ppjb3:/# echo '2222' >/usr/share/nginx/html/index.html
root@nginx-ppjb3:/# exit
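With the two pods now serving different index pages, repeatedly curling the NodePort should return '11111' and '2222' in turn, which demonstrates the Service's load balancing (a quick check, not part of the original notes):
for i in $(seq 1 4); do curl 10.0.0.12:30000; done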
Widen the NodePort range so ports can be allocated freely (the default range is 30000-32767)
[root@k8s-master svc]# vim /etc/kubernetes/apiserver
KUBE_API_ARGS="--service-node-port-range=3000-50000"
[root@k8s-master svc]# systemctl restart kube-apiserver.service
[root@k8s-master svc]# kubectl expose rc nginx --port=80 --type=NodePort
service "nginx" exposed
[root@k8s-master svc]# kubectl get all -o wide
NAME DESIRED CURRENT READY AGE CONTAINER(S) IMAGE(S) SELECTOR
rc/nginx 3 3 3 1h myweb 10.0.0.11:5000/nginx:1.13 app=myweb
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
svc/kubernetes 10.254.0.1 <none> 443/TCP 1d <none>
svc/myweb 10.254.173.22 <nodes> 80:30000/TCP 52m app=myweb
svc/nginx 10.254.22.101 <nodes> 80:4336/TCP 2s app=myweb
NAME READY STATUS RESTARTS AGE IP NODE
po/nginx 1/1 Running 2 19h 172.18.49.2 k8s-node2
po/nginx-5mf4r 1/1 Running 0 1h 172.18.42.4 k8s-node1
po/nginx-ppjb3 1/1 Running 0 1h 172.18.49.3 k8s-node2
po/nginx-vvh1m 1/1 Running 0 1h 172.18.49.4 k8s-node2
po/test 2/2 Running 0 2h 172.18.49.6 k8s-node2
A Service uses iptables for load balancing by default; newer k8s versions (1.8+) recommend LVS/IPVS (layer-4 load balancing at the transport layer, TCP/UDP).
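For reference, on clusters new enough to support it (1.8+), kube-proxy's IPVS mode is enabled roughly as follows; this is a sketch for newer versions and does not apply to the 1.5-era cluster used in these notes:
# on every node: load the IPVS kernel modules
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh
# then add --proxy-mode=ipvs to kube-proxy's startup arguments and restart it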