Backend Storage
Local Storage
- emptyDir
Using an emptyDir volume is similar to docker run -v /xx when creating a Docker container: the kubelet creates an initially empty directory on the node (typically under /var/lib/kubelet/pods/<pod-uid>/volumes/) and maps it into the container at /xx; if /xx does not exist inside the container it is created automatically. The directory is tied to the pod's lifetime and is removed when the pod is deleted. Both containers in the manifest below mount the same volume, as the quick check after the manifest shows.
apiVersion: v1
kind: Pod
metadata:
  name: demo
  labels:
    role: myrole
spec:
  volumes:
  - name: volume1
    emptyDir: {}
  containers:
  - name: demo1
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ['sh','-c','sleep 5000']
    volumeMounts:
    - mountPath: /xx
      name: volume1
  - name: demo2
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ['sh','-c','sleep 5000']
    volumeMounts:
    - mountPath: /xx
      name: volume1
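Since both containers mount the same emptyDir, a file written in one is immediately visible in the other; a quick check, assuming the pod above is Running:
[root@s1 volume]# kubectl exec demo -c demo1 -- sh -c 'echo hello > /xx/test'
[root@s1 volume]# kubectl exec demo -c demo2 -- cat /xx/test
hello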
- hostPath
Using a hostPath volume is similar to docker run -v /data:/xx: the directory /data on the node is mapped to /xx inside the container, and /xx is created automatically if it does not exist. Unlike emptyDir, the data outlives the pod, but it is node-local: a pod scheduled onto a different node sees that node's /data instead (see the pinned-pod sketch after the demo below).
apiVersion: v1
kind: Pod
metadata:
  name: demo
  labels:
    purpose: demonstrate-envars
spec:
  volumes:
  - name: volume1
    hostPath:
      path: /data
  containers:
  - name: demo1
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ['sh','-c','sleep 5000']
    volumeMounts:
    - mountPath: /xx
      name: volume1
  - name: demo2
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ['sh','-c','sleep 5000']
    volumeMounts:
    - mountPath: /xx
      name: volume1
## Inspect the pod's volumes
[root@s1 volume]# kubectl describe pod demo | grep -A3 Volumes
Volumes:
volume1:
Type: HostPath (bare host directory volume)
Path: /data
## Check which node the pod is on; it is currently on s2
[root@s1 volume]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
demo 2/2 Running 0 8s 10.244.78.141 s2 <none> <none>
## Copy a file into the pod's volume
[root@s1 volume]# kubectl cp /etc/hosts demo:/xx -c demo1
## On s2, check the copied file in /data
[root@s2 ~]# ll /data/
total 4
-rw-r--r-- 1 root root 206 Jan 2 11:42 hosts
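Because hostPath data is node-local, where the files end up depends on scheduling. Pinning the pod to a node makes the location predictable; a minimal sketch (demo-pinned is a hypothetical name; nodeName bypasses the scheduler):
apiVersion: v1
kind: Pod
metadata:
  name: demo-pinned
spec:
  nodeName: s2             # schedule directly onto s2 so /data on s2 is always used
  volumes:
  - name: volume1
    hostPath:
      path: /data
  containers:
  - name: demo1
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ['sh','-c','sleep 5000']
    volumeMounts:
    - mountPath: /xx
      name: volume1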
Network Storage
- NFS
## Set up the NFS server on s4
# Install the NFS packages
[root@s4 ~]# yum -y install nfs-u*
# Create the shared directory /123
[root@s4 ~]# mkdir /123
# Export /123
[root@s4 ~]# echo "/123 *(rw,sync,no_root_squash)" >> /etc/exports
# Start and enable the NFS service so the export takes effect
[root@s4 ~]# systemctl enable --now nfs-server
## Install the NFS packages on every node
[root@s1 ~]# yum -y install nfs-u*
[root@s2 ~]# yum -y install nfs-u*
[root@s3 ~]# yum -y install nfs-u*
# Test the connection to the server
[root@s1 volume]# showmount -e 10.211.55.10
Export list for 10.211.55.10:
/123 *
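Before handing the export to Kubernetes, it is worth a manual mount test from any node, using the nfs-utils just installed; a quick sketch:
[root@s1 ~]# mount -t nfs 10.211.55.10:/123 /mnt
[root@s1 ~]# umount /mnt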
apiVersion: v1
kind: Pod
metadata:
  name: demo
  labels:
    aa: bb
spec:
  volumes:
  - name: volume1
    nfs:
      server: 10.211.55.10
      path: /123
  containers:
  - name: demo1
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ['sh','-c','sleep 5000']
    volumeMounts:
    - mountPath: /xx
      name: volume1
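To verify the volume end to end, write a file through the pod and read it back on the NFS server; a hedged sketch, assuming the pod above is Running:
[root@s1 volume]# kubectl exec demo -- sh -c 'echo nfs-test > /xx/test'
[root@s4 ~]# cat /123/test
nfs-test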
- iscsi
## Set up the iSCSI target server on s4
# Add a new disk; here it shows up as /dev/sdb
# Create a partition on it to use as the LUN backing store
[root@s4 ~]# fdisk /dev/sdb
n
p
1
w
[root@s4 ~]# partprobe /dev/sdb
[root@s4 ~]# ls /dev/sdb1
/dev/sdb1
[root@s4 ~]# vgcreate iscsi_vg /dev/sdb1
Physical volume "/dev/sdb1" successfully created.
Volume group "iscsi_vg" successfully created
[root@s4 ~]# lvcreate -L 2G -n iscsi_lvm iscsi_vg
Logical volume "iscsi_lvm" created.
[root@s4 ~]# targetcli
Warning: Could not load preferences file /root/.targetcli/prefs.bin.
targetcli shell version 2.1.fb49
Copyright 2011-2013 by Datera, Inc and others.
For help on commands, type 'help'.
/> /backstores/block create s4.disk1 /dev/iscsi_vg/iscsi_lvm
Created block storage object s4.disk1 using /dev/iscsi_vg/iscsi_lvm.
/> /iscsi create iqn.2020-01.com.example:s4
Created target iqn.2020-01.com.example:s4.
Created TPG 1.
Global pref auto_add_default_portal=true
Created default portal listening on all IPs (0.0.0.0), port 3260.
/> /iscsi/iqn.2020-01.com.example:s4/tpg1/acls create iqn.2020-01.com.example:k8s
Created Node ACL for iqn.2020-01.com.example:k8s
/> /iscsi/iqn.2020-01.com.example:s4/tpg1/luns create /backstores/block/s4.disk1
Created LUN 0.
Created LUN 0->0 mapping in node ACL iqn.2020-01.com.example:k8s
/> /iscsi/iqn.2020-01.com.example:s4/tpg1/portals create 10.211.55.x
Using default IP port 3260
/> saveconfig
Configuration saved to /etc/target/saveconfig.json
/> exit
Global pref auto_save_on_exit=true
Last 10 configs saved in /etc/target/backup/.
Configuration saved to /etc/target/saveconfig.json
[root@s4 ~]# targetcli ls /
o- / ......................................................................................................................... [...]
o- backstores .............................................................................................................. [...]
| o- block .................................................................................................. [Storage Objects: 1]
| | o- s4.disk1 .......................................................... [/dev/iscsi_vg/iscsi_lvm (2.0GiB) write-thru activated]
| | o- alua ................................................................................................... [ALUA Groups: 1]
| | o- default_tg_pt_gp ....................................................................... [ALUA state: Active/optimized]
| o- fileio ................................................................................................. [Storage Objects: 0]
| o- pscsi .................................................................................................. [Storage Objects: 0]
| o- ramdisk ................................................................................................ [Storage Objects: 0]
o- iscsi ............................................................................................................ [Targets: 1]
| o- iqn.2020-01.com.example:s4 ........................................................................................ [TPGs: 1]
| o- tpg1 ............................................................................................... [no-gen-acls, no-auth]
| o- acls .......................................................................................................... [ACLs: 1]
| | o- iqn.2020-01.com.example:k8s .......................................................................... [Mapped LUNs: 1]
| | o- mapped_lun0 .............................................................................. [lun0 block/s4.disk1 (rw)]
| o- luns .......................................................................................................... [LUNs: 1]
| | o- lun0 .................................................... [block/s4.disk1 (/dev/iscsi_vg/iscsi_lvm) (default_tg_pt_gp)]
| o- portals .................................................................................................... [Portals: 1]
| o- 0.0.0.0:3260 ..................................................................................................... [OK]
o- loopback ......................................................................................................... [Targets: 0]
## Install the iSCSI initiator packages on every node
[root@s1 ~]# yum -y install iscsi*
[root@s1 volume]# echo "InitiatorName=iqn.2020-01.com.example:k8s" > /etc/iscsi/initiatorname.iscsi
[root@s2 ~]# yum -y install iscsi*
[root@s2 ~]# echo "InitiatorName=iqn.2020-01.com.example:k8s" > /etc/iscsi/initiatorname.iscsi
[root@s3 ~]# yum -y install iscsi*
[root@s3 ~]# echo "InitiatorName=iqn.2020-01.com.example:k8s" > /etc/iscsi/initiatorname.iscsi
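The kubelet's iSCSI plugin performs the target login itself when the pod starts, but iscsid should be restarted so the changed InitiatorName takes effect, and a discovery is a useful sanity check; a hedged sketch (repeat on s2 and s3, output will vary):
[root@s1 ~]# systemctl restart iscsid
[root@s1 ~]# iscsiadm -m discovery -t st -p 10.211.55.10
10.211.55.10:3260,1 iqn.2020-01.com.example:s4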
apiVersion: v1
kind: Pod
metadata:
  name: iscsipd
spec:
  containers:
  - name: iscsipd-c
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: iscsipd-rw
      mountPath: "/mnt/iscsipd"
  volumes:
  - name: iscsipd-rw
    iscsi:
      targetPortal: 10.211.55.10:3260
      iqn: iqn.2020-01.com.example:s4   # the target's IQN, not the initiator's
      lun: 0
      fsType: xfs
      readOnly: false
## Exec into the container and check the mounted disk
[root@s1 volume]# kubectl get pods
NAME READY STATUS RESTARTS AGE
iscsipd 1/1 Running 0 2m30s
[root@s1 volume]# kubectl exec -it iscsipd -- bash
root@iscsipd:/# df -hT
Filesystem Type Size Used Avail Use% Mounted on
overlay overlay 41G 2.8G 39G 7% /
tmpfs tmpfs 64M 0 64M 0% /dev
tmpfs tmpfs 917M 0 917M 0% /sys/fs/cgroup
/dev/mapper/centos-root xfs 41G 2.8G 39G 7% /etc/hosts
/dev/sdb xfs 2.0G 33M 2.0G 2% /mnt/iscsipd
shm tmpfs 64M 0 64M 0% /dev/shm
tmpfs tmpfs 917M 12K 917M 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs tmpfs 917M 0 917M 0% /proc/acpi
tmpfs tmpfs 917M 0 917M 0% /proc/scsi
tmpfs tmpfs 917M 0 917M 0% /sys/firmware
root@iscsipd:/# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 64G 0 disk
|-sda1 8:1 0 1G 0 part
`-sda2 8:2 0 63G 0 part
|-centos-root 253:0 0 41G 0 lvm /etc/hosts
|-centos-swap 253:1 0 2G 0 lvm
`-centos-home 253:2 0 20G 0 lvm
sdb 8:16 0 2G 0 disk /mnt/iscsipd
sr0 11:0 1 1024M 0 rom
root@iscsipd:/#
Persistent Storage
PersistentVolume
A PersistentVolume (abbreviated PV) is associated with a piece of backend storage; both the PV and the backend storage are created by the administrator. A PV does not belong to any namespace and is visible cluster-wide.
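Note that the PV below points at the NFS path /zz on 10.211.55.10 (s4), not the /123 export created earlier; /zz has to be created and exported on s4 first, for example:
[root@s4 ~]# mkdir /zz
[root@s4 ~]# echo "/zz *(rw,sync,no_root_squash)" >> /etc/exports
[root@s4 ~]# exportfs -r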
# Create a PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  storageClassName: yy
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    path: /zz
    server: 10.211.55.10
# View the PV
[root@s1 volume]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01 5Gi RWO Recycle Available yy 59s
[root@s1 volume]# kubectl describe pv pv01
Name: pv01
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolume","metadata":{"annotations":{},"name":"pv01"},"spec":{"accessModes":["ReadWriteOnce"],"capacity...
Finalizers: [kubernetes.io/pv-protection]
StorageClass: yy
Status: Available
Claim:
Reclaim Policy: Recycle
Access Modes: RWO
VolumeMode: Filesystem
Capacity: 5Gi
Node Affinity: <none>
Message:
Source:
Type: NFS (an NFS mount that lasts the lifetime of a pod)
Server: 10.211.55.10
Path: /zz
ReadOnly: false
Events: <none>
PersistentVolumeClaim
A PersistentVolumeClaim (abbreviated PVC) is created inside a namespace; PVCs in different namespaces are isolated from each other.
A PVC is matched to a PV through its requested storage size and its accessModes: a PVC binds to a PV that offers the same access mode and enough storage to satisfy the request (the exact rules follow below).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc01
spec:
  storageClassName: yy
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
[root@s1 volume]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 5Gi RWO yy 10s
With identical accessModes on both sides, a PVC can bind to a PV whose storage is greater than or equal to the PVC's request; if the PV's storage is smaller than the PVC's request, they cannot bind.
storageClassName
The storageClassName of the PV and the PVC must match first; only when they match are the storage size and accessModes compared, as in the sketch below.
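For example, a claim requesting only 3Gi with the same storageClassName and access mode would still bind to the 5Gi pv01 (assuming the PV were still Available), and kubectl get pvc would then report the PV's full 5Gi as the claim's capacity. A hypothetical sketch (pvc-small is not part of the original setup):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-small
spec:
  storageClassName: yy
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi       # <= the 5Gi offered by pv01, so binding succeeds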
Using Persistent Storage
apiVersion: v1
kind: Pod
metadata:
  name: nginx1
spec:
  volumes:
  - name: myv
    persistentVolumeClaim:
      claimName: pvc01
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    volumeMounts:
    - mountPath: "/mnt"
      name: myv
  restartPolicy: Always
# On s1: copy a file into the pod's PVC-backed mount
[root@s1 volume]# kubectl cp /etc/hosts nginx1:/mnt
# On s4: the file appears in the NFS export /zz
[root@s4 zz]# ls
hosts
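Since pv01 was created with persistentVolumeReclaimPolicy: Recycle, deleting the claim scrubs the export and the PV returns to Available for the next claim; roughly what to expect (output abridged):
[root@s1 volume]# kubectl delete pod nginx1
[root@s1 volume]# kubectl delete pvc pvc01
[root@s1 volume]# kubectl get pv
NAME   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv01   5Gi        RWO            Recycle          Available           yy                      10m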