Centos7 Kvm 虚拟机迁移
1、虚拟机静态迁移
#虚拟机静态迁移两种方式
#1、迁移vm至其他机器上运行,且无共享存储
#2、迁移vm至其他机器上运行,且有共享存储
#迁移vm的思路是将disk img磁盘映像文件与xml配置文件拷贝至目的主机重新定义即可
#环境中部署有共享存储方案,只是需要重新在目标主机定义即可完成vm的迁移
#实验为无共享存储方式
2、准备disk img和xml配置等文件
[root@node71 ~]# virsh list
Id 名称 状态
----------------------------------------------------
3 oel1 running
[root@node71 ~]# virsh domblklist oel1
目标 源
------------------------------------------------
vda /home/data/vm/oel1/oel1_qcow2.img
vdb /home/data/vm/oel1/oel1_qcow2_1.img
[root@node71 ~]# virsh shutdown oel1
域 oel1 被关闭
[root@node71 ~]# virsh dumpxml oel1 > ./oel1.xml
#scp远程拷贝文件至目标主机
[root@node71 ~]# scp /home/data/vm/oel1/oel1_qcow2.img root@192.168.2.110:/data/vm/oel1/
The authenticity of host '192.168.2.110 (192.168.2.110)' can't be established.
ECDSA key fingerprint is ab:ef:f7:83:53:37:da:4d:35:c0:56:0f:9e:bd:01:10.
Are you sure you want to continue connecting (yes/no)? ys
Please type 'yes' or 'no': yes
Warning: Permanently added '192.168.2.110' (ECDSA) to the list of known hosts.
root@192.168.2.110's password:
oel1_qcow2.img 100% 2690MB 41.4MB/s 01:05
[root@node71 ~]# scp /home/data/vm/oel1/oel1_qcow2_1.img root@192.168.2.110:/data/vm/oel1/
root@192.168.2.110's password:
oel1_qcow2_1.img 100% 1091MB 40.4MB/s 00:27
[root@node71 ~]# scp ./oel1.xml root@192.168.2.110:/etc/libvirt/qemu/
root@192.168.2.110's password:
oel1.xml 100% 3089 3.0KB/s 00:00
[root@node71 ~]#
3、目标主机将迁移过来的xml配置文件重新注册到kvm
#注册之前,需要确认xml当中配置的disk路径与实际disk 的img文件路径是否一致,
#vi /etc/libvirt/qemu/oel1.xml
<source file='/data/vm/oel1/oel1_qcow2.img'/>
<source file='/data/vm/oel1/oel1_qcow2_1.img'/>
#重新编辑修改正确的disk路径
#目标主机注册迁移虚拟机
[root@node72 ~]# virsh define /etc/libvirt/qemu/oel1.xml
定义域 oel1(从 /etc/libvirt/qemu/oel1.xml)
#启动vm
[root@node72 ~]# virsh start oel1
域 oel1 已开始
#console接入vm
[root@node72 ~]# virsh console oel1
连接到域 oel1
换码符为 ^]
mount: mount point /proc/bus/usb does not exist
Welcome to Oracle Linux Server
Starting udev: [ OK ]
Setting hostname localhost.localdomain: [ OK ]
Setting up Logical Volume Management: 2 logical volume(s) in volume group "VolGroup" now active
[ OK ]
Checking filesystems
Checking all file systems.
[/sbin/fsck.ext4 (1) -- /] fsck.ext4 -a /dev/mapper/VolGroup-lv_root
/dev/mapper/VolGroup-lv_root: clean, 23019/1215840 files, 505899/4859904 blocks
[/sbin/fsck.ext4 (1) -- /boot] fsck.ext4 -a /dev/vda1
/dev/vda1: clean, 44/128016 files, 81219/512000 blocks
[ OK ]
Remounting root filesystem in read-write mode: [ OK ]
Mounting local filesystems: [ OK ]
Enabling /etc/fstab swaps: [ OK ]
Entering non-interactive startup
Starting monitoring for VG VolGroup: 2 logical volume(s) in volume group "VolGroup" monitored
[ OK ]
Bringing up loopback interface: [ OK ]
Bringing up interface eth0:
Determining IP information for eth0... done.
[ OK ]
Starting auditd: [ OK ]
Starting system logger: [ OK ]
Starting kdump:[FAILED]
Mounting filesystems: [ OK ]
Starting acpi daemon: [ OK ]
Retrigger failed udev events[ OK ]
Starting sshd: [ OK ]
Starting postfix: [ OK ]
Starting crond: [ OK ]
Oracle Linux Server release 6.6
Kernel 3.8.13-44.1.1.el6uek.x86_64 on an x86_64
localhost.localdomain login: root
Password:
Last login: Tue Oct 25 02:58:14 on ttyS0
[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:37:5f:f6 brd ff:ff:ff:ff:ff:ff
inet 192.168.2.134/16 brd 192.168.255.255 scope global eth0
inet6 fe80::5054:ff:fe37:5ff6/64 scope link
valid_lft forever preferred_lft forever
#测试网络连通性
[root@localhost ~]# ping -c 2 192.168.0.1
PING 192.168.0.1 (192.168.0.1) 56(84) bytes of data.
64 bytes from 192.168.0.1: icmp_seq=1 ttl=255 time=4.74 ms
64 bytes from 192.168.0.1: icmp_seq=2 ttl=255 time=48.6 ms
--- 192.168.0.1 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1063ms
rtt min/avg/max/mdev = 4.744/26.673/48.603/21.930 ms
[root@localhost ~]#
[root@node72 ~]#
#检查KVM机器的vnc端口,是否开启,开启则说明可以vnc接入
[root@node72 ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 5 192.168.122.1:53 *:*
LISTEN 0 1 *:5910 *:*
LISTEN 0 128 *:22 *:*
LISTEN 0 128 127.0.0.1:631 *:*
LISTEN 0 100 127.0.0.1:25 *:*
LISTEN 0 128 :::22 :::*
LISTEN 0 128 ::1:631 :::*
LISTEN 0 100 ::1:25 :::*
[root@node72 ~]#
4、动态迁移
#采用共享存储的方式存放disk img映像文件
#nfs共享存储
[root@node1 ~]# iptables -F
[root@node1 ~]# cat /etc/exports
/data *(rw,sync,no_root_squash)
[root@node1 ~]#
#Kvm节点挂载nfs共享存储,挂载路径一致
[root@node71 ~]# mount -t nfs 192.168.0.211:/data /mnt
[root@node72 ~]# mount -t nfs 192.168.0.211:/data /mnt
#两台KVM检查nfs挂载后vm磁盘文件权限,貌似nobody权限也是可以
[root@node72 ~]# ll -h /mnt/vm/oel2/
总用量 2.0G
-rw-r--r-- 1 nobody nobody 2.0G 10月 26 11:18 oel2_qcow2.img
[root@node72 ~]#
#由node71运行vm之后,将vm迁移至node72,vm的disk img存放在/mnt/vm/oel2路径下
#检查node71
[root@node71 ~]# virsh domblklist oel2
目标 源
------------------------------------------------
vda /mnt/vm/oel2/oel2_qcow2.img
hda -
[root@node71 ~]# virsh list --all
Id 名称 状态
----------------------------------------------------
5 oel2 running
[root@node71 ~]#
[root@node71 ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 5 192.168.122.1:53 *:*
LISTEN 0 128 *:22 *:*
LISTEN 0 1 *:5911 *:*
LISTEN 0 128 127.0.0.1:631 *:*
LISTEN 0 100 127.0.0.1:25 *:*
LISTEN 0 64 *:59589 *:*
LISTEN 0 64 :::60821 :::*
LISTEN 0 128 :::22 :::*
LISTEN 0 128 ::1:631 :::*
LISTEN 0 100 ::1:25 :::*
[root@node71 ~]#
#node72检查oel2的disk 文件
#迁移过程中报错,在启动虚拟机时需要关闭cache
[root@node71 ~]# virsh migrate --live oel2 qemu+ssh://192.168.2.110/system
The authenticity of host '192.168.2.110 (192.168.2.110)' can't be established.
ECDSA key fingerprint is ab:ef:f7:83:53:37:da:4d:35:c0:56:0f:9e:bd:01:10.
Are you sure you want to continue connecting (yes/no)? yes
root@192.168.2.110's password:
错误:不安全的迁移:如果磁盘使用 cache != none 则迁移会导致数据崩溃
错误:无法解析地址 'node72' 服务 '49152': 未知的名称或服务
[root@node71 ~]#
#这里实验,尝试忽略错误,即使不安全也尝试迁移,并且增加在hosts文件增加名称解析
[root@node71 ~]# cat /etc/hosts
192.168.2.111 node71
192.168.2.110 node72
[root@node71 ~]#
[root@node71 ~]# virsh migrate --live --verbose --abort-on-error --unsafe oel2 qemu+ssh://192.168.2.110/system
root@192.168.2.110's password:
迁移: [100 %]
[root@node71 ~]# virsh list --all
Id 名称 状态
----------------------------------------------------
- oel2 关闭
[root@node71 ~]#
#在node72检查,此时node72上没有oel2的xml配置文件需要重新导出一份(node71上还存在)
[root@node72 ~]# virsh list --all
Id 名称 状态
----------------------------------------------------
3 oel2 running
[root@node72 ~]# virsh dumpxml oel2 > /etc/libvirt/qemu/oel2.xml
[root@node72 ~]# ls /etc/libvirt/qemu/oel2.xml
/etc/libvirt/qemu/oel2.xml
[root@node72 ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 64 *:40147 *:*
LISTEN 0 5 192.168.122.1:53 *:*
LISTEN 0 128 *:22 *:*
LISTEN 0 1 *:5911 *:*
LISTEN 0 128 127.0.0.1:631 *:*
LISTEN 0 100 127.0.0.1:25 *:*
LISTEN 0 128 :::22 :::*
LISTEN 0 128 ::1:631 :::*
LISTEN 0 100 ::1:25 :::*
LISTEN 0 64 :::42983 :::*
[root@node72 ~]#
#测试vnc连接无任何问题
#且测试vm网络连通性,一个PING包都不掉
virsh migrate命令帮助
[root@node71 ~]# virsh migrate --help
NAME
migrate - 将域迁移到另一个主机中
SYNOPSIS
migrate <domain> <desturi> [--live] [--offline] [--p2p] [--direct] [--tunnelled] [--persistent] [--undefinesource] [--suspend] [--copy-storage-all] [--copy-storage-inc] [--change-protection] [--unsafe] [--verbose] [--compressed] [--auto-converge] [--rdma-pin-all] [--abort-on-error] [--migrateuri <string>] [--graphicsuri <string>] [--listen-address <string>] [--dname <string>] [--timeout <number>] [--xml <string>] [--migrate-disks <string>]
DESCRIPTION
将域迁移到另一个主机中。热迁移时添加 --live。
OPTIONS
[--domain] <string> 域名,id 或 uuid
[--desturi] <string> 客户端(常规迁移)或者源(p2p 迁移)中看到的目的地主机连接 URI
--live 热迁移
--offline 离线迁移
--p2p 点对点迁移
--direct 直接迁移
--tunnelled 管道迁移
--persistent 目的地中的持久 VM
--undefinesource 在源中取消定义 VM
--suspend 不启用目的地主机中的域
--copy-storage-all 使用全磁盘复制的非共享存储进行迁移
--copy-storage-inc 使用增值复制(源和目的地共享同一基础映像)的非共享存储进行迁移
--change-protection 迁移结束前不得对域进行任何配置更改
--unsafe 即使不安全也要强制迁移
--verbose 显示迁移进程
--compressed 实时迁移过程中压缩重复的页
--auto-converge force convergence during live migration
--rdma-pin-all support memory pinning during RDMA live migration
--abort-on-error 在迁移过程中遇到软错误时中止迁移
--migrateuri <string> 迁移 URI, 通常可省略
--graphicsuri <string> 无空隙图形迁移中使用的图形 URI
--listen-address <string> listen address that destination should bind to for incoming migration
--dname <string> 在迁移过程中重新命名为一个新名称(如果支持)
--timeout <number> 如果 live 迁移超时(以秒计)则强制虚拟机挂起
--xml <string> 包含为目标更新的 XML 的文件名
--migrate-disks <string> comma separated list of disks to be migrated