背景
记录通过ovs+ovn实现容器间通信的一些主要过程
准备环境
准备ovs和containerd环境
准备2个虚拟机
初始化Vagrantfile
vagrant init bento/ubuntu-22.04 --box-version 202510.26.0
编辑Vagrantfile,添加
config.vm.define "ovs1" do |ovs1|
ovs1.vm.network "private_network", ip: "192.168.56.10"
end
config.vm.define "ovs2" do |ovs2|
ovs2.vm.network "private_network", ip: "192.168.56.11"
end
最终类似
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Two-node OVS/OVN lab topology. Each entry maps a machine name to the
# host-only (private_network) address the rest of the walkthrough relies on:
#   ovs1 -> 192.168.56.10 (runs ovn-central / southbound DB)
#   ovs2 -> 192.168.56.11 (chassis only)
NODES = {
  "ovs1" => "192.168.56.10",
  "ovs2" => "192.168.56.11",
}.freeze

Vagrant.configure("2") do |config|
  # Same pinned base box for both machines so the lab is reproducible.
  config.vm.box = "bento/ubuntu-22.04"
  config.vm.box_version = "202510.26.0"

  NODES.each do |name, addr|
    config.vm.define name do |node|
      node.vm.network "private_network", ip: addr
    end
  end
end
进入虚拟机
进入ovs1
vagrant ssh ovs1
进入ovs2
vagrant ssh ovs2
安装ovs
sudo su
apt-get update
apt-get install openvswitch-switch -y
安装ovn
ovs1中
apt install ovn-host ovn-common ovn-central
ovs2中
apt install ovn-host ovn-common
开启南向数据库
ovs1中
ovn-sbctl set-connection ptcp:6642:192.168.56.10
连接南向数据库
ovs1中
ovs-vsctl set Open_vSwitch . external-ids:ovn-remote="tcp:192.168.56.10:6642" external-ids:ovn-encap-ip="192.168.56.10" external-ids:ovn-encap-type=geneve external-ids:system-id=ovs1
ovs2中
ovs-vsctl set Open_vSwitch . external-ids:ovn-remote="tcp:192.168.56.10:6642" external-ids:ovn-encap-ip="192.168.56.11" external-ids:ovn-encap-type=geneve external-ids:system-id=ovs2
实验
logic switch实验
准备测试ns
ovs1中
ip netns add ns1
ip link add v1 type veth peer name v1p
ip link set v1p netns ns1
ip netns exec ns1 ip link set v1p address 00:00:00:00:01:01
ip netns exec ns1 ip addr add 10.0.11.11/24 dev v1p
ip netns exec ns1 ip link set v1p up
ip link set v1 up
ovs-vsctl add-port br-int v1
ovs2中
ip netns add ns1
ip link add v1 type veth peer name v1p
ip link set v1p netns ns1
ip netns exec ns1 ip link set v1p address 00:00:00:00:01:02
ip netns exec ns1 ip addr add 10.0.11.12/24 dev v1p
ip netns exec ns1 ip link set v1p up
ip link set v1 up
ovs-vsctl add-port br-int v1
准备logic switch和端口
ovs1中
ovn-nbctl ls-add ls1
ovn-nbctl lsp-add ls1 ls1-ovs1-ns1
ovn-nbctl lsp-set-addresses ls1-ovs1-ns1 00:00:00:00:01:01
ovn-nbctl lsp-set-port-security ls1-ovs1-ns1 00:00:00:00:01:01
ovn-nbctl lsp-add ls1 ls1-ovs2-ns1
ovn-nbctl lsp-set-addresses ls1-ovs2-ns1 00:00:00:00:01:02
ovn-nbctl lsp-set-port-security ls1-ovs2-ns1 00:00:00:00:01:02
绑定端口
ovs1中
ovs-vsctl set interface v1 external-ids:iface-id=ls1-ovs1-ns1
ovs2中
ovs-vsctl set interface v1 external-ids:iface-id=ls1-ovs2-ns1
测试
ovs1中
ip netns exec ns1 ping 10.0.11.12
logic router实验
准备ns
ovs2中
ip netns add ns2
ip link add v2 type veth peer name v2p
ip link set v2p netns ns2
ip netns exec ns2 ip link set v2p address 00:00:00:00:02:01
ip netns exec ns2 ip addr add 10.0.12.11/24 dev v2p
ip netns exec ns2 ip link set v2p up
ip link set v2 up
ovs-vsctl add-port br-int v2
准备logic switch和端口
ovs1中
ovn-nbctl ls-add ls2
ovn-nbctl lsp-add ls2 ls2-ovs2-ns2
ovn-nbctl lsp-set-addresses ls2-ovs2-ns2 00:00:00:00:02:01
ovn-nbctl lsp-set-port-security ls2-ovs2-ns2 00:00:00:00:02:01
绑定端口
ovs2中
ovs-vsctl set interface v2 external-ids:iface-id=ls2-ovs2-ns2
创建logic router和端口
ovs1中
ovn-nbctl lr-add lr1
ovn-nbctl lrp-add lr1 lr1-ls1 00:00:00:00:01:00 10.0.11.254/24
ovn-nbctl lrp-add lr1 lr1-ls2 00:00:00:00:02:00 10.0.12.254/24
连接logic switch和logic router
ovs1中
ovn-nbctl lsp-add ls1 ls1-lr1
ovn-nbctl lsp-set-type ls1-lr1 router
ovn-nbctl lsp-set-addresses ls1-lr1 00:00:00:00:01:00
ovn-nbctl lsp-set-options ls1-lr1 router-port=lr1-ls1
ovn-nbctl lsp-add ls2 ls2-lr1
ovn-nbctl lsp-set-type ls2-lr1 router
ovn-nbctl lsp-set-addresses ls2-lr1 00:00:00:00:02:00
ovn-nbctl lsp-set-options ls2-lr1 router-port=lr1-ls2
ovn-nbctl lsp-set-addresses ls1-lr1 "00:00:00:00:01:00 10.0.11.254"
ovn-nbctl lsp-set-addresses ls1-ovs1-ns1 "00:00:00:00:01:01 10.0.11.11"
ovn-nbctl lsp-set-addresses ls2-lr1 "00:00:00:00:02:00 10.0.12.254"
ovn-nbctl lsp-set-addresses ls2-ovs2-ns2 "00:00:00:00:02:01 10.0.12.11"
配置路由
ovs1中
ip netns exec ns1 ip route add default via 10.0.11.254 dev v1p
ovs2中
ip netns exec ns2 ip route add default via 10.0.12.254 dev v2p
测试
ovs1中
ip netns exec ns1 ping 10.0.12.11
补充
trace
ovn
ovn-trace --detailed ls1 'inport == "ls1-ovs1-ns1" && eth.src == 00:00:00:00:01:01 && eth.dst == 00:00:00:00:01:02'
和
ovn-trace --detailed ls1 'inport == "ls1-ovs1-ns1" && eth.src == 00:00:00:00:01:01 && ip4.src == 10.0.11.11 && eth.dst == 00:00:00:00:01:00 && ip4.dst == 10.0.12.11 && ip.ttl == 64 && icmp4.type == 8'
ovs
ovs-appctl ofproto/trace br-int in_port=v1,dl_src=00:00:00:00:01:01,dl_dst=00:00:00:00:01:02 -generate
db
ovs
ovsdb-client list-dbs unix:/var/run/openvswitch/db.sock
ovsdb-client list-tables unix:/var/run/openvswitch/db.sock
ovn nb
ovsdb-client list-dbs unix:/var/run/ovn/ovnnb_db.sock
ovsdb-client list-tables unix:/var/run/ovn/ovnnb_db.sock
ovn sb
ovsdb-client list-dbs unix:/var/run/ovn/ovnsb_db.sock
ovsdb-client list-tables unix:/var/run/ovn/ovnsb_db.sock
日志
/var/log/ovn/ovn-controller.log
/var/log/ovn/ovn-northd.log
/var/log/ovn/ovsdb-server-nb.log
/var/log/ovn/ovsdb-server-sb.log