From: Ondrej Lichtner <olichtne(a)redhat.com>
As discussed with Jiri Benc, when using multicast groups with vxlan it's
a good idea to have at least 3 tunnel end points and check functionality
of parallel communication.
This commit adds a guest machine to the first baremetal machine that's
bridged into the same network. This guest makes the 3rd vxlan endpoint.
Another change is the execution of parallel pings from each host to
every other host.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
.../regression_tests/phase3/vxlan_multicast.README | 62 ++++--
recipes/regression_tests/phase3/vxlan_multicast.py | 228 +++++++++++++++++++++
.../regression_tests/phase3/vxlan_multicast.xml | 42 +++-
3 files changed, 310 insertions(+), 22 deletions(-)
create mode 100644 recipes/regression_tests/phase3/vxlan_multicast.py
diff --git a/recipes/regression_tests/phase3/vxlan_multicast.README
b/recipes/regression_tests/phase3/vxlan_multicast.README
index cc082da..a354144 100644
--- a/recipes/regression_tests/phase3/vxlan_multicast.README
+++ b/recipes/regression_tests/phase3/vxlan_multicast.README
@@ -12,31 +12,51 @@ Topology:
| |
+-+--+ +-+--+
+-------|eth1|------+ +-------|eth1|------+
-| +-+--+ | | +-+--+ |
-| | | | | |
-| +-----+ | | +-----+ |
-| |vxlan| | | |vxlan| |
-| +-----+ | | +-----+ |
-| | | |
-| host1 | | host2 |
-| | | |
-| | | |
-| | | |
-+-------------------+ +-------------------+
+|host1 +---++ | |host2 +-+--+ |
+| | | | | |
+| | | | +-----+ |
+|+-----+ +-+-+ | | |vxlan| |
+||vxlan|--+br0| | | +-----+ |
+|+-----+ +-+-+ | | |
+| | | | |
+| | | | |
+| | | | |
+| +-+-+ | | |
++---------|tap|-----+ +-------------------+
+ +-+-+
+ |
+ +-+--+
++---------|eth1|----+
+|guest1 +-+--+ |
+| | |
+| +--+--+ |
+| |vxlan| |
+| +-----+ |
+| |
++---------+---------+
-Number of hosts: 2
+Number of hosts: 3
Host #1 description:
- One ethernet device configured with ip address {$net}.1/24
- VXLAN interface on top of the ethernet interface using group_ip 239.1.1.1
+ One ethernet device
+ Tap device connecting to a guest machine
+ Bridge br0 enslaving eth1 and tap devices, configured with ip address
+ {$net}.1/24
+ VXLAN interface on top of the bridge interface using group_ip 239.1.1.1
configured with ip addresses:
{$vxlan_net}.1/24
{$vxlan_net6}::1/64
-Host #2 description:
+Guest #1 description:
One ethernet device configured with ip address {$net}.2/24
VXLAN interface on top of the ethernet interface using group_ip 239.1.1.1
configured with ip addresses:
{$vxlan_net}.2/24
{$vxlan_net6}::2/64
+Host #2 description:
+ One ethernet device configured with ip address {$net}.3/24
+ VXLAN interface on top of the ethernet interface using group_ip 239.1.1.1
+ configured with ip addresses:
+ {$vxlan_net}.3/24
+ {$vxlan_net6}::3/64
Test name:
vxlan_test.py
Test description:
@@ -44,10 +64,22 @@ Test description:
+ count: 100
+ interval: 0.1s
+ host1.vxlan -> host2.vxlan expecting PASS
+ + host1.vxlan -> guest1.vxlan expecting PASS
+ + host2.vxlan -> host1.vxlan expecting PASS
+ + host2.vxlan -> guest1.vxlan expecting PASS
+ + guest1.vxlan -> host1.vxlan expecting PASS
+ + guest1.vxlan -> host2.vxlan expecting PASS
+ All pings are executed in parallel
Ping6:
+ count: 100
+ interval: 0.1s
+ host1.vxlan -> host2.vxlan expecting PASS
+ + host1.vxlan -> guest1.vxlan expecting PASS
+ + host2.vxlan -> host1.vxlan expecting PASS
+ + host2.vxlan -> guest1.vxlan expecting PASS
+ + guest1.vxlan -> host1.vxlan expecting PASS
+ + guest1.vxlan -> host2.vxlan expecting PASS
+ All pings are executed in parallel
Netperf:
+ duration: 60s, repeated 5 times to calculate confidence
+ host1.vxlan -> host2.vxlan TCP_STREAM ipv4
diff --git a/recipes/regression_tests/phase3/vxlan_multicast.py
b/recipes/regression_tests/phase3/vxlan_multicast.py
new file mode 100644
index 0000000..9cfa900
--- /dev/null
+++ b/recipes/regression_tests/phase3/vxlan_multicast.py
@@ -0,0 +1,228 @@
+from lnst.Controller.Task import ctl
+from lnst.Controller.PerfRepoUtils import perfrepo_baseline_to_dict
+from lnst.Controller.PerfRepoUtils import netperf_result_template
+
+from lnst.RecipeCommon.ModuleWrap import ping, ping6, netperf
+from lnst.RecipeCommon.IRQ import pin_dev_irqs
+from lnst.RecipeCommon.PerfRepo import generate_perfrepo_comment
+
+# ------
+# SETUP
+# ------
+
+mapping_file = ctl.get_alias("mapping_file")
+perf_api = ctl.connect_PerfRepo(mapping_file)
+
+product_name = ctl.get_alias("product_name")
+
+m1 = ctl.get_host("testmachine1")
+m2 = ctl.get_host("testmachine2")
+g1 = ctl.get_host("guest1")
+
+m1.sync_resources(modules=["IcmpPing", "Icmp6Ping",
"Netperf"])
+m2.sync_resources(modules=["IcmpPing", "Icmp6Ping",
"Netperf"])
+
+
+# ------
+# TESTS
+# ------
+
+ipv = ctl.get_alias("ipv")
+mtu = ctl.get_alias("mtu")
+netperf_duration = int(ctl.get_alias("netperf_duration"))
+nperf_reserve = int(ctl.get_alias("nperf_reserve"))
+nperf_confidence = ctl.get_alias("nperf_confidence")
+nperf_max_runs = int(ctl.get_alias("nperf_max_runs"))
+nperf_cpupin = ctl.get_alias("nperf_cpupin")
+nperf_cpu_util = ctl.get_alias("nperf_cpu_util")
+nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
+nperf_debug = ctl.get_alias("nperf_debug")
+pr_user_comment = ctl.get_alias("perfrepo_comment")
+
+pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
+
+test_if1 = m1.get_interface("test_if")
+test_if1.set_mtu(mtu)
+test_if2 = m2.get_interface("test_if")
+test_if2.set_mtu(mtu)
+test_if3 = g1.get_interface("test_if")
+test_if3.set_mtu(mtu)
+
+if nperf_cpupin:
+ m1.run("service irqbalance stop")
+ m2.run("service irqbalance stop")
+ g1.run("service irqbalance stop")
+
+ m1_phy1 = m1.get_interface("eth1")
+ m1_phy2 = m1.get_interface("eth2")
+ m2_phy1 = m2.get_interface("eth1")
+ g1_phy1 = g1.get_interface("eth1")
+ dev_list = [(m1, m1_phy1), (m1, m1_phy2), (m2, m2_phy1), (g1, g1_phy1)]
+
+ # this will pin devices irqs to cpu #0
+ for m, d in dev_list:
+ pin_dev_irqs(m, d, 0)
+
+nperf_opts = ""
+if nperf_cpupin and nperf_num_parallel == 1:
+ nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
+ctl.wait(15)
+
+ping_opts = {"count": 100, "interval": 0.1}
+
+ipv4_endpoints = [(m1, test_if1, 0, {"scope": 0}),
+ (m2, test_if2, 0, {"scope": 0}),
+ (g1, test_if3, 0, {"scope": 0})]
+ipv6_endpoints = [(m1, test_if1, 1, {"scope": 0}),
+ (m2, test_if2, 1, {"scope": 0}),
+ (g1, test_if3, 1, {"scope": 0})]
+
+ipv4_pings = []
+for x in ipv4_endpoints:
+ for y in ipv4_endpoints:
+ if not x == y:
+ ipv4_pings.append(ping(x, y, options=ping_opts, bg=True))
+
+for i in ipv4_pings:
+ i.wait()
+
+ipv6_pings = []
+for x in ipv6_endpoints:
+ for y in ipv6_endpoints:
+ if not x == y:
+ ipv6_pings.append(ping6(x, y, options=ping_opts, bg=True))
+
+for i in ipv6_pings:
+ i.wait()
+
+ctl.wait(2)
+if ipv in [ 'ipv4', 'both' ]:
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ result_tcp.add_tag(product_name)
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ tcp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
+ (m2, test_if2, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ result_udp.add_tag(product_name)
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, test_if1, 0, {"scope": 0}),
+ (m2, test_if2, 0, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "netperf_opts": nperf_opts},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+if ipv in [ 'ipv6', 'both' ]:
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ result_tcp.add_tag(product_name)
+ if nperf_num_parallel > 1:
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ tcp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
+ (m2, test_if2, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "TCP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "netperf_opts" : nperf_opts + "
-6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ result_tcp.set_comment(pr_comment)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ result_udp.add_tag(product_name)
+ if nperf_num_parallel > 1:
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ baseline = perfrepo_baseline_to_dict(baseline)
+
+ udp_res_data = netperf((m1, test_if1, 1, {"scope": 0}),
+ (m2, test_if2, 1, {"scope": 0}),
+ client_opts={"duration" : netperf_duration,
+ "testname" : "UDP_STREAM",
+ "confidence" : nperf_confidence,
+ "num_parallel" : nperf_num_parallel,
+ "cpu_util" : nperf_cpu_util,
+ "runs": nperf_max_runs,
+ "debug": nperf_debug,
+ "netperf_opts" : nperf_opts +
"-6"},
+ baseline = baseline,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
+
+if nperf_cpupin:
+ m1.run("service irqbalance start")
+ m2.run("service irqbalance start")
diff --git a/recipes/regression_tests/phase3/vxlan_multicast.xml
b/recipes/regression_tests/phase3/vxlan_multicast.xml
index 2327c6b..176b3d8 100644
--- a/recipes/regression_tests/phase3/vxlan_multicast.xml
+++ b/recipes/regression_tests/phase3/vxlan_multicast.xml
@@ -16,18 +16,24 @@
<network>
<host id="testmachine1">
<interfaces>
- <eth id="eth" label="tnet">
+ <eth id="eth1" label="tnet"/>
+ <eth id="eth2" label="to_guest1"/>
+ <bridge id="br0">
+ <slaves>
+ <slave id="eth1"/>
+ <slave id="eth2"/>
+ </slaves>
<addresses>
<address value="{$net}.1/24" />
</addresses>
- </eth>
+ </bridge>
<vxlan id="test_if">
<options>
<option name="id" value="1"/>
<option name="group_ip"
value="239.1.1.1"/>
</options>
<slaves>
- <slave id="eth"/>
+ <slave id="br0"/>
</slaves>
<addresses>
<address value="{$vxlan_net}.1/24" />
@@ -36,9 +42,9 @@
</vxlan>
</interfaces>
</host>
- <host id="testmachine2">
+ <host id="guest1">
<interfaces>
- <eth id="eth" label="tnet">
+ <eth id="eth1" label="to_guest1">
<addresses>
<address value="{$net}.2/24" />
</addresses>
@@ -49,7 +55,7 @@
<option name="group_ip"
value="239.1.1.1"/>
</options>
<slaves>
- <slave id="eth"/>
+ <slave id="eth1"/>
</slaves>
<addresses>
<address value="{$vxlan_net}.2/24" />
@@ -58,7 +64,29 @@
</vxlan>
</interfaces>
</host>
+ <host id="testmachine2">
+ <interfaces>
+ <eth id="eth1" label="tnet">
+ <addresses>
+ <address value="{$net}.3/24" />
+ </addresses>
+ </eth>
+ <vxlan id="test_if">
+ <options>
+ <option name="id" value="1"/>
+ <option name="group_ip"
value="239.1.1.1"/>
+ </options>
+ <slaves>
+ <slave id="eth1"/>
+ </slaves>
+ <addresses>
+ <address value="{$vxlan_net}.3/24" />
+ <address value="{$vxlan_net6}::3/64" />
+ </addresses>
+ </vxlan>
+ </interfaces>
+ </host>
</network>
- <task python="vxlan_test.py" />
+    <task python="vxlan_multicast.py" />
</lnstrecipe>
--
2.8.3