[PATCH 1/2] Task: make PerfRepo an optional dependency
by Ondrej Lichtner
From: Ondrej Lichtner <olichtne(a)redhat.com>
Since PerfRepo support now depends on an external library, it should
be an optional dependency so that LNST doesn't crash if the user is
not using PerfRepo.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
lnst/Controller/Task.py | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/lnst/Controller/Task.py b/lnst/Controller/Task.py
index 19499da..3903746 100644
--- a/lnst/Controller/Task.py
+++ b/lnst/Controller/Task.py
@@ -13,9 +13,6 @@ rpazdera(a)redhat.com (Radek Pazdera)
import hashlib
import re
import logging
-from perfrepo import PerfRepoRESTAPI
-from perfrepo import PerfRepoTestExecution
-from perfrepo import PerfRepoValue
from lnst.Common.Utils import dict_to_dot, list_to_dot, deprecated
from lnst.Common.Config import lnst_config
from lnst.Controller.XmlTemplates import XmlTemplateError
@@ -23,6 +20,15 @@ from lnst.Common.Path import Path
from lnst.Controller.PerfRepoMapping import PerfRepoMapping
from lnst.Common.Utils import Noop
+try:
+ from perfrepo import PerfRepoRESTAPI
+ from perfrepo import PerfRepoTestExecution
+ from perfrepo import PerfRepoValue
+except:
+ PerfRepoRESTAPI = None
+ PerfRepoTestExecution = None
+ PerfRepoValue = None
+
# The handle to be imported from each task
ctl = None
@@ -544,7 +550,12 @@ class PerfRepoAPI(object):
return False
def connect(self, url, username, password):
- self._rest_api = PerfRepoRESTAPI(url, username, password)
+ if PerfRepoRESTAPI is not None:
+ self._rest_api = PerfRepoRESTAPI(url, username, password)
+ if not self._rest_api.connected():
+ self._rest_api = None
+ else:
+ self._rest_api = None
def new_result(self, mapping_key, name, hash_ignore=[]):
if not self.connected():
--
2.6.3
8 years, 4 months
[PATCH v2 2/2] recipes: add a delay when rx offload is turned off
by Jan Tluka
This patch modifies all tests that set rx offload. We saw that setting
this offload off for certain NICs, such as ixgbe, causes a reset of the
device. The link therefore goes down, so a delay has to be added after the
offload setting so that data sent over the device passes through.
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 4 ++++
recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py | 5 +++++
recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py | 5 +++++
recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py | 5 +++++
recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py | 5 +++++
5 files changed, 24 insertions(+)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index ac20b60..fa2e02d 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -149,6 +149,10 @@ for setting in offload_settings:
dev_features))
m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
# Ping test
for vlan1 in vlans:
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
index d3cc786..d15929a 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
@@ -166,6 +166,11 @@ for setting in offload_settings:
h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
index ca169af..ed12b67 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
@@ -166,6 +166,11 @@ for setting in offload_settings:
h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
index 76aa386..8ac0da4 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
@@ -153,6 +153,11 @@ for setting in offload_settings:
g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
index dcde1b2..5dae9da 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
@@ -152,6 +152,11 @@ for setting in offload_settings:
g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
--
2.4.3
8 years, 4 months
[PATCH v2 1/2] recipes: remove duplicated netperf tests in vlan
recipes
by Jan Tluka
This patch modifies all tests in phase1 and phase2 that involve 3 vlans.
The change is that the netperf test is run on only one of the vlans
instead of on all of them, which reduces the time the test takes.
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 319 ++++++++++----------
.../regression_tests/phase1/3_vlans_over_bond.py | 335 ++++++++++-----------
.../regression_tests/phase2/3_vlans_over_team.py | 334 ++++++++++----------
3 files changed, 494 insertions(+), 494 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index 907ad3c..ac20b60 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -64,50 +64,68 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
})
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -121,171 +139,154 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
+for setting in offload_settings:
+ #apply offload setting
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
+ # Ping test
+ for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
-
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
if ipv in [ 'ipv4', 'both' ]:
- # Ping test
m1.run(ping_mod)
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
if ipv in [ 'ipv6', 'both' ]:
- # Ping test
m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.set_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.set_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+ # Netperf test (both TCP and UDP)
+ if ipv in [ 'ipv4', 'both' ]:
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.set_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.set_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index c14f1f0..3b9b582 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -65,50 +65,68 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
})
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -122,175 +140,156 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
-
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- #apply offload setting
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
+for setting in offload_settings:
+ #apply offload setting
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ # Ping test
+ for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
if ipv in [ 'ipv4', 'both' ]:
- # Ping test
m1.run(ping_mod)
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
if ipv in [ 'ipv6', 'both' ]:
- # Ping test
m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+ # Netperf test (both TCP and UDP)
+ if ipv in [ 'ipv4', 'both' ]:
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index dc163c0..99161a1 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -53,7 +53,6 @@ for vlan in vlans:
vlan_if2 = m2.get_interface(vlan)
vlan_if2.set_mtu(mtu)
-
ctl.wait(15)
ping_mod = ctl.get_module("IcmpPing",
@@ -66,50 +65,68 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
})
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -123,173 +140,156 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
-
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts})
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts})
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
+for setting in offload_settings:
+ #apply offload setting
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ # Ping test
+ for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
if ipv in [ 'ipv4', 'both' ]:
- # Ping test
m1.run(ping_mod)
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
if ipv in [ 'ipv6', 'both' ]:
m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+ # Netperf test (both TCP and UDP)
+ if ipv in [ 'ipv4', 'both' ]:
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
--
2.4.3
8 years, 4 months
[PATCH 1/3] Netperf: remove family option
by Ondrej Lichtner
From: Ondrej Lichtner <olichtne(a)redhat.com>
This option has been in the netperf module since the beginning and was
never used once. I'm not sure what its original purpose was, but I'm
removing it now.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
test_modules/Netperf.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 4ff8643..2033c57 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -33,7 +33,6 @@ class Netperf(TestGeneric):
self._testname = self.get_opt("testname", default="TCP_STREAM")
self._confidence = self.get_opt("confidence")
self._bind = self.get_opt("bind", opt_type="addr")
- self._family = self.get_opt("family")
self._cpu_util = self.get_opt("cpu_util")
self._num_parallel = int(self.get_opt("num_parallel", default=1))
self._runs = self.get_opt("runs", default=1)
--
2.6.2
8 years, 4 months
[PATCH] recipes: add a delay when rx offload is turned off
by Jan Tluka
This patch modifies all tests that set rx offload. We saw that turning
this offload off on certain NICs, such as ixgbe, resets the device.
As a result the link goes down, so a delay has to be added after changing
the offload setting to ensure that data sent over the device passes through.
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 5 +++++
recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py | 5 +++++
recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py | 5 +++++
recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py | 5 +++++
recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py | 5 +++++
5 files changed, 25 insertions(+)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index fdb58e0..502c76c 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -181,6 +181,11 @@ for setting in offload_settings:
m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
# Netperf test (both TCP and UDP)
srv_proc = m1.run(netperf_srv, bg=True)
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
index d3cc786..d15929a 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
@@ -166,6 +166,11 @@ for setting in offload_settings:
h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
index ca169af..ed12b67 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
@@ -166,6 +166,11 @@ for setting in offload_settings:
h1.run("ethtool -K %s %s" % (h1_nic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
index 76aa386..8ac0da4 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
@@ -153,6 +153,11 @@ for setting in offload_settings:
g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
index dcde1b2..5dae9da 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
@@ -152,6 +152,11 @@ for setting in offload_settings:
g1.run("ethtool -K %s %s" % (g1_guestnic.get_devname(), dev_features))
h2.run("ethtool -K %s %s" % (h2_nic.get_devname(), dev_features))
+ if ("rx", "off") in setting:
+ # when rx offload is turned off some of the cards might get reset
+ # and link goes down, so wait a few seconds until NIC is ready
+ ctl.wait(15)
+
if ipv in [ 'ipv4', 'both' ]:
g1.run(ping_mod)
--
2.4.3
8 years, 4 months
[PATCH] recipes: remove duplicated netperf tests in vlan recipes
by Jan Tluka
This patch modifies all tests in phase1 and phase2 that involve 3 vlans.
The change is that the netperf test is run on only one of the vlans
instead of on all of them, which reduces the time the test takes.
Signed-off-by: Jan Tluka <jtluka(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 345 ++++++++++----------
.../regression_tests/phase1/3_vlans_over_bond.py | 350 +++++++++++----------
.../regression_tests/phase2/3_vlans_over_team.py | 347 ++++++++++----------
3 files changed, 525 insertions(+), 517 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index 907ad3c..fdb58e0 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -54,6 +54,7 @@ for vlan in vlans:
ctl.wait(15)
+# ICMP/ICMP6 tests
ping_mod = ctl.get_module("IcmpPing",
options={
"count" : 100,
@@ -64,50 +65,99 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod)
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+# Netperf tests
+# performance is measured only on one of the vlans
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
- })
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
+ })
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -121,171 +171,124 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
-
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
- if ipv in [ 'ipv4', 'both' ]:
- # Ping test
- m1.run(ping_mod)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
- if ipv in [ 'ipv6', 'both' ]:
- # Ping test
- m1.run(ping_mod6)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.set_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.set_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ if ipv in [ 'ipv4', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+ result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.set_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.set_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index c14f1f0..71c1777 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -55,6 +55,7 @@ for vlan in vlans:
ctl.wait(15)
+# ICMP/ICMP6 tests
ping_mod = ctl.get_module("IcmpPing",
options={
"count" : 100,
@@ -65,50 +66,99 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod)
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+# Netperf tests
+# performance is measured only on one of the vlans
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
})
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -122,175 +172,127 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
-
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts })
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- #apply offload setting
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
- if ipv in [ 'ipv4', 'both' ]:
- # Ping test
- m1.run(ping_mod)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
- if ipv in [ 'ipv6', 'both' ]:
- # Ping test
- m1.run(ping_mod6)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+for setting in offload_settings:
+ #apply offload setting
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ if ipv in [ 'ipv4', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index dc163c0..4c0bc87 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -56,6 +56,7 @@ for vlan in vlans:
ctl.wait(15)
+# ICMP/ICMP6 tests
ping_mod = ctl.get_module("IcmpPing",
options={
"count" : 100,
@@ -66,50 +67,99 @@ ping_mod6 = ctl.get_module("Icmp6Ping",
"count" : 100,
"interval" : 0.1
})
+
+for vlan1 in vlans:
+ m1_vlan1 = m1.get_interface(vlan1)
+ for vlan2 in vlans:
+ m2_vlan2 = m2.get_interface(vlan2)
+
+ ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
+ "iface": m1_vlan1.get_devname()})
+
+ ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
+ "iface": m1_vlan1.get_ip(1)})
+
+ if vlan1 == vlan2:
+ # These tests should pass
+ # Ping between same VLANs
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod)
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6)
+ else:
+ # These tests should fail
+ # Ping across different VLAN
+ if ipv in [ 'ipv4', 'both' ]:
+ m1.run(ping_mod, expect="fail")
+
+ if ipv in [ 'ipv6', 'both' ]:
+ m1.run(ping_mod6, expect="fail")
+
+# Netperf tests
+# performance is measured only on one of the vlans
+m1_vlan1 = m1.get_interface(vlans[0])
+m2_vlan1 = m2.get_interface(vlans[0])
+
netperf_srv = ctl.get_module("Netperf",
options={
- "role" : "server"
+ "role" : "server",
+ "bind": m1_vlan1.get_ip(0)
})
netperf_srv6 = ctl.get_module("Netperf",
options={
"role" : "server",
+ "bind": m1_vlan1.get_ip(1),
"netperf_opts" : " -6"
})
+
+p_opts = "-L %s" % (m2_vlan1.get_ip(0))
+if nperf_cpupin and nperf_mode != "multi":
+ p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
+
netperf_cli_tcp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_udp = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(0),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": p_opts
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "TCP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
netperf_cli_udp6 = ctl.get_module("Netperf",
options={
"role" : "client",
+ "netperf_server": m1_vlan1.get_ip(1),
"duration" : netperf_duration,
"testname" : "UDP_STREAM",
"confidence" : nperf_confidence,
"cpu_util" : nperf_cpu_util,
- "runs": nperf_max_runs
+ "runs": nperf_max_runs,
+ "netperf_opts": "-L %s -6" % (m2_vlan1.get_ip(1))
})
if nperf_mode == "multi":
@@ -123,173 +173,126 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
-for vlan1 in vlans:
- m1_vlan1 = m1.get_interface(vlan1)
- for vlan2 in vlans:
- m2_vlan2 = m2.get_interface(vlan2)
-
- ping_mod.update_options({"addr": m2_vlan2.get_ip(0),
- "iface": m1_vlan1.get_devname()})
-
- ping_mod6.update_options({"addr": m2_vlan2.get_ip(1),
- "iface": m1_vlan1.get_ip(1)})
-
- netperf_srv.update_options({"bind": m1_vlan1.get_ip(0)})
-
- netperf_srv6.update_options({"bind": m1_vlan1.get_ip(1)})
-
- p_opts = "-L %s" % (m2_vlan2.get_ip(0))
- if nperf_cpupin and nperf_mode != "multi":
- p_opts += " -T%s,%s" % (nperf_cpupin, nperf_cpupin)
-
- netperf_cli_tcp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts})
-
- netperf_cli_udp.update_options({"netperf_server": m1_vlan1.get_ip(0),
- "netperf_opts": p_opts})
-
- netperf_cli_tcp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- netperf_cli_udp6.update_options({"netperf_server": m1_vlan1.get_ip(1),
- "netperf_opts": "-L %s -6" % (m2_vlan2.get_ip(1))})
-
- if vlan1 == vlan2:
- # These tests should pass
- # Ping between same VLANs
- for setting in offload_settings:
- dev_features = ""
- for offload in setting:
- dev_features += " %s %s" % (offload[0], offload[1])
- m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
- dev_features))
- m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
- dev_features))
- m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
- dev_features))
-
- if ipv in [ 'ipv4', 'both' ]:
- # Ping test
- m1.run(ping_mod)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp
- result_tcp = perf_api.new_result("tcp_ipv4_id",
- "tcp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6)
-
- # Netperf test (both TCP and UDP)
- srv_proc = m1.run(netperf_srv6, bg=True)
- ctl.wait(2)
-
- # prepare PerfRepo result for tcp ipv6
- result_tcp = perf_api.new_result("tcp_ipv6_id",
- "tcp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_tcp.set_parameter(offload[0], offload[1])
- result_tcp.set_parameter('netperf_server_on_vlan', vlan1)
- result_tcp.set_parameter('netperf_client_on_vlan', vlan2)
- result_tcp.add_tag(product_name)
- if nperf_mode == "multi":
- result_tcp.add_tag("multithreaded")
- result_tcp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_tcp)
- netperf_baseline_template(netperf_cli_tcp6, baseline)
-
- tcp_res_data = m2.run(netperf_cli_tcp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_tcp, tcp_res_data)
- perf_api.save_result(result_tcp)
-
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
- result_udp.set_parameter('netperf_server_on_vlan', vlan1)
- result_udp.set_parameter('netperf_client_on_vlan', vlan2)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- perf_api.save_result(result_udp)
-
- srv_proc.intr()
- # These tests should fail
- # Ping across different VLAN
- else:
- if ipv in [ 'ipv4', 'both' ]:
- m1.run(ping_mod, expect="fail")
-
- if ipv in [ 'ipv6', 'both' ]:
- m1.run(ping_mod6, expect="fail")
+for setting in offload_settings:
+ dev_features = ""
+ for offload in setting:
+ dev_features += " %s %s" % (offload[0], offload[1])
+ m1.run("ethtool -K %s %s" % (m1_phy1.get_devname(),
+ dev_features))
+ m1.run("ethtool -K %s %s" % (m1_phy2.get_devname(),
+ dev_features))
+ m2.run("ethtool -K %s %s" % (m2_phy1.get_devname(),
+ dev_features))
+
+ if ipv in [ 'ipv4', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp
+ result_tcp = perf_api.new_result("tcp_ipv4_id",
+ "tcp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
+
+ if ipv in [ 'ipv6', 'both' ]:
+ # Netperf test (both TCP and UDP)
+ srv_proc = m1.run(netperf_srv6, bg=True)
+ ctl.wait(2)
+
+ # prepare PerfRepo result for tcp ipv6
+ result_tcp = perf_api.new_result("tcp_ipv6_id",
+ "tcp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_tcp.set_parameter(offload[0], offload[1])
+        result_tcp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_tcp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_tcp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_tcp.add_tag("multithreaded")
+ result_tcp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_tcp)
+ netperf_baseline_template(netperf_cli_tcp6, baseline)
+
+ tcp_res_data = m2.run(netperf_cli_tcp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_tcp, tcp_res_data)
+ perf_api.save_result(result_tcp)
+
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+        result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+        result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ perf_api.save_result(result_udp)
+
+ srv_proc.intr()
#reset offload states
dev_features = ""
--
2.4.3
8 years, 4 months