[PATCH v3] regression_tests: skip unnecessary offload option for udp testing
by Kamil Jerabek
The gro offload does not have any impact on udp testing. Because of that, two
identical combinations of offloads were being tested for udp. This commit skips
the one redundant combination, which also reduces the overall testing time.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 114 +++++------
.../regression_tests/phase1/3_vlans_over_bond.py | 114 +++++------
recipes/regression_tests/phase1/bonding_test.py | 106 +++++-----
recipes/regression_tests/phase1/simple_netperf.py | 84 +++++---
.../phase1/virtual_bridge_2_vlans_over_bond.py | 4 +-
.../phase1/virtual_bridge_vlan_in_guest.py | 4 +-
.../phase1/virtual_bridge_vlan_in_host.py | 4 +-
.../regression_tests/phase2/3_vlans_over_team.py | 110 ++++++-----
recipes/regression_tests/phase2/team_test.py | 220 +++++++++++----------
...l_ovs_bridge_2_vlans_over_active_backup_bond.py | 118 +++++------
.../phase2/virtual_ovs_bridge_vlan_in_guest.py | 118 +++++------
.../phase2/virtual_ovs_bridge_vlan_in_host.py | 118 +++++------
12 files changed, 583 insertions(+), 531 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index a508697..1b68889 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -242,34 +242,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,34 +303,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.set_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index 6c036f2..0d5afad 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -241,34 +241,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -301,34 +302,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py
index 4bbf573..44c60a0 100644
--- a/recipes/regression_tests/phase1/bonding_test.py
+++ b/recipes/regression_tests/phase1/bonding_test.py
@@ -219,32 +219,33 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -277,32 +278,33 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py
index 15f4f6f..9ce52c1 100644
--- a/recipes/regression_tests/phase1/simple_netperf.py
+++ b/recipes/regression_tests/phase1/simple_netperf.py
@@ -193,27 +193,31 @@ for setting in offload_settings:
         result_tcp.set_comment(pr_comment)
         perf_api.save_result(result_tcp)

-        # prepare PerfRepo result for udp
-        result_udp = perf_api.new_result("udp_ipv4_id",
-                                         "udp_ipv4_result",
-                                         hash_ignore=[
-                                             'kernel_release',
-                                             'redhat_release'])
-        for offload in setting:
-            result_udp.set_parameter(offload[0], offload[1])
-
-        result_udp.add_tag(product_name)
-        if nperf_mode == "multi":
-            result_udp.add_tag("multithreaded")
-            result_udp.set_parameter("num_parallel", nperf_num_parallel)
-
-        baseline = perf_api.get_baseline_of_result(result_udp)
-        netperf_baseline_template(netperf_cli_udp, baseline)
-        udp_res_data = m2.run(netperf_cli_udp,
-                              timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
-        netperf_result_template(result_udp, udp_res_data)
-        result_udp.set_comment(pr_comment)
-        perf_api.save_result(result_udp)
+        if ("gro", "off") not in setting:
+            # prepare PerfRepo result for udp
+            result_udp = perf_api.new_result("udp_ipv4_id",
+                                             "udp_ipv4_result",
+                                             hash_ignore=[
+                                                 'kernel_release',
+                                                 'redhat_release'])
+            for offload in setting:
+                result_udp.set_parameter(offload[0], offload[1])
+
+            if nperf_udp_size is not None:
+                result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+            result_udp.add_tag(product_name)
+            if nperf_mode == "multi":
+                result_udp.add_tag("multithreaded")
+                result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+            baseline = perf_api.get_baseline_of_result(result_udp)
+            netperf_baseline_template(netperf_cli_udp, baseline)
+            udp_res_data = m2.run(netperf_cli_udp,
+                                  timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+            netperf_result_template(result_udp, udp_res_data)
+            result_udp.set_comment(pr_comment)
+            perf_api.save_result(result_udp)

        srv_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -247,31 +280,32 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter("num_parallel", nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
# reset offload states
diff --git a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
index fd421ef..a2d5399 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
@@ -297,7 +297,7 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
# prepare PerfRepo result for udp
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
@@ -366,7 +366,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
index 62fc800..66620f7 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
@@ -234,7 +234,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
hash_ignore=['kernel_release',
@@ -299,7 +299,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
index ec43321..2160e0b 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
@@ -234,7 +234,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
hash_ignore=['kernel_release',
@@ -299,7 +299,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf:
+ if enable_udp_perf and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index a157c45..5936347 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -242,33 +242,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,33 +303,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py
index fc6ed08..014194d 100644
--- a/recipes/regression_tests/phase2/team_test.py
+++ b/recipes/regression_tests/phase2/team_test.py
@@ -221,33 +221,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine1")
- result_udp.set_parameter('netperf_client', "testmachine2")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine1")
+ result_udp.set_parameter('netperf_client', "testmachine2")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -282,33 +283,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine1")
- result_udp.set_parameter('netperf_client', "testmachine2")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*5)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine1")
+ result_udp.set_parameter('netperf_client', "testmachine2")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -388,33 +390,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine2")
- result_udp.set_parameter('netperf_client', "testmachine1")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m1.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine2")
+ result_udp.set_parameter('netperf_client', "testmachine1")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m1.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -449,33 +452,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine2")
- result_udp.set_parameter('netperf_client', "testmachine1")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m1.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine2")
+ result_udp.set_parameter('netperf_client', "testmachine1")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m1.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
index affc6f5..897b173 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
@@ -277,35 +277,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = g3.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g3.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -345,35 +346,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = g3.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g3.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
index 5b7ca94..f084f37 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
@@ -222,35 +222,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -287,35 +288,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
index 6acb1b1..31e57af 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
@@ -221,35 +221,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -287,35 +288,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
--
2.5.5
7 years, 7 months
[PATCH 0/2] Followup for SPAN tests
by Yotam Gigi
The patcheset includes two changes:
- migrated to the newly added ProcessAPI.out function and removed the
usage of the "save_output" flag on HostAPI.cmd
- Removed unnecessary tc cleanup in the Restore phase in the Slave.
There is no need to clear tc ingress filters, as removing the ingress
qdiscs will remove the filters too.
Yotam Gigi (2):
Slave: Remove unnecessary tc filter cleanup logic.
recipes: switchdev: migrated to the new ProcessAPI.out() function.
lnst/Slave/InterfaceManager.py | 6 ------
recipes/switchdev/l2-021-span.py | 4 ++--
2 files changed, 2 insertions(+), 8 deletions(-)
--
2.4.11
7 years, 7 months
[patch lnst 1/3] Command: save output always
by Jiri Pirko
From: Jiri Pirko <jiri(a)mellanox.com>
Just ignore the user option and save the stderr and stdout always.
Allows to shorten the run method call from tasks.
Signed-off-by: Jiri Pirko <jiri(a)mellanox.com>
---
lnst/Common/NetTestCommand.py | 10 ++--------
lnst/Controller/NetTestController.py | 2 --
lnst/Controller/Task.py | 2 +-
lnst/RecipeCommon/IRQ.py | 2 +-
regression-tests/tests/28/recipe1.py | 2 +-
regression-tests/tests/28/recipe2.py | 2 +-
regression-tests/tests/28/recipe3.py | 2 +-
regression-tests/tests/28/recipe4.py | 2 +-
8 files changed, 8 insertions(+), 16 deletions(-)
diff --git a/lnst/Common/NetTestCommand.py b/lnst/Common/NetTestCommand.py
index 774c26e..3c290dc 100644
--- a/lnst/Common/NetTestCommand.py
+++ b/lnst/Common/NetTestCommand.py
@@ -400,7 +400,6 @@ class NetTestCommandGeneric(object):
class NetTestCommandExec(NetTestCommandGeneric):
def __init__(self, command):
super(NetTestCommandExec, self).__init__(command)
- self._save_output = "save_output" in command
def run(self):
try:
@@ -409,15 +408,10 @@ class NetTestCommandExec(NetTestCommandGeneric):
self._command["command"])
else:
stdout, stderr = self.exec_cmd(self._command["command"])
- res_data = None
- if self._save_output:
- res_data = { "stdout": stdout, "stderr": stderr }
+ res_data = {"stdout": stdout, "stderr": stderr}
self.set_pass(res_data)
except ExecCmdFail as e:
- res_data = None
- if self._save_output:
- res_data = { "stdout": e.get_stdout(),
- "stderr": e.get_stderr() }
+ res_data = {"stdout": e.get_stdout(), "stderr": e.get_stderr()}
if "bg_id" in self._command:
logging.info("Command probably intentionally killed. Passing.")
self.set_pass(res_data)
diff --git a/lnst/Controller/NetTestController.py b/lnst/Controller/NetTestController.py
index 791dd95..c8a58b3 100644
--- a/lnst/Controller/NetTestController.py
+++ b/lnst/Controller/NetTestController.py
@@ -439,8 +439,6 @@ class NetTestController:
if "from" in cmd_data:
cmd["from"] = cmd_data["from"]
- if "save_output" in cmd_data:
- cmd["save_output"] = cmd_data["save_output"]
elif cmd["type"] in ["wait", "intr", "kill"]:
# 'proc_id' is used to store bg_id for wait/kill/intr
# 'bg_id' is used for test/exec
diff --git a/lnst/Controller/Task.py b/lnst/Controller/Task.py
index 6f36e86..fefe2cb 100644
--- a/lnst/Controller/Task.py
+++ b/lnst/Controller/Task.py
@@ -282,7 +282,7 @@ class HostAPI(object):
elif arg == "netns":
cmd["netns"] = argval
elif arg == "save_output":
- cmd["save_output"] = argval
+ pass # now ignored as output is saved always
else:
msg = "Argument '%s' not recognised by the run() method." % arg
raise TaskError(msg)
diff --git a/lnst/RecipeCommon/IRQ.py b/lnst/RecipeCommon/IRQ.py
index 4548a89..825a2b3 100644
--- a/lnst/RecipeCommon/IRQ.py
+++ b/lnst/RecipeCommon/IRQ.py
@@ -21,7 +21,7 @@ cpu: integer
'''
def pin_dev_irqs(machine, device, cpu):
pi = machine.run("grep %s /proc/interrupts | cut -f1 -d: | sed 's/ //'"
- % device.get_devname(), save_output=True)
+ % device.get_devname())
res = pi.get_result()
intrs = res["res_data"]["stdout"]
for intr in intrs.split('\n'):
diff --git a/regression-tests/tests/28/recipe1.py b/regression-tests/tests/28/recipe1.py
index a0834e9..ed19f23 100644
--- a/regression-tests/tests/28/recipe1.py
+++ b/regression-tests/tests/28/recipe1.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("while true; do echo test; sleep 1; done", bg=True, save_output="yes")
+test = m1.run("while true; do echo test; sleep 1; done", bg=True)
ctl.wait(5)
diff --git a/regression-tests/tests/28/recipe2.py b/regression-tests/tests/28/recipe2.py
index b47ff5a..7b739bb 100644
--- a/regression-tests/tests/28/recipe2.py
+++ b/regression-tests/tests/28/recipe2.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("for i in `seq 5`; do echo test; sleep 1; done", bg=True, save_output="yes")
+test = m1.run("for i in `seq 5`; do echo test; sleep 1; done", bg=True)
test.wait()
diff --git a/regression-tests/tests/28/recipe3.py b/regression-tests/tests/28/recipe3.py
index 802d93f..9d9fd9b 100644
--- a/regression-tests/tests/28/recipe3.py
+++ b/regression-tests/tests/28/recipe3.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("while true; do echo test; sleep 1; done", bg=True, save_output="yes")
+test = m1.run("while true; do echo test; sleep 1; done", bg=True)
ctl.wait(5)
diff --git a/regression-tests/tests/28/recipe4.py b/regression-tests/tests/28/recipe4.py
index e8e8f99..ae36c37 100644
--- a/regression-tests/tests/28/recipe4.py
+++ b/regression-tests/tests/28/recipe4.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("echo test", save_output="yes")
+test = m1.run("echo test")
output = test.get_result()["res_data"]["stdout"]
custom = ctl.get_module("Custom", options={ "fail": True })
--
2.5.5
7 years, 7 months
[PATCH v4 00/18] PyRecipes draft - changelog
by Jiri Prochazka
Changes v2 -> v3
Task.py: fixed raw_input message in breakpoint()
Task: add HostAPI methods: added commit message
merged multi_match related patches
rebased and reordered the commits so they are bisectable and logically ordered
Changes v3 -> v4
Task.py: added missing params to host machine requirements dict
Jiri Prochazka (18):
NetTestController: add multi_match argument to __init__
NetTestController: store defined aliases in attribute
NetTestController: add run_mode attribute
Task: add new InterfaceAPI method init_iface
Task: add HostAPI methods
Task: add new ControllerAPI methods
NetTestController: add set_machine_requirements method
NetTestController: add prepare_test_env and init_taskapi methods
Task: add set_machine_requirements and prepare_test_env methods
Task: add global level TaskAPI methods used in PyRecipes
__init__: export module level API methods
PyRecipes: add PyRecipes support
pyrecipes: add example recipes
lnst-ctl: remove overriden aliases
NetTestController: remove obsolete code
Task: remove deprecated methods
Task: improve breakpoint feature
lnst-ctl: remove config_only option
lnst-ctl | 37 +--
lnst/Controller/Machine.py | 15 +-
lnst/Controller/NetTestController.py | 466 ++++++-----------------------------
lnst/Controller/Task.py | 246 +++++++++---------
lnst/__init__.py | 1 +
pyrecipes/3_vlans.py | 34 +++
pyrecipes/example.py | 33 +++
pyrecipes/ping_flood.py | 48 ++++
8 files changed, 344 insertions(+), 536 deletions(-)
create mode 100644 pyrecipes/3_vlans.py
create mode 100644 pyrecipes/example.py
create mode 100644 pyrecipes/ping_flood.py
--
2.4.11
7 years, 7 months
[PATCH v4 1/8] Netperf: add option udp_size
by Kamil Jerabek
This option sets the UDP datagram size for the netperf test on the client. If
this option is not explicitly specified, the default value from netperf is used.
The option accepts an integer value describing the datagram size; it can also
be suffixed with G/M/K/g/m/k.
In this commit I also added this option to regression_test: simple_netperf test.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
test_modules/Netperf.py | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index d09bdfc..e6c68ad 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -39,6 +39,7 @@ class Netperf(TestGeneric):
self._cpu_util = self.get_opt("cpu_util")
self._num_parallel = int(self.get_opt("num_parallel", default=1))
self._runs = self.get_opt("runs", default=1)
+ self._udp_size = self.get_opt("udp_size")
self._debug = int_it(self.get_opt("debug", default=0))
self._threshold = self._parse_threshold(self.get_opt("threshold"))
@@ -141,6 +142,16 @@ class Netperf(TestGeneric):
else:
cmd += " -- %s" % self._testoptions
+ if self._udp_size is not None:
+ """
+ udp packets will have this size
+ """
+ if self._is_omni() or self._testoptions:
+ cmd += " -m %s" % self._udp_size
+ else:
+ cmd += " -- -m %s" % self._udp_size
+
+
elif self._role == "server":
cmd = "netserver -D"
if self._bind is not None:
--
2.5.5
7 years, 7 months
[patch lnst 1/3] Command: save output always
by Jiri Pirko
From: Jiri Pirko <jiri(a)mellanox.com>
Just ignore the user option and save the stderr and stdout always.
Allows to shorten the run method call from tasks.
Signed-off-by: Jiri Pirko <jiri(a)mellanox.com>
---
lnst/Common/NetTestCommand.py | 10 ++--------
lnst/Controller/NetTestController.py | 2 --
lnst/Controller/Task.py | 2 +-
lnst/RecipeCommon/IRQ.py | 2 +-
regression-tests/tests/28/recipe1.py | 2 +-
regression-tests/tests/28/recipe2.py | 2 +-
regression-tests/tests/28/recipe3.py | 2 +-
regression-tests/tests/28/recipe4.py | 2 +-
8 files changed, 8 insertions(+), 16 deletions(-)
diff --git a/lnst/Common/NetTestCommand.py b/lnst/Common/NetTestCommand.py
index 774c26e..3c290dc 100644
--- a/lnst/Common/NetTestCommand.py
+++ b/lnst/Common/NetTestCommand.py
@@ -400,7 +400,6 @@ class NetTestCommandGeneric(object):
class NetTestCommandExec(NetTestCommandGeneric):
def __init__(self, command):
super(NetTestCommandExec, self).__init__(command)
- self._save_output = "save_output" in command
def run(self):
try:
@@ -409,15 +408,10 @@ class NetTestCommandExec(NetTestCommandGeneric):
self._command["command"])
else:
stdout, stderr = self.exec_cmd(self._command["command"])
- res_data = None
- if self._save_output:
- res_data = { "stdout": stdout, "stderr": stderr }
+ res_data = {"stdout": stdout, "stderr": stderr}
self.set_pass(res_data)
except ExecCmdFail as e:
- res_data = None
- if self._save_output:
- res_data = { "stdout": e.get_stdout(),
- "stderr": e.get_stderr() }
+ res_data = {"stdout": e.get_stdout(), "stderr": e.get_stderr()}
if "bg_id" in self._command:
logging.info("Command probably intentionally killed. Passing.")
self.set_pass(res_data)
diff --git a/lnst/Controller/NetTestController.py b/lnst/Controller/NetTestController.py
index 791dd95..c8a58b3 100644
--- a/lnst/Controller/NetTestController.py
+++ b/lnst/Controller/NetTestController.py
@@ -439,8 +439,6 @@ class NetTestController:
if "from" in cmd_data:
cmd["from"] = cmd_data["from"]
- if "save_output" in cmd_data:
- cmd["save_output"] = cmd_data["save_output"]
elif cmd["type"] in ["wait", "intr", "kill"]:
# 'proc_id' is used to store bg_id for wait/kill/intr
# 'bg_id' is used for test/exec
diff --git a/lnst/Controller/Task.py b/lnst/Controller/Task.py
index 6f36e86..fefe2cb 100644
--- a/lnst/Controller/Task.py
+++ b/lnst/Controller/Task.py
@@ -282,7 +282,7 @@ class HostAPI(object):
elif arg == "netns":
cmd["netns"] = argval
elif arg == "save_output":
- cmd["save_output"] = argval
+ pass # now ignored as output is saved always
else:
msg = "Argument '%s' not recognised by the run() method." % arg
raise TaskError(msg)
diff --git a/lnst/RecipeCommon/IRQ.py b/lnst/RecipeCommon/IRQ.py
index 4548a89..825a2b3 100644
--- a/lnst/RecipeCommon/IRQ.py
+++ b/lnst/RecipeCommon/IRQ.py
@@ -21,7 +21,7 @@ cpu: integer
'''
def pin_dev_irqs(machine, device, cpu):
pi = machine.run("grep %s /proc/interrupts | cut -f1 -d: | sed 's/ //'"
- % device.get_devname(), save_output=True)
+ % device.get_devname())
res = pi.get_result()
intrs = res["res_data"]["stdout"]
for intr in intrs.split('\n'):
diff --git a/regression-tests/tests/28/recipe1.py b/regression-tests/tests/28/recipe1.py
index a0834e9..ed19f23 100644
--- a/regression-tests/tests/28/recipe1.py
+++ b/regression-tests/tests/28/recipe1.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("while true; do echo test; sleep 1; done", bg=True, save_output="yes")
+test = m1.run("while true; do echo test; sleep 1; done", bg=True)
ctl.wait(5)
diff --git a/regression-tests/tests/28/recipe2.py b/regression-tests/tests/28/recipe2.py
index b47ff5a..7b739bb 100644
--- a/regression-tests/tests/28/recipe2.py
+++ b/regression-tests/tests/28/recipe2.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("for i in `seq 5`; do echo test; sleep 1; done", bg=True, save_output="yes")
+test = m1.run("for i in `seq 5`; do echo test; sleep 1; done", bg=True)
test.wait()
diff --git a/regression-tests/tests/28/recipe3.py b/regression-tests/tests/28/recipe3.py
index 802d93f..9d9fd9b 100644
--- a/regression-tests/tests/28/recipe3.py
+++ b/regression-tests/tests/28/recipe3.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("while true; do echo test; sleep 1; done", bg=True, save_output="yes")
+test = m1.run("while true; do echo test; sleep 1; done", bg=True)
ctl.wait(5)
diff --git a/regression-tests/tests/28/recipe4.py b/regression-tests/tests/28/recipe4.py
index e8e8f99..ae36c37 100644
--- a/regression-tests/tests/28/recipe4.py
+++ b/regression-tests/tests/28/recipe4.py
@@ -4,7 +4,7 @@ m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])
-test = m1.run("echo test", save_output="yes")
+test = m1.run("echo test")
output = test.get_result()["res_data"]["stdout"]
custom = ctl.get_module("Custom", options={ "fail": True })
--
2.5.5
7 years, 7 months
[PATCH v3] regression_tests: skip unnecessary offload option for udp testing
by Kamil Jerabek
gro offload does not have any impact on udp testing. Because of that, two
equal combinations of offloads were being tested for udp. This commit skips
that one unnecessary combination, which also reduces the overall testing time.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 114 +++++------
.../regression_tests/phase1/3_vlans_over_bond.py | 114 +++++------
recipes/regression_tests/phase1/bonding_test.py | 106 +++++-----
recipes/regression_tests/phase1/simple_netperf.py | 84 +++++---
.../phase1/virtual_bridge_2_vlans_over_bond.py | 4 +-
.../phase1/virtual_bridge_vlan_in_guest.py | 4 +-
.../phase1/virtual_bridge_vlan_in_host.py | 4 +-
.../regression_tests/phase2/3_vlans_over_team.py | 110 ++++++-----
recipes/regression_tests/phase2/team_test.py | 220 +++++++++++----------
...l_ovs_bridge_2_vlans_over_active_backup_bond.py | 118 +++++------
.../phase2/virtual_ovs_bridge_vlan_in_guest.py | 118 +++++------
.../phase2/virtual_ovs_bridge_vlan_in_host.py | 118 +++++------
12 files changed, 583 insertions(+), 531 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index a508697..1b68889 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -242,34 +242,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,34 +303,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.set_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+            result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index 6c036f2..0d5afad 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -241,34 +241,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -301,34 +302,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py
index 4bbf573..44c60a0 100644
--- a/recipes/regression_tests/phase1/bonding_test.py
+++ b/recipes/regression_tests/phase1/bonding_test.py
@@ -219,32 +219,33 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -277,32 +278,33 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py
index 15f4f6f..9ce52c1 100644
--- a/recipes/regression_tests/phase1/simple_netperf.py
+++ b/recipes/regression_tests/phase1/simple_netperf.py
@@ -193,31 +193,32 @@ for setting in offload_settings:
         result_tcp.set_comment(pr_comment)
         perf_api.save_result(result_tcp)
-        # prepare PerfRepo result for udp
-        result_udp = perf_api.new_result("udp_ipv4_id",
-                                         "udp_ipv4_result",
-                                         hash_ignore=[
-                                             'kernel_release',
-                                             'redhat_release'])
-        for offload in setting:
-            result_udp.set_parameter(offload[0], offload[1])
-
-        if nperf_udp_size is not None:
-            result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
-        result_udp.add_tag(product_name)
-        if nperf_mode == "multi":
-            result_udp.add_tag("multithreaded")
-            result_udp.set_parameter("num_parallel", nperf_num_parallel)
-
-        baseline = perf_api.get_baseline_of_result(result_udp)
-        netperf_baseline_template(netperf_cli_udp, baseline)
-        udp_res_data = m2.run(netperf_cli_udp,
-                              timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
-        netperf_result_template(result_udp, udp_res_data)
-        result_udp.set_comment(pr_comment)
-        perf_api.save_result(result_udp)
+        if ("gro", "off") not in setting:
+            # prepare PerfRepo result for udp
+            result_udp = perf_api.new_result("udp_ipv4_id",
+                                             "udp_ipv4_result",
+                                             hash_ignore=[
+                                                 'kernel_release',
+                                                 'redhat_release'])
+            for offload in setting:
+                result_udp.set_parameter(offload[0], offload[1])
+
+            if nperf_udp_size is not None:
+                result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+            result_udp.add_tag(product_name)
+            if nperf_mode == "multi":
+                result_udp.add_tag("multithreaded")
+                result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+            baseline = perf_api.get_baseline_of_result(result_udp)
+            netperf_baseline_template(netperf_cli_udp, baseline)
+            udp_res_data = m2.run(netperf_cli_udp,
+                                  timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+            netperf_result_template(result_udp, udp_res_data)
+            result_udp.set_comment(pr_comment)
+            perf_api.save_result(result_udp)
srv_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -247,31 +280,32 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter("num_parallel", nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
# reset offload states
diff --git a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
index fd421ef..a2d5399 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
@@ -297,7 +297,7 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
# prepare PerfRepo result for udp
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
@@ -366,7 +366,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
index 62fc800..66620f7 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
@@ -234,7 +234,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
hash_ignore=['kernel_release',
@@ -299,7 +299,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
index ec43321..2160e0b 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
@@ -234,7 +234,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
hash_ignore=['kernel_release',
@@ -299,7 +299,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf:
+    if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index a157c45..5936347 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -242,33 +242,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,33 +303,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py
index fc6ed08..014194d 100644
--- a/recipes/regression_tests/phase2/team_test.py
+++ b/recipes/regression_tests/phase2/team_test.py
@@ -221,33 +221,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine1")
- result_udp.set_parameter('netperf_client', "testmachine2")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine1")
+ result_udp.set_parameter('netperf_client', "testmachine2")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -282,33 +283,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine1")
- result_udp.set_parameter('netperf_client', "testmachine2")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*5)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine1")
+ result_udp.set_parameter('netperf_client', "testmachine2")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+                                  timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -388,33 +390,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine2")
- result_udp.set_parameter('netperf_client', "testmachine1")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m1.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine2")
+ result_udp.set_parameter('netperf_client', "testmachine1")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m1.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -449,33 +452,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine2")
- result_udp.set_parameter('netperf_client', "testmachine1")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m1.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine2")
+ result_udp.set_parameter('netperf_client', "testmachine1")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m1.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
index affc6f5..897b173 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
@@ -277,35 +277,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = g3.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g3.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -345,35 +346,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = g3.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g3.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
index 5b7ca94..f084f37 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
@@ -222,35 +222,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -287,35 +288,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
index 6acb1b1..31e57af 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
@@ -221,35 +221,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -287,35 +288,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
--
2.5.5
7 years, 7 months
[PATCH v4 7/8] regression_tests: add udp_size to netperf tests
by Kamil Jerabek
This commit adds a udp_size parameter to all udp Netperf clients in our
regression_tests/phase1-2 directories. The value of the parameter can be
controlled via the alias "nperf_udp_size" that I've added to all recipes.
If the alias is not specified, Netperf defaults the value to the size of
the output buffer; otherwise the value given in this alias is used.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 13 +++++++++++++
.../regression_tests/phase1/3_vlans_over_bond.py | 13 +++++++++++++
recipes/regression_tests/phase1/bonding_test.py | 13 +++++++++++++
recipes/regression_tests/phase1/simple_netperf.py | 10 ++++++++++
.../phase1/virtual_bridge_2_vlans_over_bond.py | 13 +++++++++++++
.../phase1/virtual_bridge_vlan_in_guest.py | 13 +++++++++++++
.../phase1/virtual_bridge_vlan_in_host.py | 13 +++++++++++++
.../regression_tests/phase2/3_vlans_over_team.py | 13 +++++++++++++
recipes/regression_tests/phase2/team_test.py | 21 +++++++++++++++++++++
...al_ovs_bridge_2_vlans_over_active_backup_bond.py | 13 +++++++++++++
.../phase2/virtual_ovs_bridge_vlan_in_guest.py | 13 +++++++++++++
.../phase2/virtual_ovs_bridge_vlan_in_host.py | 13 +++++++++++++
12 files changed, 161 insertions(+)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index 8144815..a508697 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -166,6 +167,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
for setting in offload_settings:
#apply offload setting
dev_features = ""
@@ -245,6 +250,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
@@ -301,6 +310,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.set_tag(product_name)
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index 41f2b95..6c036f2 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -167,6 +168,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
for setting in offload_settings:
#apply offload setting
dev_features = ""
@@ -244,6 +249,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
@@ -300,6 +309,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py
index 39e7df8..4bbf573 100644
--- a/recipes/regression_tests/phase1/bonding_test.py
+++ b/recipes/regression_tests/phase1/bonding_test.py
@@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -176,6 +177,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -222,6 +227,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -276,6 +285,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py
index fe9d96b..15f4f6f 100644
--- a/recipes/regression_tests/phase1/simple_netperf.py
+++ b/recipes/regression_tests/phase1/simple_netperf.py
@@ -147,6 +147,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel" : nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel" : nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -198,6 +202,9 @@ for setting in offload_settings:
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -249,6 +256,9 @@ for setting in offload_settings:
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
index 37f703f..fd421ef 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
@@ -48,6 +48,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, g2, h2, g3, g4], pr_user_comment)
@@ -188,6 +189,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ping_mod_bad = ctl.get_module("IcmpPing",
options={
"addr" : g4_guestnic.get_ip(0),
@@ -304,6 +309,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -368,6 +377,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
index b4b5c6c..62fc800 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -171,6 +172,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
# configure mtu
h1.get_interface("nic").set_mtu(mtu)
h1.get_interface("tap").set_mtu(mtu)
@@ -240,6 +245,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -301,6 +310,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
index 1ebdd91..ec43321 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -170,6 +171,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
# configure mtu
h1.get_interface("nic").set_mtu(mtu)
h1.get_interface("tap").set_mtu(mtu)
@@ -240,6 +245,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -301,6 +310,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index e9cae83..a157c45 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -167,6 +168,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
for setting in offload_settings:
#apply offload setting
dev_features = ""
@@ -244,6 +249,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
@@ -300,6 +309,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py
index 1aa0d0f..fc6ed08 100644
--- a/recipes/regression_tests/phase2/team_test.py
+++ b/recipes/regression_tests/phase2/team_test.py
@@ -42,6 +42,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -175,6 +176,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -223,6 +228,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine1")
result_udp.set_parameter('netperf_client', "testmachine2")
result_udp.add_tag(product_name)
@@ -280,6 +289,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine1")
result_udp.set_parameter('netperf_client', "testmachine2")
result_udp.add_tag(product_name)
@@ -382,6 +395,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine2")
result_udp.set_parameter('netperf_client', "testmachine1")
result_udp.add_tag(product_name)
@@ -439,6 +456,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine2")
result_udp.set_parameter('netperf_client', "testmachine1")
result_udp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
index d795714..affc6f5 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
@@ -48,6 +48,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, g2, h2, g3, g4], pr_user_comment)
@@ -187,6 +188,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ping_mod_bad = ctl.get_module("IcmpPing",
options={
"addr" : g4_guestnic.get_ip(0),
@@ -283,6 +288,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -347,6 +356,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
index c96e2a6..5b7ca94 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -168,6 +169,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -228,6 +233,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -289,6 +298,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
index eaa1cab..6acb1b1 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -167,6 +168,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -227,6 +232,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -289,6 +298,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
--
2.5.5
7 years, 7 months
[PATCH v3] regression_tests: add udp_size to netperf tests
by Kamil Jerabek
This commit adds a udp_size parameter to all udp Netperf clients in our
regression_tests/phase1-2 directories. The value of the parameter can be
controlled via the alias "nperf_udp_size" that I've added to all recipes.
If the alias is not set, Netperf defaults the value to the size of its
output buffer; otherwise the value specified in this alias is used.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 13 +++++++++++++
.../regression_tests/phase1/3_vlans_over_bond.py | 13 +++++++++++++
recipes/regression_tests/phase1/bonding_test.py | 13 +++++++++++++
recipes/regression_tests/phase1/simple_netperf.py | 13 +++++++++----
.../phase1/virtual_bridge_2_vlans_over_bond.py | 15 +++++++++++++--
.../phase1/virtual_bridge_vlan_in_guest.py | 13 +++++++++++++
.../phase1/virtual_bridge_vlan_in_host.py | 13 +++++++++++++
.../regression_tests/phase2/3_vlans_over_team.py | 13 +++++++++++++
recipes/regression_tests/phase2/team_test.py | 21 +++++++++++++++++++++
...al_ovs_bridge_2_vlans_over_active_backup_bond.py | 13 +++++++++++++
.../phase2/virtual_ovs_bridge_vlan_in_guest.py | 13 +++++++++++++
.../phase2/virtual_ovs_bridge_vlan_in_host.py | 13 +++++++++++++
12 files changed, 160 insertions(+), 6 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index 8144815..a508697 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -166,6 +167,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
for setting in offload_settings:
#apply offload setting
dev_features = ""
@@ -245,6 +250,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
@@ -301,6 +310,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.set_tag(product_name)
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index 41f2b95..6c036f2 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -167,6 +168,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
for setting in offload_settings:
#apply offload setting
dev_features = ""
@@ -244,6 +249,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
@@ -300,6 +309,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py
index 39e7df8..4bbf573 100644
--- a/recipes/regression_tests/phase1/bonding_test.py
+++ b/recipes/regression_tests/phase1/bonding_test.py
@@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -176,6 +177,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -222,6 +227,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -276,6 +285,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py
index d68d10c..8d9fb1d 100644
--- a/recipes/regression_tests/phase1/simple_netperf.py
+++ b/recipes/regression_tests/phase1/simple_netperf.py
@@ -108,8 +108,7 @@ netperf_cli_udp = ctl.get_module("Netperf",
"runs" : nperf_max_runs,
"netperf_opts" : p_opts,
"debug" : nperf_debug,
- "max_deviation" : nperf_max_dev,
- "udp_size" : nperf_udp_size
+ "max_deviation" : nperf_max_dev
})
netperf_cli_udp6 = ctl.get_module("Netperf",
@@ -123,8 +122,7 @@ netperf_cli_udp6 = ctl.get_module("Netperf",
"runs" : nperf_max_runs,
"netperf_opts" : p_opts6,
"debug" : nperf_debug,
- "max_deviation" : nperf_max_dev,
- "udp_size" : nperf_udp_size
+ "max_deviation" : nperf_max_dev
})
netperf_srv = ctl.get_module("Netperf",
@@ -150,6 +148,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel" : nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel" : nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -255,6 +257,9 @@ for setting in offload_settings:
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
index 37f703f..5841fad 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
@@ -48,6 +48,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, g2, h2, g3, g4], pr_user_comment)
@@ -142,7 +143,8 @@ netperf_cli_udp = ctl.get_module("Netperf",
"netperf_opts" : "-L %s" %
(g3_guestnic.get_ip(0)),
"debug" : nperf_debug,
- "max_deviation" : nperf_max_dev
+ "max_deviation" : nperf_max_dev,
+ "udp_size" : nperf_udp_size
})
netperf_cli_tcp6 = ctl.get_module("Netperf",
@@ -174,7 +176,8 @@ netperf_cli_udp6 = ctl.get_module("Netperf",
"netperf_opts" :
"-L %s -6" % (g3_guestnic.get_ip(1)),
"debug" : nperf_debug,
- "max_deviation" : nperf_max_dev
+ "max_deviation" : nperf_max_dev,
+ "udp_size" : nperf_udp_size
})
if nperf_mode == "multi":
@@ -304,6 +307,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -368,6 +375,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
index b4b5c6c..62fc800 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -171,6 +172,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
# configure mtu
h1.get_interface("nic").set_mtu(mtu)
h1.get_interface("tap").set_mtu(mtu)
@@ -240,6 +245,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -301,6 +310,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
index 1ebdd91..ec43321 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -170,6 +171,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
# configure mtu
h1.get_interface("nic").set_mtu(mtu)
h1.get_interface("tap").set_mtu(mtu)
@@ -240,6 +245,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -301,6 +310,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index e9cae83..a157c45 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -43,6 +43,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -167,6 +168,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
for setting in offload_settings:
#apply offload setting
dev_features = ""
@@ -244,6 +249,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
@@ -300,6 +309,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
result_udp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py
index 1aa0d0f..fc6ed08 100644
--- a/recipes/regression_tests/phase2/team_test.py
+++ b/recipes/regression_tests/phase2/team_test.py
@@ -42,6 +42,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([m1, m2], pr_user_comment)
@@ -175,6 +176,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -223,6 +228,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine1")
result_udp.set_parameter('netperf_client', "testmachine2")
result_udp.add_tag(product_name)
@@ -280,6 +289,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine1")
result_udp.set_parameter('netperf_client', "testmachine2")
result_udp.add_tag(product_name)
@@ -382,6 +395,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine2")
result_udp.set_parameter('netperf_client', "testmachine1")
result_udp.add_tag(product_name)
@@ -439,6 +456,10 @@ for setting in offload_settings:
'redhat_release'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.set_parameter('netperf_server', "testmachine2")
result_udp.set_parameter('netperf_client', "testmachine1")
result_udp.add_tag(product_name)
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
index d795714..affc6f5 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
@@ -48,6 +48,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, g2, h2, g3, g4], pr_user_comment)
@@ -187,6 +188,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ping_mod_bad = ctl.get_module("IcmpPing",
options={
"addr" : g4_guestnic.get_ip(0),
@@ -283,6 +288,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -347,6 +356,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
index c96e2a6..5b7ca94 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -168,6 +169,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -228,6 +233,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -289,6 +298,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
index eaa1cab..6acb1b1 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
@@ -44,6 +44,7 @@ nperf_mode = ctl.get_alias("nperf_mode")
nperf_num_parallel = int(ctl.get_alias("nperf_num_parallel"))
nperf_debug = ctl.get_alias("nperf_debug")
nperf_max_dev = ctl.get_alias("nperf_max_dev")
+nperf_udp_size = ctl.get_alias("nperf_udp_size")
pr_user_comment = ctl.get_alias("perfrepo_comment")
pr_comment = generate_perfrepo_comment([h1, g1, h2], pr_user_comment)
@@ -167,6 +168,10 @@ if nperf_mode == "multi":
netperf_cli_tcp6.update_options({"num_parallel": nperf_num_parallel})
netperf_cli_udp6.update_options({"num_parallel": nperf_num_parallel})
+if nperf_udp_size is not None:
+ netperf_cli_udp.update_options({"udp_size" : nperf_udp_size})
+ netperf_cli_udp6.update_options({"udp_size" : nperf_udp_size})
+
ctl.wait(15)
for setting in offload_settings:
@@ -227,6 +232,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
@@ -289,6 +298,10 @@ for setting in offload_settings:
r'host\d+\..*tap\d*\.devname'])
for offload in setting:
result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
result_udp.add_tag(product_name)
if nperf_mode == "multi":
result_udp.add_tag("multithreaded")
--
2.5.5
7 years, 7 months
[PATCH v2] regression_tests: skip unnecessary offload option for udp testing
by Kamil Jerabek
gro offload does not have any impact on udp testing. Because of that, two
equal combinations of offloads were being tested for udp. This commit skips
that one unnecessary combination, which also reduces the overall testing time.
Signed-off-by: Kamil Jerabek <kjerabek(a)redhat.com>
---
recipes/regression_tests/phase1/3_vlans.py | 114 +++++------
.../regression_tests/phase1/3_vlans_over_bond.py | 114 +++++------
recipes/regression_tests/phase1/bonding_test.py | 106 +++++-----
recipes/regression_tests/phase1/simple_netperf.py | 101 +++++-----
.../phase1/virtual_bridge_2_vlans_over_bond.py | 4 +-
.../phase1/virtual_bridge_vlan_in_guest.py | 4 +-
.../phase1/virtual_bridge_vlan_in_host.py | 4 +-
.../regression_tests/phase2/3_vlans_over_team.py | 110 ++++++-----
recipes/regression_tests/phase2/team_test.py | 220 +++++++++++----------
...l_ovs_bridge_2_vlans_over_active_backup_bond.py | 118 +++++------
.../phase2/virtual_ovs_bridge_vlan_in_guest.py | 118 +++++------
.../phase2/virtual_ovs_bridge_vlan_in_host.py | 118 +++++------
12 files changed, 576 insertions(+), 555 deletions(-)
diff --git a/recipes/regression_tests/phase1/3_vlans.py b/recipes/regression_tests/phase1/3_vlans.py
index a508697..1b68889 100644
--- a/recipes/regression_tests/phase1/3_vlans.py
+++ b/recipes/regression_tests/phase1/3_vlans.py
@@ -242,34 +242,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,34 +303,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.set_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.set_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/3_vlans_over_bond.py b/recipes/regression_tests/phase1/3_vlans_over_bond.py
index 6c036f2..0d5afad 100644
--- a/recipes/regression_tests/phase1/3_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/3_vlans_over_bond.py
@@ -241,34 +241,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -301,34 +302,35 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase1/bonding_test.py b/recipes/regression_tests/phase1/bonding_test.py
index 4bbf573..44c60a0 100644
--- a/recipes/regression_tests/phase1/bonding_test.py
+++ b/recipes/regression_tests/phase1/bonding_test.py
@@ -219,32 +219,33 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -277,32 +278,33 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase1/simple_netperf.py b/recipes/regression_tests/phase1/simple_netperf.py
index 8d9fb1d..76bb615 100644
--- a/recipes/regression_tests/phase1/simple_netperf.py
+++ b/recipes/regression_tests/phase1/simple_netperf.py
@@ -194,31 +194,33 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter("num_parallel", nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -248,31 +250,32 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- # prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter("num_parallel", nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ # prepare PerfRepo result for udp ipv6
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter("num_parallel", nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
# reset offload states
diff --git a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
index 5841fad..afddf13 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_2_vlans_over_bond.py
@@ -295,7 +295,7 @@ for setting in offload_settings:
result_tcp.set_comment(pr_comment)
perf_api.save_result(result_tcp)
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
# prepare PerfRepo result for udp
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
@@ -364,7 +364,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
index 62fc800..66620f7 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_guest.py
@@ -234,7 +234,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
hash_ignore=['kernel_release',
@@ -299,7 +299,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
index ec43321..2160e0b 100644
--- a/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase1/virtual_bridge_vlan_in_host.py
@@ -234,7 +234,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- if enable_udp_perf is not None:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv4_id",
"udp_ipv4_result",
hash_ignore=['kernel_release',
@@ -299,7 +299,7 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- if enable_udp_perf:
+ if enable_udp_perf is not None and ("gro", "off") not in setting:
result_udp = perf_api.new_result("udp_ipv6_id",
"udp_ipv6_result",
hash_ignore=['kernel_release',
diff --git a/recipes/regression_tests/phase2/3_vlans_over_team.py b/recipes/regression_tests/phase2/3_vlans_over_team.py
index a157c45..5936347 100644
--- a/recipes/regression_tests/phase2/3_vlans_over_team.py
+++ b/recipes/regression_tests/phase2/3_vlans_over_team.py
@@ -242,33 +242,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
@@ -302,33 +303,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
- result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server_on_vlan', vlans[0])
+ result_udp.set_parameter('netperf_client_on_vlan', vlans[0])
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
srv_proc.intr()
diff --git a/recipes/regression_tests/phase2/team_test.py b/recipes/regression_tests/phase2/team_test.py
index fc6ed08..014194d 100644
--- a/recipes/regression_tests/phase2/team_test.py
+++ b/recipes/regression_tests/phase2/team_test.py
@@ -221,33 +221,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine1")
- result_udp.set_parameter('netperf_client', "testmachine2")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine1")
+ result_udp.set_parameter('netperf_client', "testmachine2")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -282,33 +283,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine1")
- result_udp.set_parameter('netperf_client', "testmachine2")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*5)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine1")
+ result_udp.set_parameter('netperf_client', "testmachine2")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -388,33 +390,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine2")
- result_udp.set_parameter('netperf_client', "testmachine1")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = m1.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine2")
+ result_udp.set_parameter('netperf_client', "testmachine1")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = m1.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -449,33 +452,34 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.set_parameter('netperf_server', "testmachine2")
- result_udp.set_parameter('netperf_client', "testmachine1")
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = m1.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.set_parameter('netperf_server', "testmachine2")
+ result_udp.set_parameter('netperf_client', "testmachine1")
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = m1.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
index affc6f5..897b173 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_2_vlans_over_active_backup_bond.py
@@ -277,35 +277,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = g3.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = g3.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -345,35 +346,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = g3.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = g3.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
index 5b7ca94..f084f37 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_guest.py
@@ -222,35 +222,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
if ipv in [ 'ipv6', 'both' ]:
@@ -287,35 +288,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
diff --git a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
index 6acb1b1..31e57af 100644
--- a/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
+++ b/recipes/regression_tests/phase2/virtual_ovs_bridge_vlan_in_host.py
@@ -221,35 +221,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp
- result_udp = perf_api.new_result("udp_ipv4_id",
- "udp_ipv4_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv4_id",
+ "udp_ipv4_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
@@ -287,35 +288,36 @@ for setting in offload_settings:
perf_api.save_result(result_tcp)
# prepare PerfRepo result for udp ipv6
- result_udp = perf_api.new_result("udp_ipv6_id",
- "udp_ipv6_result",
- hash_ignore=[
- 'kernel_release',
- 'redhat_release',
- r'guest\d+\.hostname',
- r'guest\d+\..*hwaddr',
- r'host\d+\..*tap\d*\.hwaddr',
- r'host\d+\..*tap\d*\.devname'])
- for offload in setting:
- result_udp.set_parameter(offload[0], offload[1])
-
- if nperf_udp_size is not None:
- result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
-
- result_udp.add_tag(product_name)
- if nperf_mode == "multi":
- result_udp.add_tag("multithreaded")
- result_udp.set_parameter('num_parallel', nperf_num_parallel)
-
- baseline = perf_api.get_baseline_of_result(result_udp)
- netperf_baseline_template(netperf_cli_udp6, baseline)
-
- udp_res_data = h2.run(netperf_cli_udp6,
- timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
-
- netperf_result_template(result_udp, udp_res_data)
- result_udp.set_comment(pr_comment)
- perf_api.save_result(result_udp)
+ if ("gro", "off") not in setting:
+ result_udp = perf_api.new_result("udp_ipv6_id",
+ "udp_ipv6_result",
+ hash_ignore=[
+ 'kernel_release',
+ 'redhat_release',
+ r'guest\d+\.hostname',
+ r'guest\d+\..*hwaddr',
+ r'host\d+\..*tap\d*\.hwaddr',
+ r'host\d+\..*tap\d*\.devname'])
+ for offload in setting:
+ result_udp.set_parameter(offload[0], offload[1])
+
+ if nperf_udp_size is not None:
+ result_udp.set_parameter("nperf_udp_size", nperf_udp_size)
+
+ result_udp.add_tag(product_name)
+ if nperf_mode == "multi":
+ result_udp.add_tag("multithreaded")
+ result_udp.set_parameter('num_parallel', nperf_num_parallel)
+
+ baseline = perf_api.get_baseline_of_result(result_udp)
+ netperf_baseline_template(netperf_cli_udp6, baseline)
+
+ udp_res_data = h2.run(netperf_cli_udp6,
+ timeout = (netperf_duration + nperf_reserve)*nperf_max_runs)
+
+ netperf_result_template(result_udp, udp_res_data)
+ result_udp.set_comment(pr_comment)
+ perf_api.save_result(result_udp)
server_proc.intr()
--
2.5.5
7 years, 7 months