[PATCH 1/2] Netperf: fixes for rate pretty printing
by Jan Tluka
If threshold is not specified the test will fail with an exception.
The fix is to process threshold values for prettier output only if
it was specified.
Also, when no threshold was specified, the rates were printed in plain bps.
They are now printed using the pretty-rate formatting instead.
This patch also fixes pretty print of the deviation.
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
test_modules/Netperf.py | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 6c247f4..8848bcd 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -407,21 +407,22 @@ class Netperf(TestGeneric):
rate_pretty = self._pretty_rate(rate)
rate_dev_pretty = self._pretty_rate(rate_deviation, unit=rate_pretty["unit"])
- threshold_pretty = self._pretty_rate(self._threshold["rate"])
- threshold_dev_pretty = self._pretty_rate(self._threshold_deviation["rate"],
- unit = threshold_pretty["unit"])
res_val = False
if self._threshold_interval is not None:
result_interval = (rate - rate_deviation,
rate + rate_deviation)
+ threshold_pretty = self._pretty_rate(self._threshold["rate"])
+ threshold_dev_pretty = self._pretty_rate(self._threshold_deviation["rate"],
+ unit = threshold_pretty["unit"])
+
if self._threshold_interval[0] > result_interval[1]:
res_val = False
res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
"than threshold %.2f +-%.2f %s" %\
(rate_pretty["rate"],
- rate_deviation,
+ rate_dev_pretty["rate"],
rate_pretty["unit"],
threshold_pretty["rate"],
threshold_dev_pretty["rate"],
@@ -431,7 +432,7 @@ class Netperf(TestGeneric):
res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
"than threshold %.2f +-%.2f %s" %\
(rate_pretty["rate"],
- rate_deviation,
+ rate_dev_pretty["rate"],
rate_pretty["unit"],
threshold_pretty["rate"],
threshold_dev_pretty["rate"],
@@ -441,8 +442,10 @@ class Netperf(TestGeneric):
res_val = True
else:
res_val = False
- res_data["msg"] = "Measured rate was %.2f +-%.2f bps" %\
- (rate, rate_deviation)
+ res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
+ (rate_pretty["rate"],
+ rate_dev_pretty["rate"],
+ rate_pretty["unit"])
if rv != 0 and self._runs == 1:
res_data["msg"] = "Could not get performance throughput!"
--
2.4.11
8 years
[PATCH v3 1/4] Netperf: fix SCTP_STREAM test
by Jan Tluka
When we changed how the Netperf test module reports results we silently
broke SCTP_STREAM mode. The reason is that SCTP_STREAM does not have omni
output selection available (checked with both 2.6.0 and 2.7.0).
SCTP_STREAM data must be parsed separately.
Fixes issue 167.
V2 changes:
Added two methods to parse omni and non-omni outputs so that code is more
readable.
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
test_modules/Netperf.py | 55 +++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 49 insertions(+), 6 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 2033c57..1248f8c 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -18,6 +18,8 @@ class Netperf(TestGeneric):
supported_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR",
"SCTP_STREAM", "SCTP_STREAM_MANY", "SCTP_RR"]
+ omni_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR"]
+
def __init__(self, command):
super(Netperf, self).__init__(command)
@@ -52,13 +54,18 @@ class Netperf(TestGeneric):
else:
self._threshold_interval = None
+ def _is_omni(self):
+ return self._testname in self.omni_tests
+
def _compose_cmd(self):
"""
composes commands for netperf and netserver based on xml recipe
"""
if self._role == "client":
- # -P 0 disables banner header of output
- cmd = "netperf -H %s -f k -P 0" % self._netperf_server
+ cmd = "netperf -H %s -f k" % self._netperf_server
+ if self._is_omni():
+ # -P 0 disables banner header of output
+ cmd += " -P 0"
if self._port is not None:
"""
client connects on this port
@@ -112,7 +119,8 @@ class Netperf(TestGeneric):
cmd += " -s 1"
# Print only relevant output
- cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
+ if self._is_omni():
+ cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
elif self._role == "server":
cmd = "netserver -D"
@@ -134,6 +142,20 @@ class Netperf(TestGeneric):
return cmd
def _parse_output(self, output):
+ res_val = None
+
+ if self._is_omni():
+ res_val = self._parse_omni_output(output)
+ else:
+ res_val = self._parse_non_omni_output(output)
+
+ if self._confidence is not None:
+ confidence = self._parse_confidence(output)
+ res_val["confidence"] = confidence
+
+ return res_val
+
+ def _parse_omni_output(self, output):
res_val = {}
pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
@@ -158,9 +180,30 @@ class Netperf(TestGeneric):
rem_cpu_util = re.search(pattern_rem_cpu_util, output)
res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
- if self._confidence is not None:
- confidence = self._parse_confidence(output)
- res_val["confidence"] = confidence
+ return res_val
+
+ def _parse_non_omni_output(self, output):
+ res_val = {}
+
+ # pattern for SCTP streams and other tests
+ # decimal decimal decimal float (float)
+ pattern = "\d+\s+\d+\s+\d+\s+\d+\.\d+\s+(\d+(?:\.\d+){0,1})"
+ if self._cpu_util:
+ # cpu utilization data in format: float float
+ pattern += "\s+(\d+(?:\.\d+){0,1})\s+(\d+(?:\.\d+){0,1})"
+
+ r2 = re.search(pattern, output.lower())
+
+ if r2 is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(r2.group(1))
+ if self._cpu_util:
+ res_val["LOCAL_CPU_UTIL"] = float(r2.group(2))
+ res_val["REMOTE_CPU_UTIL"] = float(r2.group(3))
+
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
return res_val
--
2.4.11
8 years
[PATCH v2 1/4] Netperf: fix SCTP_STREAM test
by Jan Tluka
When we changed how the Netperf test module reports results we silently
broke SCTP_STREAM mode. The reason is that SCTP_STREAM does not have omni
output selection available (checked with both 2.6.0 and 2.7.0).
SCTP_STREAM data must be parsed separately.
Fixes issue 167.
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
test_modules/Netperf.py | 67 +++++++++++++++++++++++++++++++++++--------------
1 file changed, 48 insertions(+), 19 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 2033c57..89664fb 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -18,6 +18,8 @@ class Netperf(TestGeneric):
supported_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR",
"SCTP_STREAM", "SCTP_STREAM_MANY", "SCTP_RR"]
+ omni_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR"]
+
def __init__(self, command):
super(Netperf, self).__init__(command)
@@ -52,13 +54,18 @@ class Netperf(TestGeneric):
else:
self._threshold_interval = None
+ def _is_omni(self):
+ return self._testname in self.omni_tests
+
def _compose_cmd(self):
"""
composes commands for netperf and netserver based on xml recipe
"""
if self._role == "client":
- # -P 0 disables banner header of output
- cmd = "netperf -H %s -f k -P 0" % self._netperf_server
+ cmd = "netperf -H %s -f k" % self._netperf_server
+ if self._is_omni():
+ # -P 0 disables banner header of output
+ cmd += " -P 0"
if self._port is not None:
"""
client connects on this port
@@ -112,7 +119,8 @@ class Netperf(TestGeneric):
cmd += " -s 1"
# Print only relevant output
- cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
+ if self._is_omni():
+ cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
elif self._role == "server":
cmd = "netserver -D"
@@ -136,27 +144,48 @@ class Netperf(TestGeneric):
def _parse_output(self, output):
res_val = {}
- pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
- throughput = re.search(pattern_throughput, output)
+ if not self._is_omni():
+ # pattern for SCTP streams and other tests
+ # decimal decimal decimal float (float)
+ pattern = "\d+\s+\d+\s+\d+\s+\d+\.\d+\s+(\d+(?:\.\d+){0,1})"
+ if self._cpu_util:
+ # cpu utilization data in format: float float
+ pattern += "\s+(\d+(?:\.\d+){0,1})\s+(\d+(?:\.\d+){0,1})"
- if throughput is None:
- rate_in_kb = 0.0
+ r2 = re.search(pattern, output.lower())
+
+ if r2 is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(r2.group(1))
+ if self._cpu_util:
+ res_val["LOCAL_CPU_UTIL"] = float(r2.group(2))
+ res_val["REMOTE_CPU_UTIL"] = float(r2.group(3))
+
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
else:
- rate_in_kb = float(throughput.group(1))
+ pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
+ throughput = re.search(pattern_throughput, output)
- res_val["rate"] = rate_in_kb*1000
- res_val["unit"] = "bps"
+ if throughput is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(throughput.group(1))
- if self._cpu_util is not None:
- if self._cpu_util == "local" or self._cpu_util == "both":
- pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
- loc_cpu_util = re.search(pattern_loc_cpu_util, output)
- res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
- if self._cpu_util == "remote" or self._cpu_util == "both":
- pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
- rem_cpu_util = re.search(pattern_rem_cpu_util, output)
- res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
+ if self._cpu_util is not None:
+ if self._cpu_util == "local" or self._cpu_util == "both":
+ pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
+ loc_cpu_util = re.search(pattern_loc_cpu_util, output)
+ res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+
+ if self._cpu_util == "remote" or self._cpu_util == "both":
+ pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
+ rem_cpu_util = re.search(pattern_rem_cpu_util, output)
+ res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
if self._confidence is not None:
confidence = self._parse_confidence(output)
--
2.4.11
8 years
[PATCH] NetTestController: fix syncing of tools with mixed python and xml tasks
by Jan Tluka
When python task is in the recipe xml with other xml tasks specified
before, the sync_table gets overwritten by default resources.
This is because the res_table is copied from the default resources
in the loop for every task in the recipe.
The fix is to move initialization of res_table before the loop.
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
lnst/Controller/NetTestController.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lnst/Controller/NetTestController.py b/lnst/Controller/NetTestController.py
index 6484c99..0e9b546 100644
--- a/lnst/Controller/NetTestController.py
+++ b/lnst/Controller/NetTestController.py
@@ -264,8 +264,8 @@ class NetTestController:
sync_table = {'module': {}, 'tools': {}}
if resource_sync:
+ res_table = copy.deepcopy(self._resource_table)
for task in self._recipe['tasks']:
- res_table = copy.deepcopy(self._resource_table)
if 'module_dir' in task:
modules = self._load_test_modules([task['module_dir']])
res_table['module'].update(modules)
--
2.4.11
8 years
[PATCH 1/4] Netperf: fix SCTP_STREAM test
by Jan Tluka
When we changed how the Netperf test module reports results we silently
broke SCTP_STREAM mode. The reason is that SCTP_STREAM does not have omni
output selection available (checked with both 2.6.0 and 2.7.0).
SCTP_STREAM data must be parsed separately.
Fixes issue 167.
Signed-off-by: Jan Tluka <jtluka@redhat.com>
---
test_modules/Netperf.py | 67 +++++++++++++++++++++++++++++++++++--------------
1 file changed, 48 insertions(+), 19 deletions(-)
diff --git a/test_modules/Netperf.py b/test_modules/Netperf.py
index 2033c57..89664fb 100644
--- a/test_modules/Netperf.py
+++ b/test_modules/Netperf.py
@@ -18,6 +18,8 @@ class Netperf(TestGeneric):
supported_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR",
"SCTP_STREAM", "SCTP_STREAM_MANY", "SCTP_RR"]
+ omni_tests = ["TCP_STREAM", "TCP_RR", "UDP_STREAM", "UDP_RR"]
+
def __init__(self, command):
super(Netperf, self).__init__(command)
@@ -52,13 +54,18 @@ class Netperf(TestGeneric):
else:
self._threshold_interval = None
+ def _is_omni(self):
+ return self._testname in self.omni_tests
+
def _compose_cmd(self):
"""
composes commands for netperf and netserver based on xml recipe
"""
if self._role == "client":
- # -P 0 disables banner header of output
- cmd = "netperf -H %s -f k -P 0" % self._netperf_server
+ cmd = "netperf -H %s -f k" % self._netperf_server
+ if self._is_omni():
+ # -P 0 disables banner header of output
+ cmd += " -P 0"
if self._port is not None:
"""
client connects on this port
@@ -112,7 +119,8 @@ class Netperf(TestGeneric):
cmd += " -s 1"
# Print only relevant output
- cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
+ if self._is_omni():
+ cmd += ' -- -k "THROUGHPUT, LOCAL_CPU_UTIL, REMOTE_CPU_UTIL, CONFIDENCE_LEVEL, THROUGHPUT_CONFID"'
elif self._role == "server":
cmd = "netserver -D"
@@ -136,27 +144,48 @@ class Netperf(TestGeneric):
def _parse_output(self, output):
res_val = {}
- pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
- throughput = re.search(pattern_throughput, output)
+ if not self._is_omni():
+ # pattern for SCTP streams and other tests
+ # decimal decimal decimal float (float)
+ pattern = "\d+\s+\d+\s+\d+\s+\d+\.\d+\s+(\d+(?:\.\d+){0,1})"
+ if self._cpu_util:
+ # cpu utilization data in format: float float
+ pattern += "\s+(\d+(?:\.\d+){0,1})\s+(\d+(?:\.\d+){0,1})"
- if throughput is None:
- rate_in_kb = 0.0
+ r2 = re.search(pattern, output.lower())
+
+ if r2 is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(r2.group(1))
+ if self._cpu_util:
+ res_val["LOCAL_CPU_UTIL"] = float(r2.group(2))
+ res_val["REMOTE_CPU_UTIL"] = float(r2.group(3))
+
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
else:
- rate_in_kb = float(throughput.group(1))
+ pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
+ throughput = re.search(pattern_throughput, output)
- res_val["rate"] = rate_in_kb*1000
- res_val["unit"] = "bps"
+ if throughput is None:
+ rate_in_kb = 0.0
+ else:
+ rate_in_kb = float(throughput.group(1))
- if self._cpu_util is not None:
- if self._cpu_util == "local" or self._cpu_util == "both":
- pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
- loc_cpu_util = re.search(pattern_loc_cpu_util, output)
- res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+ res_val["rate"] = rate_in_kb*1000
+ res_val["unit"] = "bps"
- if self._cpu_util == "remote" or self._cpu_util == "both":
- pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
- rem_cpu_util = re.search(pattern_rem_cpu_util, output)
- res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
+ if self._cpu_util is not None:
+ if self._cpu_util == "local" or self._cpu_util == "both":
+ pattern_loc_cpu_util = "LOCAL_CPU_UTIL=([-]?\d+\.\d+)"
+ loc_cpu_util = re.search(pattern_loc_cpu_util, output)
+ res_val["LOCAL_CPU_UTIL"] = float(loc_cpu_util.group(1))
+
+ if self._cpu_util == "remote" or self._cpu_util == "both":
+ pattern_rem_cpu_util = "REMOTE_CPU_UTIL=([-]?\d+\.\d+)"
+ rem_cpu_util = re.search(pattern_rem_cpu_util, output)
+ res_val["REMOTE_CPU_UTIL"] = float(rem_cpu_util.group(1))
if self._confidence is not None:
confidence = self._parse_confidence(output)
--
2.4.11
8 years