From: Ondrej Lichtner <olichtne(a)redhat.com>
It may sometimes be useful to ignore some metrics during evaluation. At
the moment this is intended for use with TRexFlowMeasurement, where the
generator and receiver cpu stats are identical (because the generator
and receiver are often on the same host) and the generator throughput
results tend to be less stable and less important.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
.../BaselineFlowAverageEvaluator.py | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
index 9d65955..f7a2fb1 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineFlowAverageEvaluator.py
@@ -8,9 +8,19 @@
class BaselineFlowAverageEvaluator(BaselineEvaluator):
- def __init__(self, pass_difference):
+ def __init__(self, pass_difference, metrics_to_evaluate=None):
self._pass_difference = pass_difference
+ if metrics_to_evaluate is not None:
+ self._metrics_to_evaluate = metrics_to_evaluate
+ else:
+ self._metrics_to_evaluate = [
+ "generator_results",
+ "generator_cpu_stats",
+ "receiver_results",
+ "receiver_cpu_stats",
+ ]
+
def describe_group_results(self, recipe, results):
result = results[0]
return [
@@ -28,12 +38,7 @@ def compare_result_with_baseline(self, recipe, result, baseline):
comparison_result = False
result_text.append("No baseline found for this flow")
else:
- for i in [
- "generator_results",
- "generator_cpu_stats",
- "receiver_results",
- "receiver_cpu_stats",
- ]:
+ for i in self._metrics_to_evaluate:
comparison, text = self._average_diff_comparison(
name="{} average".format(i),
target=getattr(result, i),
--
2.28.0