On Fri, Apr 12, 2019 at 02:44:10PM +0200, olichtne(a)redhat.com wrote:
From: Ondrej Lichtner <olichtne(a)redhat.com>
The evaluator is based on how reporting works for CPU measurements -
separating the individual cpus by the host machine. And uses a
comparison method based on how flow average evaluation implements it.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
.../Evaluators/BaselineCPUAverageEvaluator.py | 52 ++++++++++++++-----
1 file changed, 40 insertions(+), 12 deletions(-)
diff --git a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
index de0b83d..e9c6bca 100644
--- a/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
+++ b/lnst/RecipeCommon/Perf/Evaluators/BaselineCPUAverageEvaluator.py
@@ -6,6 +6,7 @@ from ..Measurements.BaseCPUMeasurement import (
CPUMeasurementResults,
AggregatedCPUMeasurementResults,
)
+from ..Results import result_averages_difference
class BaselineCPUAverageEvaluator(BaseEvaluator):
@@ -13,24 +14,51 @@ class BaselineCPUAverageEvaluator(BaseEvaluator):
self._pass_difference = pass_difference
def evaluate_results(self, recipe, results):
- for result in results:
- baseline = self.get_baseline(recipe, result)
- self._compare_result_with_baseline(recipe, result, baseline)
+ for host_results in self._divide_results_by_host(results).values():
+ self._evaluate_host_results(recipe, host_results)
def get_baseline(self, recipe, result):
return None
- def _compare_result_with_baseline(self, recipe, result, baseline):
+ def _divide_results_by_host(self, results):
+ results_by_host = {}
+ for result in results:
+ if result.host not in results_by_host:
+ results_by_host[result.host] = []
+ results_by_host[result.host].append(result)
+ return results_by_host
+
+ def _evaluate_host_results(self, recipe, host_results):
comparison_result = True
result_text = [
- "CPU Baseline average evaluation".format(),
- "Configured {}% difference as acceptable".format(self._pass_difference),
+ "CPU Baseline average evaluation for Host {}:".format(
+ host_results[0].host.hostid
+ ),
+ "Configured {}% difference as acceptable".format(
+ self._pass_difference
+ ),
+ ]
+ pairs = [
+ (result, self.get_baseline(recipe, result))
+ for result in host_results
]
- if baseline is None:
- comparison_result = False
- result_text.append("No baseline found for this CPU measurement")
- else:
- result_text.append("I don't know how to compare CPU averages yet!!!")
- comparison_result = False
+ for result, baseline in pairs:
+ if baseline is None:
+ comparison_result = False
+ result_text.append(
+ "CPU {cpuid}: no baseline found".format(cpuid=result.cpu)
+ )
+ continue
+ difference = result_averages_difference(
+ result.utilization, baseline.utilization
+ )
+ if difference is None:
+ comparison_result = False
+ result_text.append(
+ "CPU {cpuid}: no baseline found".format(cpuid=result.cpu)
+ )
+ else:
+ if abs(difference) > self._pass_difference:
+ comparison_result = False
+ result_text.append(
+ "CPU {cpuid}: utilization {diff:.2f}% {direction} than baseline".format(
+ cpuid=result.cpu,
+ diff=abs(difference),
+ direction="higher" if difference >= 0 else "lower",
+ )
+ )
recipe.add_result(comparison_result, "\n".join(result_text))
--
2.21.0
Looks like there are a couple of issues with this patch that showed up
when running on real hardware, with numbers that didn't show up when
running on a VM.
Will send a v2...
-Ondrej