author     Martin Peres <martin.peres@linux.intel.com>    2016-08-18 22:29:52 +0300
committer  Martin Peres <martin.peres@linux.intel.com>    2016-08-18 23:49:16 +0300
commit     4e31387f65764a5de6ee956cec7bf129a70dff48 (patch)
tree       6219b89635e4ef1a016a5b0e12f9e2f2dfb1f925
parent     e46e5826a525a51e0904ba0b032b179a4662f01f (diff)
ezbench.py: s/unit_str/unit/g
No idea what I had in mind, but it sure has been bothering me
for some time!
-rwxr-xr-x  stats/compare_reports.py | 14
-rw-r--r--  utils/ezbench.py         | 37
2 files changed, 23 insertions, 28 deletions
diff --git a/stats/compare_reports.py b/stats/compare_reports.py
index 1a2d04e..3f3e0bc 100755
--- a/stats/compare_reports.py
+++ b/stats/compare_reports.py
@@ -125,7 +125,7 @@ def reports_to_html(reports, output, output_unit = None, title = None,
         db['reference'] = reference_report
         for result in ref_commit.results:
             average_raw = result.result().mean()
-            average = convert_unit(average_raw, result.unit_str, output_unit)
+            average = convert_unit(average_raw, result.unit, output_unit)
             average = float("{0:.2f}".format(average))
             average_raw = float("{0:.2f}".format(average_raw))
             if (not result.test.full_name in db["targets"] or
@@ -188,7 +188,7 @@ def reports_to_html(reports, output, output_unit = None, title = None,
                     db["metrics"][result.test.full_name] = []
                 db["commits"][commit.label]['reports'][report.name][result.test.full_name] = result
                 average_raw = result.result().mean()
-                average = convert_unit(average_raw, result.unit_str, output_unit)
+                average = convert_unit(average_raw, result.unit, output_unit)
                 score_sum += average
                 count += 1
@@ -493,7 +493,7 @@ def reports_to_html(reports, output, output_unit = None, title = None,
             result = db["commits"][commit]['reports'][report][test]
             diff_target = "{0:.2f}".format(result.diff_target)
         %>\\
-, ${diff_target}, "${tooltip_commit_table(commit)}<h4>Perf</h4><table><tr><td><b>Test</b></td><td>${test}</td></tr><tr><td><b>Target</b></td><td>${db['targets'][test]} ${output_unit} (${diff_target}%)</td></tr><tr><td><b>Raw value</b></td><td>${result.average_raw} ${result.unit_str} +/- ${result.margin_str}% (n=${len(result.result())})</td></tr><tr><td><b>Converted value</b></td><td>${result.average} ${output_unit} +/- ${result.margin_str}% (n=${len(result.result())})</td></tr></table><br/>"\\
+, ${diff_target}, "${tooltip_commit_table(commit)}<h4>Perf</h4><table><tr><td><b>Test</b></td><td>${test}</td></tr><tr><td><b>Target</b></td><td>${db['targets'][test]} ${output_unit} (${diff_target}%)</td></tr><tr><td><b>Raw value</b></td><td>${result.average_raw} ${result.unit} +/- ${result.margin_str}% (n=${len(result.result())})</td></tr><tr><td><b>Converted value</b></td><td>${result.average} ${output_unit} +/- ${result.margin_str}% (n=${len(result.result())})</td></tr></table><br/>"\\
         % else:
             , null, "${test}"\\
         % endif
@@ -689,10 +689,10 @@ def reports_to_html(reports, output, output_unit = None, title = None,
                 <%
                     r1 = db["commits"][commit]['reports'][report1.name][test]
                     r2 = db["commits"][commit]['reports'][report2.name][test]
-                    perf_diff = compute_perf_difference(r1.unit_str, r1.average_raw, r2.average_raw)
+                    perf_diff = compute_perf_difference(r1.unit, r1.average_raw, r2.average_raw)
                     perf_diff = "{0:.2f}".format(perf_diff)
                 %>
-                dataTable.addRows([['${test}', '${report1.name}', '${report2.name}', ${perf_diff}, "${r1.average_raw} => ${r2.average_raw} ${r1.unit_str}"]])
+                dataTable.addRows([['${test}', '${report1.name}', '${report2.name}', ${perf_diff}, "${r1.average_raw} => ${r2.average_raw} ${r1.unit}"]])
             % endif
         % endfor
     % endif
@@ -716,10 +716,10 @@ def reports_to_html(reports, output, output_unit = None, title = None,
         % if (test in db["commits"][commit]['reports'][report1.name] and test in db["targets"]):
             <%
                 r1 = db["commits"][commit]['reports'][report1.name][test]
-                perf_diff = compute_perf_difference(r1.unit_str, db["targets_raw"][test], r1.average_raw)
+                perf_diff = compute_perf_difference(r1.unit, db["targets_raw"][test], r1.average_raw)
                 perf_diff = "{0:.2f}".format(perf_diff)
             %>\\
-dataTable.addRows([['${test}', '${report1.name}', ${perf_diff}, "${r1.average_raw}(${report1.name}) => ${db["targets_raw"][test]}(target) ${r1.unit_str}"]])
+dataTable.addRows([['${test}', '${report1.name}', ${perf_diff}, "${r1.average_raw}(${report1.name}) => ${db["targets_raw"][test]}(target) ${r1.unit}"]])
             % endif
         % endfor
     % endif
diff --git a/utils/ezbench.py b/utils/ezbench.py
index bb6ff5c..ce3fb86 100644
--- a/utils/ezbench.py
+++ b/utils/ezbench.py
@@ -1196,13 +1196,13 @@ class SmartEzbench:
 class Test:
     def __init__(self, full_name, unit="undefined"):
         self.full_name = full_name
-        self.unit_str = unit
+        self.unit = unit

     def __eq__(x, y):
-        return x.full_name == y.full_name and x.unit_str == y.unit_str
+        return x.full_name == y.full_name and x.unit == y.unit

     def __hash__(self):
-        return hash(self.full_name) ^ hash(self.unit_str)
+        return hash(self.full_name) ^ hash(self.unit)

     # returns (base_name, subtests=[])
     @classmethod
@@ -1447,7 +1447,7 @@ class TestRun:
             # There are no subtests here
             data, unit, more_is_better = readCsv(runFile)
             if len(data) > 0:
-                result = SubTestFloat("", testResult.unit_str, data, runFile)
+                result = SubTestFloat("", testResult.unit, data, runFile)
                 self.__add_result__(result)
         elif testType == "unit":
             unit_tests = readUnitRun(runFile)
@@ -1792,7 +1792,7 @@ class TestResult:
         self.runs = []
         self.test_type = testType
         self.more_is_better = True
-        self.unit_str = None
+        self.unit = None

         self._results = set()
         self._cache_result = None
@@ -1801,26 +1801,26 @@ def __parse_results__(self, testType, testFile, runFiles, metricsFiles):
         # Read the data and abort if there is no data
-        data, unit_str, self.more_is_better = readCsv(testFile)
+        data, unit, self.more_is_better = readCsv(testFile)
         if len(data) == 0:
             raise ValueError("The TestResult {} does not contain any runs".format(testFile))

-        if unit_str is None:
-            unit_str = "FPS"
-        self.unit_str = unit_str
+        if unit is None:
+            unit = "FPS"
+        self.unit = unit

         # Check that we have the same unit as the test
-        if self.test.unit_str != self.unit_str:
-            if self.test.unit_str != "undefined":
+        if self.test.unit != self.unit:
+            if self.test.unit != "undefined":
                 msg = "The unit used by the test '{test}' changed from '{unit_old}' to '{unit_new}' in commit {commit}"
                 print(msg.format(test=test.full_name,
-                                 unit_old=test.unit_str,
-                                 unit_new=self.unit_str,
+                                 unit_old=test.unit,
+                                 unit_new=self.unit,
                                  commit=commit.sha1))
-            self.test.unit_str = unit_str
+            self.test.unit = unit

         for i in range(0, len(runFiles)):
-            run = TestRun(self, testType, runFiles[i], metricsFiles[runFiles[i]], unit_str, data[i])
+            run = TestRun(self, testType, runFiles[i], metricsFiles[runFiles[i]], unit, data[i])
             self._results |= run.results()
             self.runs.append(run)
@@ -2422,7 +2422,7 @@ class Report:
             for result_key in testresult.results():
                 result = testresult.result(result_key)
                 test = result.subtest_fullname()
-                test_unit = result.test.unit_str
+                test_unit = result.test.unit

                 if (result.value_type == BenchSubTestType.SUBTEST_FLOAT or
                     result.value_type == BenchSubTestType.METRIC):
@@ -2488,11 +2488,6 @@ class Report:
                                              result))
                         unittest_prev[subtest_name] = result
-
-                elif result.value_type == BenchSubTestType.METRIC:
-                    # Nothing to do for now, until we start bisecting
-                    # power.
-                    pass

                 elif result.value_type == BenchSubTestType.SUBTEST_IMAGE:
                     subtest_name = result.subtest_fullname()