author    Martin Peres <martin.peres@linux.intel.com>    2016-01-12 22:12:14 +0200
committer Martin Peres <martin.peres@linux.intel.com>    2016-01-13 13:16:56 +0200
commit    d75b020fa9095ef52b3eb313ad31cdfb7cb2d82e (patch)
tree      10c3195f32b0d99d3c75723a1f5d39730a930dfa
parent    78f19b11d130816d834dfeead5b7978f6979d981 (diff)
ezbench.py: initial version of perf and build failure bisecting
-rw-r--r--  utils/ezbench.py   | 88
-rwxr-xr-x  utils/ezbenchd.py  |  3
2 files changed, 91 insertions, 0 deletions
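
The change teaches SmartEzbench to bisect on its own: commits are ordered with git log --format=%h (newest first), and whenever two already-tested commits disagree (the build went from working to broken, or a benchmark moved by more than a threshold), the commit halfway between them is scheduled for testing. A minimal standalone sketch of that halving step, mirroring the __find_middle_commit() helper added below (the function name middle_commit and the example history are illustrative only, not part of the patch):

    def middle_commit(git_history, old, new):
        # git_history is newest-first, as produced by git log --format=%h
        old_idx = git_history.index(old)
        new_idx = git_history.index(new)
        middle_idx = int(old_idx - ((old_idx - new_idx) / 2))
        if middle_idx in (old_idx, new_idx):
            return None  # nothing left in between: bisection has converged
        return git_history[middle_idx]

    history = ["eee", "ddd", "ccc", "bbb", "aaa"]  # newest first
    print(middle_commit(history, "aaa", "eee"))    # -> "ccc"

Each detected change roughly halves the suspect range, so a culprit is narrowed down in O(log n) extra scheduled runs.
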
diff --git a/utils/ezbench.py b/utils/ezbench.py
index f6b2d6c..af9d20e 100644
--- a/utils/ezbench.py
+++ b/utils/ezbench.py
@@ -555,6 +555,94 @@ class SmartEzbench:
         return True
+    def __get_git_history__(self):
+        git_history = list()
+
+        # Get the repo directory
+        ezbench = self.__create_ezbench()
+        run_info = ezbench.run_commits(["HEAD"], [], [], dry_run=True)
+
+        # Get the list of commits and store their position in the list in a dict
+        git_history = subprocess.check_output(["/usr/bin/git", "log", "--format=%h"],
+                                              cwd=run_info.repo_dir).decode().split()
+
+        return git_history
+
+    def report(self, reorder_commits = True):
+        git_history = dict()
+
+        if reorder_commits:
+            git_history = self.__get_git_history__()
+
+        # Generate the report, order commits based on the git history
+        r = genPerformanceReport(self.state['log_folder'], silentMode = True,
+                                 commits_rev_order=git_history)
+
+    def __find_middle_commit(self, git_history, old, new, msg):
+        old_idx = git_history.index(old)
+        new_idx = git_history.index(new)
+        middle_idx = int(old_idx - ((old_idx - new_idx) / 2))
+        if middle_idx != old_idx and middle_idx != new_idx:
+            middle = git_history[middle_idx]
+            log = "{} between commits {}({}) and {}({}), bisect using commit {}({})"
+            self.__log(Criticality.WW,
+                       log.format(msg, old, old_idx, new, new_idx, middle, middle_idx))
+            return middle
+        else:
+            self.__log(Criticality.WW,
+                       "{} due to commit '{}'".format(msg, new))
+            return None
+
+    def schedule_enhancements(self, perf_change_threshold=0.05):
+        # Generate the report, order commits based on the git history
+        git_history = self.__get_git_history__()
+        r = genPerformanceReport(self.state['log_folder'], silentMode = True,
+                                 commits_rev_order=git_history)
+
+        # Check all the commits
+        bench_prev = dict()
+        commit_prev = None
+        for commit in r.commits:
+            # Look for compilation errors
+            if ((commit.compil_exit_code > 0 and commit_prev is not None and
+                 commit_prev.compil_exit_code == 0) or
+                (commit.compil_exit_code == 0 and commit_prev is not None and
+                 commit_prev.compil_exit_code > 0)):
+                if commit.compil_exit_code > 0:
+                    msg = "The build got broken"
+                else:
+                    msg = "The build got fixed"
+                middle_commit = self.__find_middle_commit(git_history,
+                                                          commit_prev.sha1,
+                                                          commit.sha1, msg)
+                if middle_commit is not None:
+                    self.add_benchmark(middle_commit, "no-op", 1)
+
+            # Look for performance regressions
+            for result in commit.results:
+                perf = sum(result.data) / len(result.data)
+                bench = result.benchmark.full_name
+                bench_unit = result.benchmark.unit_str
+                if result.benchmark.full_name in bench_prev:
+                    # We got previous perf results, compare!
+                    old_commit = bench_prev[bench][0]
+                    old_perf = bench_prev[bench][1]
+                    diff = perf / old_perf
+
+                    if (diff > (1 + perf_change_threshold) or
+                        diff < (1 - perf_change_threshold)):
+                        msg = "Bench '{}' went from {} to {} {}".format(bench, old_perf,
+                                                                        perf, bench_unit)
+                        middle_commit = self.__find_middle_commit(git_history, old_commit,
+                                                                  commit.sha1, msg)
+                        if middle_commit is not None:
+                            # TODO: Just ensure the benchmark is run
+                            # TODO: Figure out how many runs we need based on the variance
+                            self.add_benchmark(middle_commit, bench, 3)
+
+                bench_prev[bench] = (commit.sha1, perf)
+            commit_prev = commit
+
 # Report parsing
 class Benchmark:
     def __init__(self, full_name, unit="undefined"):
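
On the performance side, schedule_enhancements() averages each benchmark's samples per commit and compares the ratio against the previous tested commit; anything outside the 1 ± perf_change_threshold band (5% by default) triggers a bisection run. A rough standalone illustration of that test (perf_changed is a hypothetical helper, not part of the patch):

    def perf_changed(old_perf, new_perf, threshold=0.05):
        # Ratio-based check, mirroring "diff = perf / old_perf" above
        diff = new_perf / old_perf
        return diff > (1 + threshold) or diff < (1 - threshold)

    print(perf_changed(100.0, 103.0))  # False: 3% is within the default band
    print(perf_changed(100.0, 94.0))   # True: a 6% drop gets bisected

Build breakage is handled the same way, except the midpoint commit only gets a cheap "no-op" job, since a single compile result is enough to tell which half is broken.
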
diff --git a/utils/ezbenchd.py b/utils/ezbenchd.py
index e459235..ea97ba8 100755
--- a/utils/ezbenchd.py
+++ b/utils/ezbenchd.py
@@ -44,8 +44,10 @@ def stop_handler(signum, frame):
     return
 
 def reload_conf_handler(signum, frame):
+    # TODO
     return
+
 
 # parse the options
 parser = argparse.ArgumentParser()
 args = parser.parse_args()
@@ -65,6 +67,7 @@ while not stop_requested:
         sbench = SmartEzbench(ezbench_dir, report_name)
         if sbench.running_mode() == RunningMode.RUN:
             sbench.run()
+            sbench.schedule_enhancements()
 
     # TODO: Replace this by inotify
     time.sleep(1)
\ No newline at end of file
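
With the ezbenchd.py hook in place, each pass of the daemon loop first executes the runs that are already scheduled and then lets schedule_enhancements() queue the next bisection points, which get picked up on the following pass. A condensed sketch of that flow, with report discovery and the RunningMode check elided (list_report_names and make_sbench are placeholders, not ezbench APIs):

    import time

    def daemon_loop(ezbench_dir, list_report_names, make_sbench, should_stop):
        while not should_stop():
            for report_name in list_report_names(ezbench_dir):
                sbench = make_sbench(ezbench_dir, report_name)
                sbench.run()                    # execute the currently scheduled work
                sbench.schedule_enhancements()  # queue bisection runs for the next pass

            # the patch keeps polling for now; the TODO above suggests inotify instead
            time.sleep(1)
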