path: root/utils/bisect.py
#!/usr/bin/env python3
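# Bisect a performance change between two commits by repeatedly running a
# benchmark through ezbench.sh and feeding the results to `git bisect`.
#
# Illustrative invocation (the repository path, benchmark name and make
# command below are examples, not values shipped with ezbench):
#   ./bisect.py -p ~/src/mesa -b glxgears -r 3 -m "make -j8 install" \
#       <BEFORE_COMMIT> <AFTER_COMMIT>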

from subprocess import call, check_output
import subprocess
import argparse
import shutil
import sys
import os

ezbench_dir = os.path.abspath(sys.path[0]+'/../') + '/'

# Import ezbench from the utils/ folder
sys.path.append(ezbench_dir + 'utils/')
from ezbench import *

# Run ezbench on a single commit and return the mean performance it reports
def check_commit_perf(ezbench_base_cmd, commit, logs_dir):
    cmd = list(ezbench_base_cmd)
    cmd.append(commit)

    # Call ezbench
    with open(os.devnull, "w") as f:
        call(cmd, stdout=f, stderr=f)

    # Parse the performance report generated so far
    commits, benchmarks = readPerformanceReport(logs_dir, True, True)

    if len(benchmarks) != 1:
        print ("Warning: Expected only one benchmark result for commit {}!".format(commit))

    # Return the mean performance of the most recent entry
    return getPerformanceResultsCommitBenchmark(commits[-1], benchmarks[0], True).mean()

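# Decide whether a measured performance value should be reported to `git bisect`
# as "good" or "bad": if performance dropped between BEFORE and AFTER, values
# above the threshold still look like BEFORE (good); if it improved, values
# above the threshold already contain the change (bad).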
def checkPerformance(beforePerf, afterPerf, threshold, perf):
    if beforePerf > afterPerf:
        if perf > threshold:
            res = "good"
        else:
            res = "bad"
    else:
        if perf > threshold:
            res = "bad"
        else:
            res = "good"
    return res

# parse the options
parser = argparse.ArgumentParser()
parser.add_argument("-p", dest='repo_path', help="Git repository's path",
                    action="store", required=True)
parser.add_argument("-b", dest='benchmark', help="Benchmark to run",
                    action="store", required=True)
parser.add_argument("-r", dest='rounds', help="Number of execution rounds",
                    action="store", type=int, nargs='?', const=3)
parser.add_argument("-m", dest='make_cmd', help="Compilation command",
                    action="store")
parser.add_argument("BEFORE_COMMIT")
parser.add_argument("AFTER_COMMIT")
args = parser.parse_args()

# compute the report name
reportName = "bisect_{benchmark}_{BEFORE_COMMIT}_{AFTER_COMMIT}".format(benchmark=args.benchmark,
                                                                        BEFORE_COMMIT=args.BEFORE_COMMIT,
                                                                        AFTER_COMMIT=args.AFTER_COMMIT)
logs_dir = os.path.abspath(ezbench_dir + '/logs/' + reportName)  + '/'

# Check if the report dir already exists and ask for deletion
if os.path.exists(logs_dir):
    shouldAbort = input("The log directory '{}' already exists and will be deleted.\nAbort? (y/N)".format(logs_dir)).lower() == 'y'
    if shouldAbort:
        sys.exit(0)
    shutil.rmtree(logs_dir, ignore_errors=True)
    print()

# Create the command line for ezbench
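# (check_commit_perf() appends the commit to benchmark at the end of this base command)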
ezbench_cmd = []
ezbench_cmd.append(ezbench_dir + "ezbench.sh")
ezbench_cmd.append("-p"); ezbench_cmd.append(args.repo_path)
ezbench_cmd.append("-b"); ezbench_cmd.append(args.benchmark)
ezbench_cmd.append("-r"); ezbench_cmd.append(str(args.rounds))
ezbench_cmd.append("-m"); ezbench_cmd.append(args.make_cmd)
ezbench_cmd.append("-N"); ezbench_cmd.append(reportName)

print("Checking the performance of:")

# First, try the before and after commits
print("\tBEFORE_COMMIT: ", end="",flush=True)
before_perf = check_commit_perf(ezbench_cmd, args.BEFORE_COMMIT, logs_dir)
print("Performance index {before_perf}".format(before_perf=before_perf))

print("\tAFTER_COMMIT:  ", end="",flush=True)
after_perf = check_commit_perf(ezbench_cmd, args.AFTER_COMMIT, logs_dir)
print("Performance index {after_perf}".format(after_perf=after_perf))
print()

# Find the threshold
threshold = (before_perf + after_perf) / 2
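# Probe the classifier just above and just below the threshold so the messages
# below can tell the user which side of the threshold maps to GOOD and which to BAD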
overThreshold = checkPerformance(before_perf, after_perf, threshold, threshold + 1).upper()
underThreshold = checkPerformance(before_perf, after_perf, threshold, threshold - 1).upper()

print("Setting the performance threshold to {threshold}.".format(threshold=threshold))
print("\tIf a commit's perf is > {threshold}, then the commit is {res}".format(threshold=threshold,
                                                                               res=overThreshold))
print("\tIf a commit's perf is < {threshold}, then the commit is {res}".format(threshold=threshold,
                                                                               res=underThreshold))
print()

print("Starting the bisecting process.")
print()

# Start the bisecting feature
os.chdir(args.repo_path)
check_output(['git', 'bisect', 'start'], stderr=subprocess.STDOUT)
check_output(['git', 'bisect', 'good', args.BEFORE_COMMIT], stderr=subprocess.STDOUT)
check_output(['git', 'bisect', 'bad', args.AFTER_COMMIT], stderr=subprocess.STDOUT)

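# Let git bisect pick candidate commits; measure each one, report it as good
# or bad, and stop once git announces the first bad commit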
while True:
    perf = check_commit_perf(ezbench_cmd, "HEAD", logs_dir)
    res = checkPerformance(before_perf, after_perf, threshold, perf)
    print("Performance index = {perf} (diffThreshold = {diff}). Marking as {res}\n".format(perf=perf,
                                                                                           diff=perf - threshold,                                                                           res=res.upper()))
    output = check_output(['git', 'bisect', res]).decode()

    print(output, end="")
    if "first bad commit" in output:
        firstBad = output.split(" ")[0]
        print ("Change introduced by commit {}".format(firstBad))
        break

check_output(['git', 'bisect', 'reset'], stderr=subprocess.STDOUT)