summaryrefslogtreecommitdiff
path: root/misc/crucible-report
blob: d952dd3bd84f7dbb32e382b9cdeb55e252c872bb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
#!/usr/bin/env python3

# Help text printed (and exit code 3 returned) when the script is
# invoked with fewer than two log-file arguments.
usage = """\
NAME
  crucible-report - a script for generating reports from crucible runs

SYNOPSIS
  crucible-report log1 log2"""

import re
import sys
from textwrap import dedent  # NOTE(review): imported but never used below -- confirm before removing

# Matches one crucible status line, e.g. "crucible: pass : test-name".
# Group 1 is the status keyword, group 2 is the test name.
cru_re = re.compile(r'^crucible:\s+(\w*)\s*:\s+(\S*)\s*$')

def parse_results(fname):
    """Parse a crucible log file into a {test_name: status} dict.

    Recognized final statuses are 'pass', 'fail', and 'skip'.  A test
    that logged 'start' but never reported a final status is marked
    'lost'.  Info messages and non-crucible lines are ignored.  If a
    test appears more than once, the first final status wins.
    """
    results = {}
    with open(fname) as f:
        # Iterate the file lazily instead of readlines(): no need to
        # slurp the whole log into memory.
        for line in f:
            match = cru_re.match(line)
            if not match:
                continue

            name = match.group(2)
            status = match.group(1)

            # We don't want info messages
            if status not in ('start', 'pass', 'fail', 'skip'):
                continue

            # XXX: We really shouldn't have to do this.  If we had to do
            # this then some test is getting run multiple times and that's
            # bad.  Keep the first final status the test reported.
            if name in results and results[name] != 'start':
                continue

            results[name] = status

    # Go through and mark lost tests ('start' with no final status).
    for name in results:
        if results[name] == 'start':
            results[name] = 'lost'

    return results

# Rank each outcome so a transition can be classified: moving to a
# higher score is a fix, to a lower score a regression.
# lost=-2, fail=-1, skip=0, pass=1.
status_score = {status: rank for rank, status in
                enumerate(('lost', 'fail', 'skip', 'pass'), start=-2)}

# Bright-ANSI color offset for each outcome (added to 90 below):
# lost -> yellow(3), fail -> red(1), skip -> blue(4), pass -> green(2).
status_color = dict(zip(('lost', 'fail', 'skip', 'pass'),
                        (3, 1, 4, 2)))

def colored_status(status):
    """Return *status* wrapped in its bright ANSI color escape codes."""
    code = 90 + status_color[status]
    return '\x1b[{0}m{1}\x1b[0m'.format(code, status)

# --- Entry point: compare two crucible runs and print a diff report ---
if len(sys.argv) < 3:
    print(usage)
    sys.exit(3)

run1 = parse_results(sys.argv[1])
run2 = parse_results(sys.argv[2])

# The report is only valid if both runs have the same set of tests.
# Use an explicit check rather than `assert` so the validation still
# runs under `python -O` and the user gets a readable error.
if run1.keys() != run2.keys():
    print('error: the two logs do not cover the same set of tests',
          file=sys.stderr)
    sys.exit(1)

num_tests = len(run1)
fixed = 0
regressions = 0

for name in run1:
    status1 = run1[name]
    status2 = run2[name]

    # Only transitions are interesting; skip unchanged results.
    if status1 == status2:
        continue

    if status_score[status1] < status_score[status2]:
        fixed += 1
    else:
        regressions += 1

    # Pad the plain status text before colorizing: a format-spec width
    # like '{0:>5s}' counts the invisible ANSI escape bytes, so aligning
    # the colored string directly never actually padded anything.
    print(' {0}{1} -> {2}{3} : {4}'.format(' ' * max(0, 5 - len(status1)),
                                           colored_status(status1),
                                           colored_status(status2),
                                           ' ' * max(0, 5 - len(status2)),
                                           name))

print('================================')
print('  fixed:      {0}'.format(fixed))
print('  regressed:  {0}'.format(regressions))
print('  unchanged:  {0}'.format(num_tests - fixed - regressions))
print('  total:      {0}'.format(num_tests))