#!/usr/bin/env python3

usage = """\
NAME
    crucible-report - a script for generating reports from crucible runs

SYNOPSIS
    crucible-report log1 [log2]"""

import re
import sys

cru_re = re.compile(r'^crucible:\s+(\w*)\s*:\s+(\S*)\s*$')
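# cru_re picks apart crucible log lines of the form
#     crucible: <status> : <test-name>
# for example (the test name here is illustrative only):
#     crucible: pass : func.example.basic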

def parse_results(fname):
    results = {}
    with open(fname) as f:
        for line in f.readlines():
            match = cru_re.match(line)
            if not match:
                continue
            name = match.group(2)
            status = match.group(1)
            # We don't want info messages.
            if status not in ('start', 'pass', 'fail', 'skip'):
                continue
            # XXX: We really shouldn't have to do this.  If we have to
            # do this, then some test is getting run multiple times,
            # and that's bad.
            if name in results and results[name] != 'start':
                continue
            results[name] = status
    # Go through and mark lost tests
    for name in results:
        if results[name] == 'start':
            results[name] = 'lost'
    return results
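
# Rank each status so that a transition to a higher score counts as a
# fix and a transition to a lower score counts as a regression.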
status_score = {
    'lost': -2,
    'fail': -1,
    'skip': 0,
    'pass': 1,
}

status_color = {
    'lost': 3,  # Yellow
    'fail': 1,  # Red
    'skip': 4,  # Blue
    'pass': 2,  # Green
}
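
# Wrap the status in an ANSI escape sequence: 90 + color selects a
# bright foreground color (91 = red, 92 = green, ...), and '\x1b[0m'
# resets the attributes afterwards.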
def colored_status(status):
    color = status_color[status]
    return '\x1b[' + str(90 + color) + 'm' + status + '\x1b[0m'
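
# With a single log file, print a per-test summary of that run; with
# two log files, report what changed between the runs.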
if len(sys.argv) < 2 or len(sys.argv) > 3:
    print(usage)
    sys.exit(3)

run1 = parse_results(sys.argv[1])

if len(sys.argv) == 2:
    counts = {
        'pass': 0,
        'fail': 0,
        'skip': 0,
        'lost': 0,
    }
    for name, status in run1.items():
        counts[status] += 1
        print(' {0:>5s} : {1}'.format(colored_status(status), name))
    print('================================')
    print(' pass: {0}'.format(counts['pass']))
    print(' skip: {0}'.format(counts['skip']))
    print(' fail: {0}'.format(counts['fail']))
    print(' lost: {0}'.format(counts['lost']))
    print(' total: {0}'.format(len(run1)))
else:
    run2 = parse_results(sys.argv[2])
    # Compare the two runs test-by-test.  A test that appears in only
    # one of the runs is treated as skipped in the other.
    all_tests = set(run1.keys()) | set(run2.keys())
    num_tests = len(all_tests)
    fixed = 0
    regressions = 0
    for name in all_tests:
        # If a test wasn't run, consider it skipped.
        status1 = run1.get(name, 'skip')
        status2 = run2.get(name, 'skip')
        if status1 == status2:
            continue
        if status_score[status1] < status_score[status2]:
            fixed += 1
        else:
            regressions += 1
        print(' {0:>5s} -> {1:<5s} : {2}'.format(colored_status(status1),
                                                 colored_status(status2), name))
    print('================================')
    print(' fixed: {0}'.format(fixed))
    print(' regressed: {0}'.format(regressions))
    print(' unchanged: {0}'.format(num_tests - fixed - regressions))
    print(' total: {0}'.format(num_tests))
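
# Example invocation (file names are hypothetical):
#     ./crucible-report old-run.log new-run.log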