author | Jason Ekstrand <jason.ekstrand@intel.com> | 2015-10-13 18:28:03 -0700
---|---|---
committer | Jason Ekstrand <jason.ekstrand@intel.com> | 2015-10-19 12:12:05 -0700
commit | 95615c9a8841034f3e2441a738c696bdb6c10967 |
tree | 68bdd9981a3cc3221359e6a9968472d9bc4d7eb3 | /misc
parent | 64b50948ae896f5e2f7da29a7ba071ef5013980e |
cru/report: Add support for reporting the results of a single run
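With this change, `crucible-report one-run.log` prints a per-status summary of a single run on its own, while the existing two-log comparison, e.g. `crucible-report before.log after.log`, is kept intact (the log file names here are hypothetical, for illustration only).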
Diffstat (limited to 'misc')
-rwxr-xr-x | misc/crucible-report | 77
1 file changed, 49 insertions(+), 28 deletions(-)
diff --git a/misc/crucible-report b/misc/crucible-report
index 315e142..f629cdc 100755
--- a/misc/crucible-report
+++ b/misc/crucible-report
@@ -5,7 +5,7 @@
 NAME
     crucible-report - a script for generating reports from crucible runs
 SYNOPSIS
-    crucible-report log1 log2"""
+    crucible-report log1 [log2]"""
 
 import re
 import sys
@@ -61,35 +61,56 @@ def colored_status(status):
     color = status_color[status]
     return '\x1b[' + str(90 + color) + 'm' + status + '\x1b[0m'
 
-if len(sys.argv) < 3:
+if len(sys.argv) < 2 or len(sys.argv) > 3:
     print(usage)
     sys.exit(3)
 
 run1 = parse_results(sys.argv[1])
-run2 = parse_results(sys.argv[2])
 
-num_tests = len(run1.keys())
-fixed = 0
-regressions = 0
-
-for name in set(run1.keys()) | set(run2.keys()):
-    # If a test wasn't run, consider it skipped.
-    status1 = run1.get(name, 'skip')
-    status2 = run2.get(name, 'skip')
-
-    if status1 == status2:
-        continue
-
-    if status_score[status1] < status_score[status2]:
-        fixed += 1
-    else:
-        regressions += 1
-
-    print(' {0:>5s} -> {1:<5s} : {2}'.format(colored_status(status1),
-                                             colored_status(status2), name))
-
-print('================================')
-print('     fixed: {0}'.format(fixed))
-print(' regressed: {0}'.format(regressions))
-print(' unchanged: {0}'.format(num_tests - fixed - regressions))
-print('     total: {0}'.format(num_tests))
+if len(sys.argv) == 2:
+    counts = {
+        'pass' : 0,
+        'fail' : 0,
+        'skip' : 0,
+        'lost' : 0,
+    }
+
+    for name, status in run1.items():
+        counts[status] += 1
+        print(' {0:>5s} : {1}'.format(colored_status(status), name))
+
+    print('================================')
+    print('  pass: {0}'.format(counts['pass']))
+    print('  skip: {0}'.format(counts['skip']))
+    print('  fail: {0}'.format(counts['fail']))
+    print('  lost: {0}'.format(counts['lost']))
+    print(' total: {0}'.format(len(run1)))
+
+else:
+    run2 = parse_results(sys.argv[2])
+
+    num_tests = len(run1.keys())
+    fixed = 0
+    regressions = 0
+
+    for name in set(run1.keys()) | set(run2.keys()):
+        # If a test wasn't run, consider it skipped.
+        status1 = run1.get(name, 'skip')
+        status2 = run2.get(name, 'skip')
+
+        if status1 == status2:
+            continue
+
+        if status_score[status1] < status_score[status2]:
+            fixed += 1
+        else:
+            regressions += 1
+
+        print(' {0:>5s} -> {1:<5s} : {2}'.format(colored_status(status1),
+                                                 colored_status(status2), name))
+
+    print('================================')
+    print('     fixed: {0}'.format(fixed))
+    print(' regressed: {0}'.format(regressions))
+    print(' unchanged: {0}'.format(num_tests - fixed - regressions))
+    print('     total: {0}'.format(num_tests))
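The hunk leans on two helpers defined earlier in misc/crucible-report and not shown in this diff: `parse_results`, which turns a log file into a `{test_name: status}` mapping (the `.get(name, 'skip')` and `.items()` calls imply this shape), and `status_score`, which ranks statuses so that a rising score counts as a fix. A minimal sketch of both, assuming a "status: test-name" log line format that this diff does not confirm:

```python
import re

# Assumed relative ordering: a higher score is a better outcome, which is
# what the "status_score[status1] < status_score[status2]" test implies.
status_score = {'lost': 0, 'fail': 1, 'skip': 2, 'pass': 3}

def parse_results(filename):
    """Parse a crucible log into a {test_name: status} dict.

    The "status: test-name" line format matched here is an assumption;
    the real parse_results lives earlier in misc/crucible-report.
    """
    line_re = re.compile(r'\b(pass|fail|skip|lost):\s+(\S+)')
    results = {}
    with open(filename) as f:
        for line in f:
            m = line_re.search(line)
            if m:
                results[m.group(2)] = m.group(1)
    return results
```

Given that shape, the new control flow tallies one run's statuses when a single log is passed and falls through to the original fixed/regressed comparison when two are, so both `crucible-report run.log` and `crucible-report old.log new.log` work from the same parsed data (file names again hypothetical).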