diff options
author | Kenneth Graunke <kenneth@whitecape.org> | 2012-02-16 17:32:40 -0800 |
---|---|---|
committer | Kenneth Graunke <kenneth@whitecape.org> | 2012-03-02 15:45:30 -0800 |
commit | e627eb887b14de448349a89e67ceba813904df74 (patch) | |
tree | b31f26571ce6f6634487ddc3979358fd280e0f25 /piglit-run.py | |
parent | 506ab46926c7069681adce0eb24e3c2b83417865 (diff) |
Add the ability to resume an interrupted test run where it left off.
GPUs like to hang, especially when barraged with lots of mean Piglit
tests. Usually this results in the poor developer having to figure out
what test hung, blacklist it via -x, and start the whole test run over.
This can waste a huge amount of time, especially when many tests hang.
This patch adds the ability to resume a Piglit run where you left off.
The workflow is:
$ piglit-run.py -t foo tests/quick.tests results/foobar-1
<interrupt the test run somehow>
$ piglit-run.py -r -x bad-test results/foobar-1
To accomplish this, piglit-run.py now stores the test profile
(quick.tests) and -t/-x options in the JSON results file so it can tell
what you were originally running. When run with the --resume option, it
re-reads the results file to obtain this information (repairing broken
JSON if necessary), rewrites the existing results, and runs any
remaining tests.
WARNING:
Results files produced after this commit are incompatible with older
piglit-summary-html.py (due to the extra "options" section).
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Paul Berry <stereotype441@gmail.com>
Diffstat (limited to 'piglit-run.py')
-rwxr-xr-x | piglit-run.py | 59 |
1 file changed, 54 insertions, 5 deletions
diff --git a/piglit-run.py b/piglit-run.py index 2867a7339..296d46324 100755 --- a/piglit-run.py +++ b/piglit-run.py @@ -28,6 +28,7 @@ import re import sys, os import time import traceback +import json sys.path.append(path.dirname(path.realpath(sys.argv[0]))) import framework.core as core @@ -39,6 +40,7 @@ from framework.threads import synchronized_self def usage(): USAGE = """\ Usage: %(progName)s [options] [profile.tests] [results] + %(progName)s [options] -r [results] Options: -h, --help Show this message @@ -60,6 +62,10 @@ Example: %(progName)s -t ^glean/ -t tex tests/all.tests results/all Run all tests that are in the 'glean' group or whose path contains the substring 'tex' + + %(progName)s -r -x bad-test results/all + Resume an interrupted test run whose results are stored in the + directory results/all, skipping bad-test. """ print USAGE % {'progName': sys.argv[0]} sys.exit(1) @@ -71,25 +77,33 @@ def main(): option_list = [ "help", "dry-run", + "resume", "tests=", "name=", "exclude-tests=", "concurrent=", ] - options, args = getopt(sys.argv[1:], "hdt:n:x:c:", option_list) + options, args = getopt(sys.argv[1:], "hdrt:n:x:c:", option_list) except GetoptError: usage() OptionName = '' + OptionResume = False + test_filter = [] + exclude_filter = [] for name, value in options: if name in ('-h', '--help'): usage() elif name in ('-d', '--dry-run'): env.execute = False + elif name in ('-r', '--resume'): + OptionResume = True elif name in ('-t', '--tests'): + test_filter.append(value) env.filter.append(re.compile(value)) elif name in ('-x', '--exclude-tests'): + exclude_filter.append(value) env.exclude_filter.append(re.compile(value)) elif name in ('-n', '--name'): OptionName = value @@ -101,11 +115,29 @@ def main(): else: usage() - if len(args) != 2: - usage() + if OptionResume: + if test_filter or OptionName: + print "-r is not compatible with -t or -n." 
+ usage() + if len(args) != 1: + usage() + resultsDir = args[0] + + # Load settings from the old results JSON + old_results = core.loadTestResults(resultsDir) + profileFilename = old_results.options['profile'] + for value in old_results.options['filter']: + test_filter.append(value) + env.filter.append(re.compile(value)) + for value in old_results.options['exclude_filter']: + exclude_filter.append(value) + env.exclude_filter.append(re.compile(value)) + else: + if len(args) != 2: + usage() - profileFilename = args[0] - resultsDir = path.realpath(args[1]) + profileFilename = args[0] + resultsDir = path.realpath(args[1]) # Change to the piglit's path piglit_dir = path.dirname(path.realpath(sys.argv[0])) @@ -127,6 +159,16 @@ def main(): json_writer = core.JSONWriter(result_file) json_writer.open_dict() + # Write out command line options for use in resuming. + json_writer.write_dict_key('options') + json_writer.open_dict() + json_writer.write_dict_item('profile', profileFilename) + json_writer.write_dict_key('filter') + result_file.write(json.dumps(test_filter)) + json_writer.write_dict_key('exclude_filter') + result_file.write(json.dumps(exclude_filter)) + json_writer.close_dict() + json_writer.write_dict_item('name', results.name) for (key, value) in env.collectData().items(): json_writer.write_dict_item(key, value) @@ -135,6 +177,13 @@ def main(): json_writer.write_dict_key('tests') json_writer.open_dict() + # If resuming an interrupted test run, re-write all of the existing + # results since we clobbered the results file. Also, exclude them + # from being run again. + if OptionResume: + for (key, value) in old_results.tests.items(): + json_writer.write_dict_item(key, value) + env.exclude_tests.add(key) time_start = time.time() profile.run(env, json_writer) |