From fcddcacd9ed941ef660178dc588264419a311ee6 Mon Sep 17 00:00:00 2001
From: Nicolai Hähnle
Date: Tue, 26 Sep 2017 10:35:35 +0200
Subject: framework: allow specifying the number of jobs for concurrency

The default remains the same: number of CPUs. But on systems with lots
of cores but comparatively little (V)RAM it can make sense to reduce
the number of jobs to avoid random failures caused by out-of-memory
conditions.

Reviewed-by: Dylan Baker
---
 framework/options.py      |  1 +
 framework/profile.py      |  5 +++--
 framework/programs/run.py | 23 +++++++++++++++++++++--
 3 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/framework/options.py b/framework/options.py
index 211159a45..f5f32af78 100644
--- a/framework/options.py
+++ b/framework/options.py
@@ -58,6 +58,7 @@ class _Options(object):  # pylint: disable=too-many-instance-attributes
         self.sync = False
         self.deqp_mustpass = False
         self.process_isolation = True
+        self.jobs = None
 
         # env is used to set some base environment variables that are not going
         # to change across runs, without sending them to os.environ which is
diff --git a/framework/profile.py b/framework/profile.py
index ffc91e0a6..a6cac2cf0 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -597,7 +597,7 @@
         'Did you specify the right file?'.format(filename))
 
 
-def run(profiles, logger, backend, concurrency):
+def run(profiles, logger, backend, concurrency, jobs):
     """Runs all tests using Thread pool.
 
     When called this method will flatten out self.tests into self.test_list,
@@ -614,6 +614,7 @@
     profiles -- a list of Profile instances.
     logger -- a log.LogManager instance.
     backend -- a results.Backend derived instance.
+    jobs -- maximum number of concurrent jobs. Use os.cpu_count() by default
     """
     chunksize = 1
 
@@ -670,7 +671,7 @@
     #
     # The default value of pool is the number of virtual processor cores
     single = multiprocessing.dummy.Pool(1)
-    multi = multiprocessing.dummy.Pool()
+    multi = multiprocessing.dummy.Pool(jobs)
 
     try:
         for p in profiles:
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 14fb764a2..ab1cb4e24 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -208,6 +208,14 @@
                              'isolation. This allows, but does not require, '
                              'tests to run multiple tests per process. '
                              'This value can also be set in piglit.conf.')
+    parser.add_argument('-j', '--jobs',
+                        dest='jobs',
+                        action='store',
+                        type=int,
+                        default=core.PIGLIT_CONFIG.safe_get(
+                            'core', 'jobs', None),
+                        help='Set the maximum number of jobs to run concurrently. '
+                             'By default, the reported number of CPUs is used.')
     parser.add_argument("--ignore-missing",
                         dest="ignore_missing",
                         action="store_true",
@@ -296,6 +304,7 @@
     options.OPTIONS.sync = args.sync
     options.OPTIONS.deqp_mustpass = args.deqp_mustpass
     options.OPTIONS.process_isolation = args.process_isolation
+    options.OPTIONS.jobs = args.jobs
 
     # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform
@@ -364,7 +373,7 @@
     if args.include_tests:
         p.filters.append(profile.RegexFilter(args.include_tests))
 
-    profile.run(profiles, args.log_level, backend, args.concurrency)
+    profile.run(profiles, args.log_level, backend, args.concurrency, args.jobs)
 
     time_elapsed.end = time.time()
     backend.finalize({'time_elapsed': time_elapsed.to_json()})
@@ -389,6 +398,14 @@
                         dest="no_retry",
                         action="store_true",
                         help="Do not retry incomplete tests")
+    parser.add_argument('-j', '--jobs',
+                        dest='jobs',
+                        action='store',
+                        type=int,
+                        default=core.PIGLIT_CONFIG.safe_get(
+                            'core', 'jobs', None),
+                        help='Set the maximum number of jobs to run concurrently. '
+                             'By default, the reported number of CPUs is used.')
     args = parser.parse_args(input_)
 
     _disable_windows_exception_messages()
@@ -398,6 +415,7 @@
     options.OPTIONS.sync = results.options['sync']
     options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
     options.OPTIONS.process_isolation = results.options['process_isolation']
+    options.OPTIONS.jobs = args.jobs
 
     core.get_config(args.config_file)
 
@@ -453,7 +471,8 @@
         profiles,
         results.options['log_level'],
         backend,
-        results.options['concurrent'])
+        results.options['concurrent'],
+        args.jobs)
     except exceptions.PiglitUserError as e:
         if str(e) != 'no matching tests':
             raise
-- 
cgit v1.2.3