author    Dylan Baker <dylan@pnwbakers.com>  2016-10-13 15:44:18 -0700
committer Dylan Baker <dylan@pnwbakers.com>  2016-11-10 10:50:31 -0800
commit    22f31b6419cedfd80fb43eac8462303791ada9b3 (patch)
tree      f3bb037c5000561c1c32ba50cf62fc50de5b4302
parent    3d887afe4762c78f1b280789339c4e73c71bc2dd (diff)
framework/profile: Don't merge profiles
Because we can copy profiles, we don't need to merge them to run more than one of them. Instead we can simply have a list of profiles and run them one by one.

One side effect of this is that tests will be run one profile at a time, so if running without the -1/--no-concurrency or -c/--all-concurrent options, tests will run in a sort of zipper pattern: <p1 concurrent>, <p1 non-concurrent>, <p2 concurrent>, etc.

Signed-off-by: Dylan Baker <dylanx.c.baker@intel.com>
-rw-r--r--  framework/profile.py                          | 98
-rw-r--r--  framework/programs/run.py                     | 33
-rw-r--r--  framework/summary/feature.py                  |  3
-rw-r--r--  unittests/framework/summary/test_feature.py  | 19
-rw-r--r--  unittests/framework/test_profile.py           | 17
5 files changed, 67 insertions(+), 103 deletions(-)
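For context, here is a minimal, standalone sketch of the scheduling this patch introduces. The Test and Profile classes below are hypothetical stand-ins, not piglit code; only the pool handling mirrors the new run()/run_profile() logic in framework/profile.py shown in the diff.

    import multiprocessing.dummy  # thread-backed pools, as in framework/profile.py

    class Test(object):
        """Hypothetical stand-in for framework.test.base.Test."""
        def __init__(self, name, run_concurrent=True):
            self.name = name
            self.run_concurrent = run_concurrent

        def execute(self):
            print('running {}'.format(self.name))

    class Profile(object):
        """Hypothetical stand-in for framework.profile.TestProfile."""
        def __init__(self, tests):
            self.test_list = dict((t.name, t) for t in tests)

    def run(profiles):
        """Run each profile in turn, mirroring the patch's scheduling."""
        single = multiprocessing.dummy.Pool(1)  # serial pool
        multi = multiprocessing.dummy.Pool()    # concurrent pool

        def run_threads(pool, profile, filterby=None):
            iterable = iter(profile.test_list.items())
            if filterby:
                iterable = (x for x in iterable if filterby(x))
            pool.map(lambda pair: pair[1].execute(), iterable)

        # Profiles are not merged; they are dispatched back to back, giving
        # the zipper pattern: <p1 concurrent>, <p1 serial>, <p2 concurrent>, ...
        for p in profiles:
            run_threads(multi, p, lambda x: x[1].run_concurrent)
            run_threads(single, p, lambda x: not x[1].run_concurrent)

        single.close()
        multi.close()

    if __name__ == '__main__':
        run([Profile([Test('p1/a'), Test('p1/b', run_concurrent=False)]),
             Profile([Test('p2/a'), Test('p2/b', run_concurrent=False)])])

Because each profile is dispatched and drained before the next one starts, there is no need to merge test_list dictionaries, which is what allows merge_test_profiles() and TestProfile.update() to be removed in the diff below.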
diff --git a/framework/profile.py b/framework/profile.py
index a64855ed1..e4b8308e1 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -49,7 +49,6 @@ from framework.test.base import Test
__all__ = [
'TestProfile',
'load_test_profile',
- 'merge_test_profiles'
]
@@ -298,21 +297,6 @@ class TestProfile(object):
"""
self.filters.append(function)
- def update(self, *profiles):
- """ Updates the contents of this TestProfile instance with another
-
- This method overwrites key:value pairs in self with those in the
- provided profiles argument. This allows multiple TestProfiles to be
- called in the same run; which could be used to run piglit and external
- suites at the same time.
-
- Arguments:
- profiles -- one or more TestProfile-like objects to be merged.
-
- """
- for profile in profiles:
- self.test_list.update(profile.test_list)
-
@contextlib.contextmanager
def group_manager(self, test_class, group, prefix=None, **default_args):
"""A context manager to make working with flat groups simple.
@@ -447,24 +431,7 @@ def load_test_profile(filename):
'Check your spelling?'.format(filename))
-def merge_test_profiles(profiles):
- """ Helper for loading and merging TestProfile instances
-
- Takes paths to test profiles as arguments and returns a single merged
- TestProfile instance.
-
- Arguments:
- profiles -- a list of one or more paths to profile files.
-
- """
- profile = load_test_profile(profiles.pop())
- with profile.allow_reassignment:
- for p in profiles:
- profile.update(load_test_profile(p))
- return profile
-
-
-def run(profile, logger, backend, concurrency):
+def run(profiles, logger, backend, concurrency):
"""Runs all tests using Thread pool.
When called this method will flatten out self.tests into self.test_list,
@@ -478,30 +445,52 @@ def run(profile, logger, backend, concurrency):
Finally it will print a final summary of the tests.
Arguments:
- profile -- a Profile ojbect.
- logger -- a log.LogManager instance.
- backend -- a results.Backend derived instance.
+ profiles -- a list of Profile instances.
+ logger -- a log.LogManager instance.
+ backend -- a results.Backend derived instance.
"""
chunksize = 1
- profile.prepare_test_list()
- log = LogManager(logger, len(profile.test_list))
+ for p in profiles:
+ p.prepare_test_list()
+ log = LogManager(logger, sum(len(p.test_list) for p in profiles))
- def test(pair, this_pool=None):
+ def test(name, test, profile, this_pool=None):
"""Function to call test.execute from map"""
- name, test = pair
with backend.write_test(name) as w:
test.execute(name, log.get(), profile.dmesg, profile.monitoring)
w(test.result)
if profile.monitoring.abort_needed:
this_pool.terminate()
- def run_threads(pool, testlist):
+ def run_threads(pool, profile, filterby=None):
""" Open a pool, close it, and join it """
- pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
+ iterable = six.iteritems(profile.test_list)
+ if filterby:
+ iterable = (x for x in iterable if filterby(x))
+
+ pool.imap(lambda pair: test(pair[0], pair[1], profile, pool),
+ iterable, chunksize)
pool.close()
pool.join()
+ def run_profile(profile):
+ """Run an individual profile."""
+ profile.setup()
+ if concurrency == "all":
+ run_threads(multi, profile)
+ elif concurrency == "none":
+ run_threads(single, profile)
+ else:
+ assert concurrency == "some"
+ # Filter and return only thread safe tests to the threaded pool
+ run_threads(multi, profile, lambda x: x[1].run_concurrent)
+
+ # Filter and return the non thread safe tests to the single
+ # pool
+ run_threads(single, profile, lambda x: not x[1].run_concurrent)
+ profile.teardown()
+
# Multiprocessing.dummy is a wrapper around Threading that provides a
# multiprocessing compatible API
#
@@ -509,25 +498,12 @@ def run(profile, logger, backend, concurrency):
single = multiprocessing.dummy.Pool(1)
multi = multiprocessing.dummy.Pool()
- profile.setup()
try:
- if concurrency == "all":
- run_threads(multi, six.iteritems(profile.test_list))
- elif concurrency == "none":
- run_threads(single, six.iteritems(profile.test_list))
- else:
- assert concurrency == "some"
-
- # Filter and return only thread safe tests to the threaded pool
- run_threads(multi, (x for x in six.iteritems(profile.test_list)
- if x[1].run_concurrent))
- # Filter and return the non thread safe tests to the single
- # pool
- run_threads(single, (x for x in six.iteritems(profile.test_list)
- if not x[1].run_concurrent))
+ for p in profiles:
+ run_profile(p)
finally:
log.get().summary()
- profile.teardown()
- if profile.monitoring.abort_needed:
- raise exceptions.PiglitAbort(profile.monitoring.error_message)
+ for p in profiles:
+ if p.monitoring.abort_needed:
+ raise exceptions.PiglitAbort(p.monitoring.error_message)
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 8af844809..9e82bba99 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -312,8 +312,10 @@ def run(input_):
backend.initialize(_create_metadata(
args, args.name or path.basename(args.results_path)))
- profile = framework.profile.merge_test_profiles(args.test_profile)
- profile.results_dir = args.results_path
+ profiles = [framework.profile.load_test_profile(p) for p in args.test_profile]
+ for p in profiles:
+ p.results_dir = args.results_path
+
# If a test list is provided then set the forced_test_list value.
if args.test_list:
if len(args.test_profiles) != 1:
@@ -322,18 +324,20 @@ def run(input_):
with open(args.test_list) as test_list:
# Strip newlines
- profile.forced_test_list = list([t.strip() for t in test_list])
+ profiles[0].forced_test_list = list([t.strip() for t in test_list])
# Set the dmesg type
if args.dmesg:
- profile.dmesg = args.dmesg
+ for p in profiles:
+ p.dmesg = args.dmesg
if args.monitored:
- profile.monitoring = args.monitored
+ for p in profiles:
+ p.monitoring = args.monitored
time_elapsed = framework.results.TimeAttribute(start=time.time())
- framework.profile.run(profile, args.log_level, backend, args.concurrency)
+ framework.profile.run(profiles, args.log_level, backend, args.concurrency)
time_elapsed.end = time.time()
backend.finalize({'time_elapsed': time_elapsed.to_json()})
@@ -391,17 +395,20 @@ def resume(input_):
if args.no_retry or result.result != 'incomplete':
options.OPTIONS.exclude_tests.add(name)
- profile = framework.profile.merge_test_profiles(results.options['profile'])
- profile.results_dir = args.results_path
- if options.OPTIONS.dmesg:
- profile.dmesg = options.OPTIONS.dmesg
+ profiles = [framework.profile.load_test_profile(p)
+ for p in results.options['profile']]
+ for p in profiles:
+ p.results_dir = args.results_path
+
+ if options.OPTIONS.dmesg:
+ p.dmesg = options.OPTIONS.dmesg
- if options.OPTIONS.monitored:
- profile.monitoring = options.OPTIONS.monitored
+ if options.OPTIONS.monitored:
+ p.monitoring = options.OPTIONS.monitored
# This is resumed, don't bother with time since it won't be accurate anyway
framework.profile.run(
- profile,
+ profiles,
results.options['log_level'],
backend,
results.options['concurrent'])
diff --git a/framework/summary/feature.py b/framework/summary/feature.py
index 9a17792e6..a66a49b2f 100644
--- a/framework/summary/feature.py
+++ b/framework/summary/feature.py
@@ -66,8 +66,7 @@ class FeatResults(object): # pylint: disable=too-few-public-methods
options.OPTIONS.exclude_filter = exclude_filter
options.OPTIONS.include_filter = include_filter
- profiles[feature] = profile.TestProfile()
- profiles[feature].update(profile_orig)
+ profiles[feature] = profile_orig.copy()
# An empty list will raise PiglitFatalError exception
# But for reporting we need to handle this situation
diff --git a/unittests/framework/summary/test_feature.py b/unittests/framework/summary/test_feature.py
index 370b3602e..fc05941b6 100644
--- a/unittests/framework/summary/test_feature.py
+++ b/unittests/framework/summary/test_feature.py
@@ -65,16 +65,15 @@ def _maketest(res):
PROFILE = profile.TestProfile()
-PROFILE.test_list = {
- 'spec@gl-1.0@a': _maketest('pass'),
- 'spec@gl-1.0@b': _maketest('warn'),
- 'spec@gl-1.0@c': _maketest('pass'),
- 'spec@gl-1.0@d': _maketest('fail'),
- 'spec@gl-2.0@a': _maketest('fail'),
- 'spec@gl-2.0@b': _maketest('crash'),
- 'spec@gl-2.0@c': _maketest('pass'),
- 'spec@gl-2.0@d': _maketest('fail'),
-}
+PROFILE.test_list = profile.TestDict()
+PROFILE.test_list['spec@gl-1.0@a'] = _maketest('pass')
+PROFILE.test_list['spec@gl-1.0@b'] = _maketest('warn')
+PROFILE.test_list['spec@gl-1.0@c'] = _maketest('pass')
+PROFILE.test_list['spec@gl-1.0@d'] = _maketest('fail')
+PROFILE.test_list['spec@gl-2.0@a'] = _maketest('fail')
+PROFILE.test_list['spec@gl-2.0@b'] = _maketest('crash')
+PROFILE.test_list['spec@gl-2.0@c'] = _maketest('pass')
+PROFILE.test_list['spec@gl-2.0@d'] = _maketest('fail')
class TestFeatResult(object):
diff --git a/unittests/framework/test_profile.py b/unittests/framework/test_profile.py
index 5ef95e4c3..4ffabfad5 100644
--- a/unittests/framework/test_profile.py
+++ b/unittests/framework/test_profile.py
@@ -101,23 +101,6 @@ class TestTestProfile(object):
profile_.dmesg = False
assert isinstance(profile_.dmesg, dmesg.DummyDmesg)
- def test_update_test_list(self):
- """profile.TestProfile.update(): updates TestProfile.test_list"""
- profile1 = profile.TestProfile()
- group1 = grouptools.join('group1', 'test1')
- group2 = grouptools.join('group1', 'test2')
-
- profile1.test_list[group1] = utils.Test(['test1'])
-
- profile2 = profile.TestProfile()
- profile2.test_list[group1] = utils.Test(['test3'])
- profile2.test_list[group2] = utils.Test(['test2'])
-
- with profile1.allow_reassignment:
- profile1.update(profile2)
-
- assert dict(profile1.test_list) == dict(profile2.test_list)
-
class TestPrepareTestList(object):
"""Create tests for TestProfile.prepare_test_list filtering."""