author     Dylan Baker <dylan@pnwbakers.com>  2016-10-05 15:49:37 -0700
committer  Dylan Baker <dylan@pnwbakers.com>  2016-11-10 10:49:09 -0800
commit     ccb430defa1b38fa850db5f5dff739440248b5c4 (patch)
tree       cc924f21f5dd63f9c1da130f168605aa24683614 /framework
parent     d3c980aa9878ee03abe955c2615523aa3d3e04f6 (diff)
framework: Split the run method out of profile.
There are a couple of reasons for doing this. First, profile is a big,
complex mess that does entirely too much, and this helps with that.
Second, there are bugs in the way running two profiles at the same time
works, and this is going to fix that.

Signed-off-by: Dylan Baker <dylanx.c.baker@intel.com>
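For consumers of the API, the call site changes from a method call to a
module-level function call. A minimal sketch of the before and after,
reusing the argument names from framework/programs/run.py below:

    # Before this commit: run() was a method on the profile object.
    profile.run(args.log_level, backend)

    # After this commit: run() is a module-level function in
    # framework.profile that takes the profile explicitly.
    import framework.profile
    framework.profile.run(profile, args.log_level, backend)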
Diffstat (limited to 'framework')
-rw-r--r--  framework/profile.py          | 135
-rw-r--r--  framework/programs/run.py     |   4
-rw-r--r--  framework/summary/feature.py  |   2
3 files changed, 71 insertions(+), 70 deletions(-)
diff --git a/framework/profile.py b/framework/profile.py
index f63180a4e..1b9448f30 100644
--- a/framework/profile.py
+++ b/framework/profile.py
@@ -244,7 +244,7 @@ class TestProfile(object):
         """
         self._monitoring = Monitoring(monitored)
 
-    def _prepare_test_list(self):
+    def prepare_test_list(self):
         """ Prepare tests for running
 
         Flattens the nested group hierarchy into a flat dictionary using '/'
@@ -287,72 +287,6 @@ class TestProfile(object):
             raise exceptions.PiglitFatalError(
                 'There are no tests scheduled to run. Aborting run.')
 
-    def run(self, logger, backend):
-        """ Runs all tests using Thread pool
-
-        When called this method will flatten out self.tests into
-        self.test_list, then will prepare a logger, and begin executing tests
-        through it's Thread pools.
-
-        Based on the value of options.OPTIONS.concurrent it will either run all
-        the tests concurrently, all serially, or first the thread safe tests
-        then the serial tests.
-
-        Finally it will print a final summary of the tests
-
-        Arguments:
-        backend -- a results.Backend derived instance
-
-        """
-
-        chunksize = 1
-
-        self._prepare_test_list()
-        log = LogManager(logger, len(self.test_list))
-
-        def test(pair, this_pool=None):
-            """Function to call test.execute from map"""
-            name, test = pair
-            with backend.write_test(name) as w:
-                test.execute(name, log.get(), self.dmesg, self.monitoring)
-                w(test.result)
-            if self._monitoring.abort_needed:
-                this_pool.terminate()
-
-        def run_threads(pool, testlist):
-            """ Open a pool, close it, and join it """
-            pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
-            pool.close()
-            pool.join()
-
-        # Multiprocessing.dummy is a wrapper around Threading that provides a
-        # multiprocessing compatible API
-        #
-        # The default value of pool is the number of virtual processor cores
-        single = multiprocessing.dummy.Pool(1)
-        multi = multiprocessing.dummy.Pool()
-
-        self.setup()
-        try:
-            if options.OPTIONS.concurrent == "all":
-                run_threads(multi, six.iteritems(self.test_list))
-            elif options.OPTIONS.concurrent == "none":
-                run_threads(single, six.iteritems(self.test_list))
-            else:
-                # Filter and return only thread safe tests to the threaded pool
-                run_threads(multi, (x for x in six.iteritems(self.test_list)
-                                    if x[1].run_concurrent))
-                # Filter and return the non thread safe tests to the single
-                # pool
-                run_threads(single, (x for x in six.iteritems(self.test_list)
-                                     if not x[1].run_concurrent))
-        finally:
-            log.get().summary()
-            self.teardown()
-
-        if self._monitoring.abort_needed:
-            raise exceptions.PiglitAbort(self._monitoring.error_message)
-
     def filter_tests(self, function):
         """Filter out tests that return false from the supplied function
@@ -512,3 +446,70 @@ def merge_test_profiles(profiles):
     for p in profiles:
         profile.update(load_test_profile(p))
     return profile
+
+
+def run(profile, logger, backend):
+    """Runs all tests using a thread pool.
+
+    When called this function will flatten out profile.tests into
+    profile.test_list, then will prepare a logger, and begin executing tests
+    through its thread pools.
+
+    Based on the value of options.OPTIONS.concurrent it will either run all
+    the tests concurrently, all serially, or first the thread safe tests then
+    the serial tests.
+
+    Finally, it will print a summary of the tests.
+
+    Arguments:
+    profile -- a TestProfile object.
+    logger -- a log handler name, passed to LogManager.
+    backend -- a results.Backend derived instance.
+    """
+    chunksize = 1
+
+    profile.prepare_test_list()
+    log = LogManager(logger, len(profile.test_list))
+
+    def test(pair, this_pool=None):
+        """Function to call test.execute from map."""
+        name, test = pair
+        with backend.write_test(name) as w:
+            test.execute(name, log.get(), profile.dmesg, profile.monitoring)
+            w(test.result)
+        if profile.monitoring.abort_needed:
+            this_pool.terminate()
+
+    def run_threads(pool, testlist):
+        """Feed a pool, close it, and join it."""
+        pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
+        pool.close()
+        pool.join()
+
+    # multiprocessing.dummy is a wrapper around threading that provides a
+    # multiprocessing-compatible API.
+    #
+    # The default number of threads in a pool is the number of virtual
+    # processor cores.
+    single = multiprocessing.dummy.Pool(1)
+    multi = multiprocessing.dummy.Pool()
+
+    profile.setup()
+    try:
+        if options.OPTIONS.concurrent == "all":
+            run_threads(multi, six.iteritems(profile.test_list))
+        elif options.OPTIONS.concurrent == "none":
+            run_threads(single, six.iteritems(profile.test_list))
+        else:
+            # Feed only the thread-safe tests to the threaded pool...
+            run_threads(multi, (x for x in six.iteritems(profile.test_list)
+                                if x[1].run_concurrent))
+            # ...and the non-thread-safe tests to the single-threaded pool.
+            run_threads(single, (x for x in six.iteritems(profile.test_list)
+                                 if not x[1].run_concurrent))
+    finally:
+        log.get().summary()
+        profile.teardown()
+
+    if profile.monitoring.abort_needed:
+        raise exceptions.PiglitAbort(profile.monitoring.error_message)
diff --git a/framework/programs/run.py b/framework/programs/run.py
index 5a2fcd054..023aa2ee5 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -336,7 +336,7 @@ def run(input_):
     if args.monitored:
         profile.monitoring = args.monitored
 
-    profile.run(args.log_level, backend)
+    framework.profile.run(profile, args.log_level, backend)
 
     results.time_elapsed.end = time.time()
     backend.finalize({'time_elapsed': results.time_elapsed.to_json()})
@@ -404,7 +404,7 @@ def resume(input_):
        profile.monitoring = options.OPTIONS.monitored
 
    # This is resumed, don't bother with time since it won't be accurate anyway
-    profile.run(results.options['log_level'], backend)
+    framework.profile.run(profile, results.options['log_level'], backend)
 
    backend.finalize()
diff --git a/framework/summary/feature.py b/framework/summary/feature.py
index cd903c2ee..9a17792e6 100644
--- a/framework/summary/feature.py
+++ b/framework/summary/feature.py
@@ -72,7 +72,7 @@ class FeatResults(object):  # pylint: disable=too-few-public-methods
            # An empty list will raise PiglitFatalError exception
            # But for reporting we need to handle this situation
            try:
-                profiles[feature]._prepare_test_list()
+                profiles[feature].prepare_test_list()
            except exceptions.PiglitFatalError:
                pass
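
The dispatch strategy in run() above is easy to miss in diff form. Below is
a minimal, self-contained sketch of the same idea, assuming a made-up
FakeTest class in place of piglit's real Test objects (none of this is
piglit code):

    import multiprocessing.dummy

    class FakeTest(object):
        """Hypothetical stand-in for a piglit Test object."""
        def __init__(self, name, run_concurrent):
            self.name = name
            self.run_concurrent = run_concurrent

        def execute(self):
            print('running %s' % self.name)

    def run_threads(pool, tests):
        """Feed tests to a pool, then close and join it."""
        pool.imap(lambda t: t.execute(), tests, 1)
        pool.close()
        pool.join()

    tests = [FakeTest('a', True), FakeTest('b', False), FakeTest('c', True)]

    # multiprocessing.dummy.Pool is a thread pool exposing the
    # multiprocessing.Pool API.
    single = multiprocessing.dummy.Pool(1)  # one worker thread: serial
    multi = multiprocessing.dummy.Pool()    # one worker per virtual core

    # The mixed mode: thread-safe tests run concurrently, the rest serially.
    run_threads(multi, (t for t in tests if t.run_concurrent))
    run_threads(single, (t for t in tests if not t.run_concurrent))

Each pool is used once and then closed, which is why run() creates both
pools up front and picks which one(s) to feed based on
options.OPTIONS.concurrent.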