author     Dylan Baker <baker.dylan.c@gmail.com>  2015-08-03 17:55:07 -0700
committer  Dylan Baker <baker.dylan.c@gmail.com>  2015-09-22 14:45:48 -0700
commit     b365367f43ce239092851a6b9a2a152628b08c8b (patch)
tree       ed1a1493498896c39cc299aceb5c1de15c90207c /framework
parent     9b39f0c3a706448e2fd4661621c5c46c5b397f38 (diff)
framework: replace TestResult dict with an object
This is a very invasive patch, because it replaces one of our core data structures with a completely different kind of object. The new object is not dict-like (it doesn't use obj[key] = value syntax); instead it's a standard object with standard attributes, so result['time'] becomes result.time.

This approach has a couple of advantages. First, it allows us to use properties, which let us encapsulate a lot of logic that was previously scattered through the summary module in the results module, where it is easier to test for correctness. The second advantage of that encapsulation is that the correct behavior is used everywhere, instead of just in most places. Finally, it allows us to use the flyweight pattern on the result objects, limiting the amount of memory consumed.

Signed-off-by: Dylan Baker <dylanx.c.baker@intel.com>
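To make the change concrete, here is a minimal before/after sketch of consumer code (illustrative only; the values are invented):

    # Before this patch: TestResult was a dict subclass.
    result = TestResult({'result': 'pass'})
    result['time'] = 1.25
    if result['result'] == 'pass':
        print(result['time'])

    # After this patch: TestResult is a regular object with attributes.
    result = TestResult('pass')
    result.time = 1.25
    if result.result == 'pass':  # the result property returns a status.Status
        print(result.time)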
Diffstat (limited to 'framework')
-rw-r--r--framework/backends/abstract.py2
-rw-r--r--framework/backends/json.py45
-rw-r--r--framework/backends/junit.py30
-rw-r--r--framework/dmesg.py9
-rw-r--r--framework/programs/run.py2
-rw-r--r--framework/programs/summary.py6
-rw-r--r--framework/results.py133
-rw-r--r--framework/summary.py41
-rw-r--r--framework/test/base.py58
-rw-r--r--framework/test/deqp.py10
-rw-r--r--framework/test/gleantest.py6
-rw-r--r--framework/test/gtest.py8
-rw-r--r--framework/test/oclconform.py6
-rw-r--r--framework/test/piglit_test.py7
-rw-r--r--framework/tests/base_tests.py38
-rw-r--r--framework/tests/compressed_backend_tests.py3
-rw-r--r--framework/tests/deqp_tests.py16
-rw-r--r--framework/tests/dmesg_tests.py51
-rw-r--r--framework/tests/gleantest_tests.py4
-rw-r--r--framework/tests/json_backend_tests.py66
-rw-r--r--framework/tests/json_results_update_tests.py87
-rw-r--r--framework/tests/json_tests.py4
-rw-r--r--framework/tests/junit_backends_tests.py89
-rw-r--r--framework/tests/piglit_test_tests.py20
-rw-r--r--framework/tests/results_tests.py239
-rw-r--r--framework/tests/summary_tests.py37
-rw-r--r--framework/tests/utils.py22
27 files changed, 705 insertions, 334 deletions
diff --git a/framework/backends/abstract.py b/framework/backends/abstract.py
index 4a87857d7..26151f42d 100644
--- a/framework/backends/abstract.py
+++ b/framework/backends/abstract.py
@@ -171,7 +171,7 @@ class FileBackend(Backend):
self._file_sync = file_fsync
self._write_final = write_compressed
- __INCOMPLETE = TestResult({'result': INCOMPLETE})
+ __INCOMPLETE = TestResult(result=INCOMPLETE)
def __fsync(self, file_):
""" Sync the file to disk
diff --git a/framework/backends/json.py b/framework/backends/json.py
index 12a5ac7de..e88c9c881 100644
--- a/framework/backends/json.py
+++ b/framework/backends/json.py
@@ -42,7 +42,7 @@ __all__ = [
]
# The current version of the JSON results
-CURRENT_JSON_VERSION = 5
+CURRENT_JSON_VERSION = 6
# The level to indent a final file
INDENT = 4
@@ -58,13 +58,18 @@ def piglit_encoder(obj):
return str(obj)
elif isinstance(obj, set):
return list(obj)
+ elif hasattr(obj, 'to_json'):
+ return obj.to_json()
return obj
def piglit_decoder(obj):
"""Json decoder for piglit that can load TestResult objects."""
- if isinstance(obj, dict) and 'result' in obj:
- return results.TestResult.load(obj)
+ if isinstance(obj, dict):
+ if obj.get('__type__', '') == 'TestResult':
+ return results.TestResult.from_dict(obj)
+ elif obj.get('__type__', '') == 'Subtests':
+ return results.Subtests.from_dict(obj)
return obj
@@ -287,6 +292,7 @@ def _update_results(results, filepath):
2: _update_two_to_three,
3: _update_three_to_four,
4: _update_four_to_five,
+ 5: _update_five_to_six,
}
while results.results_version < CURRENT_JSON_VERSION:
@@ -326,7 +332,7 @@ def _write(results, file_):
indent=INDENT)
-def _update_zero_to_one(results):
+def _update_zero_to_one(result):
""" Update version zero results to version 1 results
Changes from version 0 to version 1
@@ -347,7 +353,10 @@ def _update_zero_to_one(results):
updated_results = {}
remove = set()
- for name, test in results.tests.iteritems():
+ for name, test in result.tests.iteritems():
+ assert not isinstance(test, results.TestResult), \
+ 'Test was erroneously turned into a TestResult'
+
# fix dmesg errors if any
if isinstance(test.get('dmesg'), list):
test['dmesg'] = '\n'.join(test['dmesg'])
@@ -429,13 +438,13 @@ def _update_zero_to_one(results):
updated_results[testname] = test
for name in remove:
- del results.tests[name]
- results.tests.update(updated_results)
+ del result.tests[name]
+ result.tests.update(updated_results)
# set the results version
- results.results_version = 1
+ result.results_version = 1
- return results
+ return result
def _update_one_to_two(results):
@@ -533,6 +542,24 @@ def _update_four_to_five(results):
return results
+def _update_five_to_six(result):
+ """Updates json results from version 5 to 6.
+
+ This uses a special field for marking TestResult instances, rather than
+ just checking for fields we expect.
+
+ """
+ new_tests = {}
+
+ for name, test in result.tests.iteritems():
+ new_tests[name] = results.TestResult.from_dict(test)
+
+ result.tests = new_tests
+ result.results_version = 6
+
+ return result
+
+
REGISTRY = Registry(
extensions=['', '.json'],
backend=JSONBackend,
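
The "__type__" tag added in this file is what lets piglit_decoder tell new-style results apart from arbitrary dictionaries. A rough round-trip sketch using the hooks above (the result value is invented for illustration):

    import json
    from framework import results
    from framework.backends.json import piglit_encoder, piglit_decoder

    res = results.TestResult('pass')
    # piglit_encoder calls res.to_json(), which embeds "__type__": "TestResult"
    text = json.dumps(res, default=piglit_encoder)
    # piglit_decoder dispatches on "__type__" and rebuilds the object
    back = json.loads(text, object_hook=piglit_decoder)
    assert isinstance(back, results.TestResult)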
diff --git a/framework/backends/junit.py b/framework/backends/junit.py
index f052007fe..a2cdd49a5 100644
--- a/framework/backends/junit.py
+++ b/framework/backends/junit.py
@@ -139,10 +139,10 @@ class JUnitBackend(FileBackend):
res = None
# Add relevant result value, if the result is pass then it doesn't
# need one of these statuses
- if data['result'] == 'skip':
+ if data.result == 'skip':
res = etree.SubElement(element, 'skipped')
- elif data['result'] in ['fail', 'dmesg-warn', 'dmesg-fail']:
+ elif data.result in ['fail', 'dmesg-warn', 'dmesg-fail']:
if expected_result == "failure":
err.text += "\n\nWARN: passing test as an expected failure"
res = etree.SubElement(element, 'skipped',
@@ -150,7 +150,7 @@ class JUnitBackend(FileBackend):
else:
res = etree.SubElement(element, 'failure')
- elif data['result'] == 'crash':
+ elif data.result == 'crash':
if expected_result == "error":
err.text += "\n\nWARN: passing test as an expected crash"
res = etree.SubElement(element, 'skipped',
@@ -165,7 +165,7 @@ class JUnitBackend(FileBackend):
# Add the piglit type to the failure result
if res is not None:
- res.attrib['type'] = str(data['result'])
+ res.attrib['type'] = str(data.result)
# Split the name of the test and the group (what junit refers to as
# classname), and replace piglits '/' separated groups with '.', after
@@ -196,22 +196,22 @@ class JUnitBackend(FileBackend):
element = etree.Element('testcase', name=full_test_name,
classname=classname,
# Incomplete will not have a time.
- time=str(data.get('time')),
- status=str(data['result']))
+ time=str(data.time),
+ status=str(data.result))
# If this is an incomplete status then none of these values will be
# available, nor
- if data['result'] != 'incomplete':
+ if data.result != 'incomplete':
# Add stdout
out = etree.SubElement(element, 'system-out')
- out.text = data['out']
+ out.text = data.out
# Prepend command line to stdout
- out.text = data['command'] + '\n' + out.text
+ out.text = data.command + '\n' + out.text
# Add stderr
err = etree.SubElement(element, 'system-err')
- err.text = data['err']
+ err.text = data.err
calculate_result()
else:
etree.SubElement(element, 'failure', message='Incomplete run.')
@@ -252,15 +252,15 @@ def _load(results_file):
if name.endswith('_'):
name = name[:-1]
- result['result'] = status.status_lookup(test.attrib['status'])
- result['time'] = float(test.attrib['time'])
- result['err'] = test.find('system-err').text
+ result.result = test.attrib['status']
+ result.time = float(test.attrib['time'])
+ result.err = test.find('system-err').text
# The command is prepended to system-out, so we need to separate those
# into two separate elements
out = test.find('system-out').text.split('\n')
- result['command'] = out[0]
- result['out'] = '\n'.join(out[1:])
+ result.command = out[0]
+ result.out = '\n'.join(out[1:])
run_result.tests[name] = result
diff --git a/framework/dmesg.py b/framework/dmesg.py
index 1a5f629fc..a024db276 100644
--- a/framework/dmesg.py
+++ b/framework/dmesg.py
@@ -132,15 +132,14 @@ class BaseDmesg(object):
else:
return result
- result['result'] = replace(result['result'])
+ result.result = replace(result.result)
# Replace the results of any subtests
- if 'subtest' in result:
- for key, value in result['subtest'].iteritems():
- result['subtest'][key] = replace(value)
+ for key, value in result.subtests.iteritems():
+ result.subtests[key] = replace(value)
# Add the dmesg values to the result
- result['dmesg'] = "\n".join(self._new_messages)
+ result.dmesg = "\n".join(self._new_messages)
return result
diff --git a/framework/programs/run.py b/framework/programs/run.py
index bf8cc1023..16c3d3744 100644
--- a/framework/programs/run.py
+++ b/framework/programs/run.py
@@ -328,7 +328,7 @@ def resume(input_):
# Don't re-run tests that have already completed, incomplete status tests
# have obviously not completed.
for name, result in results.tests.iteritems():
- if args.no_retry or result['result'] != 'incomplete':
+ if args.no_retry or result.result != 'incomplete':
opts.exclude_tests.add(name)
profile = framework.profile.merge_test_profiles(results.options['profile'])
diff --git a/framework/programs/summary.py b/framework/programs/summary.py
index 939bfd980..df8173beb 100644
--- a/framework/programs/summary.py
+++ b/framework/programs/summary.py
@@ -181,9 +181,9 @@ def csv(input_):
def write_results(output):
for name, result in testrun.tests.iteritems():
- output.write("{},{},{},{}\n".format(name, result.get('time', ""),
- result.get('returncode', ""),
- result['result']))
+ output.write("{},{},{},{}\n".format(name, result.time,
+ result.returncode,
+ result.result))
if args.output != "stdout":
with open(args.output, 'w') as output:
diff --git a/framework/results.py b/framework/results.py
index dd1a64530..5ba0533d9 100644
--- a/framework/results.py
+++ b/framework/results.py
@@ -22,7 +22,8 @@
""" Module for results generation """
from __future__ import print_function, absolute_import
-import framework.status as status
+
+from framework import status, exceptions
__all__ = [
'TestrunResult',
@@ -30,58 +31,112 @@ __all__ = [
]
-class TestResult(dict):
- def recursive_update(self, dictionary):
- """ Recursively update the TestResult
-
- The problem with using self.update() is this:
- >>> t = TestResult()
- >>> t.update({'subtest': {'test1': 'pass'}})
- >>> t.update({'subtest': {'test2': 'pass'}})
- >>> t['subtest']
- {'test2': 'pass'}
+class Subtests(dict):
+ def __setitem__(self, name, value):
+ super(Subtests, self).__setitem__(name, status.status_lookup(value))
- This function is different, because it recursively updates self, it
- doesn't clobber existing entires in the same way
- >>> t = TestResult()
- >>> t.recursive_update({'subtest': {'test1': 'pass'}})
- >>> t.recursive_update({'subtest': {'test2': 'pass'}})
- >>> t['subtest']
- {'test1': 'pass', 'test2': 'pass'}
+ def to_json(self):
+ res = dict(self)
+ res['__type__'] = 'Subtests'
+ return res
- Arguments:
- dictionary -- a dictionary instance to update the TestResult with
+ @classmethod
+ def from_dict(cls, dict_):
+ res = cls(dict_)
+
+ if '__type__' in res:
+ del res['__type__']
+
+ return res
+
+
+class TestResult(object):
+ """An object represting the result of a single test."""
+ __slots__ = ['returncode', 'err', 'out', 'time', 'command', 'environment',
+ 'subtests', 'dmesg', '__result', 'images', 'traceback']
+
+ def __init__(self, result=None):
+ self.returncode = None
+ self.err = str()
+ self.out = str()
+ self.time = float()
+ self.command = str()
+ self.environment = str()
+ self.subtests = Subtests()
+ self.dmesg = str()
+ self.images = None
+ self.traceback = None
+ if result:
+ self.result = result
+ else:
+ self.__result = status.NOTRUN
+
+ @property
+ def result(self):
+ """Return the result of the test.
+
+ If there are subtests return the "worst" value of those subtests. If
+ there are not return the stored value of the test.
"""
- def update(d, u, check):
- for k, v in u.iteritems():
- if isinstance(v, dict):
- d[k] = update(d.get(k, {}), v, True)
- else:
- if check and k in d:
- print("Warning: duplicate subtest: {} value: {} old value: {}".format(k, v, d[k]))
- d[k] = v
- return d
-
- update(self, dictionary, False)
+ return self.__result
+
+ @result.setter
+ def result(self, new):
+ try:
+ self.__result = status.status_lookup(new)
+ except exceptions.PiglitInternalError as e:
+ raise exceptions.PiglitFatalError(str(e))
+
+ def to_json(self):
+ """Return the TestResult as a json serializable object."""
+ obj = {
+ '__type__': 'TestResult',
+ 'returncode': self.returncode,
+ 'err': self.err,
+ 'out': self.out,
+ 'time': self.time,
+ 'environment': self.environment,
+ 'subtests': self.subtests,
+ 'result': self.result,
+ }
+ return obj
@classmethod
- def load(cls, res):
- """Load an already generated result.
+ def from_dict(cls, dict_):
+ """Load an already generated result in dictionary form.
This is used as an alternate constructor which converts an existing
dictionary into a TestResult object. It converts a key 'result' into a
status.Status object
"""
- result = cls(res)
+ inst = cls()
+
+ # TODO: There's probably a more clever way to do this
+ for each in ['returncode', 'err', 'out', 'time', 'command',
+ 'environment', 'result', 'dmesg']:
+ if each in dict_:
+ setattr(inst, each, dict_[each])
- # Replace the result with a status object. 'result' is a required key
- # for results, so don't do any checking. This should fail if it doesn't
- # exist.
- result['result'] = status.status_lookup(result['result'])
+ if 'subtests' in dict_:
+ for name, value in dict_['subtests'].iteritems():
+ inst.subtests[name] = value
- return result
+ return inst
+
+ def update(self, dict_):
+ """Update the results and subtests fields from a piglit test.
+
+ Native piglit tests output their data as valid json, and piglit uses
+ the json module to parse this data. This method consumes that raw
+ dictionary data and updates itself.
+
+ """
+ if 'result' in dict_:
+ self.result = dict_['result']
+ elif 'subtest' in dict_:
+ self.subtests.update(dict_['subtest'])
class TestrunResult(object):
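
A short usage sketch of the new results API defined above (illustrative only; the statuses and subtest names are made up):

    from framework import results, status

    res = results.TestResult()               # result defaults to status.NOTRUN
    res.result = 'fail'                      # the setter converts via status.status_lookup
    res.subtests['a'] = 'pass'               # Subtests.__setitem__ converts to a Status
    res.update({'subtest': {'b': 'crash'}})  # raw dict form emitted by native piglit tests
    assert isinstance(res.result, status.Status)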
diff --git a/framework/summary.py b/framework/summary.py
index 80cb18107..bdbd6e26d 100644
--- a/framework/summary.py
+++ b/framework/summary.py
@@ -38,6 +38,7 @@ from mako.template import Template
# a local variable status exists, prevent accidental overloading by renaming
# the module
import framework.status as so
+import framework.results
from framework import grouptools, backends, exceptions
__all__ = [
@@ -188,7 +189,7 @@ class HTMLIndex(list):
# If the "group" at the top of the key heirachy contains
# 'subtest' then it is really not a group, link to that page
try:
- if each.tests[grouptools.groupname(key)]['subtest']:
+ if each.tests[grouptools.groupname(key)].subtests:
href = grouptools.groupname(key)
except KeyError:
href = key
@@ -342,24 +343,18 @@ class Summary:
temp_results = {}
for key, value in results.tests.iteritems():
- # if the first character of key is a / then our while loop will
- # become an infinite loop. Beyond that / should never be the
- # leading character, if it is then there is a bug in one of the
- # test profiles.
- assert key[0] != '/'
-
# Treat a test with subtests as if it is a group, assign the
# subtests' statuses and fractions down to the test, and then
# proceed like normal.
- if 'subtest' in value:
- for (subt, subv) in value['subtest'].iteritems():
+ if value.subtests:
+ for (subt, subv) in value.subtests.iteritems():
subt = grouptools.join(key, subt)
subv = so.status_lookup(subv)
# Add the subtest to the fractions and status lists
fraction[subt] = subv.fraction
status[subt] = subv
- temp_results.update({subt: {'result': subv}})
+ temp_results.update({subt: framework.results.TestResult(subv)})
self.tests['all'].add(subt)
while subt != '':
@@ -377,11 +372,11 @@ class Summary:
# if the status of the previous level was worse, but is not
# skip
while key != '':
- fgh(key, value['result'])
+ fgh(key, value.result)
key = grouptools.groupname(key)
# when we hit the root update the 'all' group and stop
- fgh('all', value['result'])
+ fgh('all', value.result)
# Update the the results.tests dictionary with the subtests so that
# they are entered into the appropriate pages other than all.
@@ -395,7 +390,7 @@ class Summary:
status = []
for each in self.results:
try:
- status.append(each.tests[test]['result'])
+ status.append(each.tests[test].result)
except KeyError:
status.append(so.NOTRUN)
@@ -440,7 +435,7 @@ class Summary:
'dmesg-fail': 0, 'incomplete': 0,}
for test in results.tests.itervalues():
- self.totals[str(test['result'])] += 1
+ self.totals[str(test.result)] += 1
def generate_html(self, destination, exclude):
"""
@@ -513,15 +508,15 @@ class Summary:
escape_filename(key + ".html"))
temp_path = path.dirname(html_path)
- if value['result'] not in exclude:
+ if value.result not in exclude:
# os.makedirs is very annoying, it throws an OSError if
# the path requested already exists, so do this check to
# ensure that it doesn't
if not path.exists(temp_path):
os.makedirs(temp_path)
- if value.get('time') is not None:
- value['time'] = datetime.timedelta(0, value['time'])
+ if value.time:
+ value.time = datetime.timedelta(0, value.time)
with open(html_path, 'w') as out:
out.write(testfile.render(
@@ -581,11 +576,19 @@ class Summary:
result.
"""
+ def make_status(test):
+ elems = []
+ for res in self.results:
+ try:
+ elems.append(str(res.tests[test].result))
+ except KeyError:
+ elems.append(str(so.NOTRUN))
+ return ' '.join(elems)
+
for test in list_:
print("{test}: {statuses}".format(
test='/'.join(test.split(grouptools.SEPARATOR)),
- statuses=' '.join(str(i.tests.get(test, {'result': so.SKIP})
- ['result']) for i in self.results)))
+ statuses=make_status(test)))
def print_summary():
"""print a summary."""
diff --git a/framework/test/base.py b/framework/test/base.py
index 798e5a6ba..6be63cfe1 100644
--- a/framework/test/base.py
+++ b/framework/test/base.py
@@ -157,7 +157,7 @@ class Test(object):
self.run_concurrent = run_concurrent
self._command = copy.copy(command)
self.env = {}
- self.result = TestResult({'result': 'fail'})
+ self.result = TestResult()
self.cwd = None
self.__proc_timeout = None
@@ -180,18 +180,18 @@ class Test(object):
time_start = time.time()
dmesg.update_dmesg()
self.run()
- self.result['time'] = time.time() - time_start
+ self.result.time = time.time() - time_start
self.result = dmesg.update_result(self.result)
# This is a rare case where a bare exception is okay, since we're
# using it to log exceptions
except:
exception = sys.exc_info()
- self.result['result'] = 'fail'
- self.result['exception'] = "{}{}".format(*exception[:2])
- self.result['traceback'] = "".join(
+ self.result.result = 'fail'
+ self.result.exception = "{}{}".format(*exception[:2])
+ self.result.traceback = "".join(
traceback.format_tb(exception[2]))
- log.log(self.result['result'])
+ log.log(self.result.result)
else:
log.log('dry-run')
@@ -204,14 +204,14 @@ class Test(object):
def interpret_result(self):
"""Convert the raw output of the test into a form piglit understands.
"""
- if _is_crash_returncode(self.result['returncode']):
+ if _is_crash_returncode(self.result.returncode):
# check if the process was terminated by the timeout
if self.timeout > 0 and self.__proc_timeout.join() > 0:
- self.result['result'] = 'timeout'
+ self.result.result = 'timeout'
else:
- self.result['result'] = 'crash'
- elif self.result['returncode'] != 0 and self.result['result'] == 'pass':
- self.result['result'] = 'warn'
+ self.result.result = 'crash'
+ elif self.result.returncode != 0 and self.result.result == 'pass':
+ self.result.result = 'warn'
def run(self):
"""
@@ -223,27 +223,25 @@ class Test(object):
* For 'returncode', the value will be the numeric exit code/value.
* For 'command', the value will be command line program and arguments.
"""
- self.result['command'] = ' '.join(self.command)
- self.result['environment'] = " ".join(
+ self.result.command = ' '.join(self.command)
+ self.result.environment = " ".join(
'{0}="{1}"'.format(k, v) for k, v in itertools.chain(
self.OPTS.env.iteritems(), self.env.iteritems()))
try:
self.is_skip()
except TestIsSkip as e:
- self.result['result'] = 'skip'
- self.result['out'] = unicode(e)
- self.result['err'] = u""
- self.result['returncode'] = None
+ self.result.result = 'skip'
+ self.result.out = unicode(e)
+ self.result.returncode = None
return
try:
self._run_command()
except TestRunError as e:
- self.result['result'] = unicode(e.status)
- self.result['out'] = unicode(e)
- self.result['err'] = u""
- self.result['returncode'] = None
+ self.result.result = unicode(e.status)
+ self.result.out = unicode(e)
+ self.result.returncode = None
return
self.interpret_result()
@@ -334,9 +332,9 @@ class Test(object):
# replaces erroneous charcters with the Unicode
# "replacement character" (a white question mark inside
# a black diamond).
- self.result['out'] = out.decode('utf-8', 'replace')
- self.result['err'] = err.decode('utf-8', 'replace')
- self.result['returncode'] = returncode
+ self.result.out = out.decode('utf-8', 'replace')
+ self.result.err = err.decode('utf-8', 'replace')
+ self.result.returncode = returncode
def __eq__(self, other):
return self.command == other.command
@@ -367,7 +365,7 @@ class WindowResizeMixin(object):
"""
for _ in xrange(5):
super(WindowResizeMixin, self)._run_command()
- if "Got spurious window resize" not in self.result['out']:
+ if "Got spurious window resize" not in self.result.out:
return
# If we reach this point then there has been no error, but spurious
@@ -404,11 +402,11 @@ class ValgrindMixin(object):
if self.OPTS.valgrind:
# If the underlying test failed, simply report
# 'skip' for this valgrind test.
- if self.result['result'] != 'pass':
- self.result['result'] = 'skip'
- elif self.result['returncode'] == 0:
+ if self.result.result != 'pass':
+ self.result.result = 'skip'
+ elif self.result.returncode == 0:
# Test passes and is valgrind clean.
- self.result['result'] = 'pass'
+ self.result.result = 'pass'
else:
# Test passed but has valgrind errors.
- self.result['result'] = 'fail'
+ self.result.result = 'fail'
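
For subclasses, the net effect of this file's changes is that interpret_result now reads and writes attributes instead of dict keys. A hypothetical subclass, to show the shape (MyTest is invented; the real subclasses follow below):

    from framework.test.base import Test

    class MyTest(Test):
        def interpret_result(self):
            # self.result is a results.TestResult after this patch
            if self.result.returncode != 0 or 'FAIL' in self.result.out:
                self.result.result = 'fail'
            else:
                self.result.result = 'pass'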
diff --git a/framework/test/deqp.py b/framework/test/deqp.py
index 1462ca2d6..8641e45c3 100644
--- a/framework/test/deqp.py
+++ b/framework/test/deqp.py
@@ -140,16 +140,16 @@ class DEQPBaseTest(Test):
return command + self.extra_args
def interpret_result(self):
- if self.result['returncode'] != 0:
- self.result['result'] = 'fail'
+ if self.result.returncode != 0:
+ self.result.result = 'fail'
return
- for line in self.result['out'].split('\n'):
+ for line in self.result.out.split('\n'):
line = line.lstrip()
for k, v in self.__RESULT_MAP.iteritems():
if line.startswith(k):
- self.result['result'] = v
+ self.result.result = v
return
# We failed to parse the test output. Fallback to 'fail'.
- self.result['result'] = 'fail'
+ self.result.result = 'fail'
diff --git a/framework/test/gleantest.py b/framework/test/gleantest.py
index 539212f1b..21592c44b 100644
--- a/framework/test/gleantest.py
+++ b/framework/test/gleantest.py
@@ -53,10 +53,10 @@ class GleanTest(Test):
return super(GleanTest, self).command + self.GLOBAL_PARAMS
def interpret_result(self):
- if self.result['returncode'] != 0 or 'FAIL' in self.result['out']:
- self.result['result'] = 'fail'
+ if self.result.returncode != 0 or 'FAIL' in self.result.out:
+ self.result.result = 'fail'
else:
- self.result['result'] = 'pass'
+ self.result.result = 'pass'
def is_skip(self):
# Glean tests require glx
diff --git a/framework/test/gtest.py b/framework/test/gtest.py
index cd808718a..8d239af3b 100644
--- a/framework/test/gtest.py
+++ b/framework/test/gtest.py
@@ -36,12 +36,12 @@ class GTest(Test):
def interpret_result(self):
# Since gtests can have several subtets, if any of the subtests fail
# then we need to report fail.
- out = self.result['out']
+ out = self.result.out
if len(re.findall('FAILED', out, re.MULTILINE)) > 0:
- self.result['result'] = 'fail'
+ self.result.result = 'fail'
elif len(re.findall('PASSED', out, re.MULTILINE)) > 0:
- self.result['result'] = 'pass'
+ self.result.result = 'pass'
else:
#If we get here, then the test probably exited early.
- self.result['result'] = 'fail'
+ self.result.result = 'fail'
return out
diff --git a/framework/test/oclconform.py b/framework/test/oclconform.py
index 7f6e27dc4..14ce90282 100644
--- a/framework/test/oclconform.py
+++ b/framework/test/oclconform.py
@@ -44,10 +44,10 @@ def get_test_section_name(test):
class OCLConform(Test):
def interpret_result(self):
- if self.result['returncode'] != 0 or 'FAIL' in self.result['out']:
- self.result['result'] = 'fail'
+ if self.result.returncode != 0 or 'FAIL' in self.result.out:
+ self.result.result = 'fail'
else:
- self.result['result'] = 'pass'
+ self.result.result = 'pass'
def add_sub_test(profile, test_name, subtest_name, subtest):
profile.test_list[grouptools.join('oclconform', test_name, subtest_name)] = subtest
diff --git a/framework/test/piglit_test.py b/framework/test/piglit_test.py
index fcd613caf..7a35311cf 100644
--- a/framework/test/piglit_test.py
+++ b/framework/test/piglit_test.py
@@ -66,12 +66,13 @@ class PiglitBaseTest(ValgrindMixin, Test):
self._command[0] = os.path.join(TEST_BIN_DIR, self._command[0])
def interpret_result(self):
- outlines = self.result['out'].split('\n')
+ outlines = self.result.out.split('\n')
outpiglit = (s[7:] for s in outlines if s.startswith('PIGLIT:'))
+ # FIXME: handle this properly. It needs a method in TestResult probably
for piglit in outpiglit:
- self.result.recursive_update(json.loads(piglit))
- self.result['out'] = '\n'.join(
+ self.result.update(json.loads(piglit))
+ self.result.out = '\n'.join(
s for s in outlines if not s.startswith('PIGLIT:'))
super(PiglitBaseTest, self).interpret_result()
diff --git a/framework/tests/base_tests.py b/framework/tests/base_tests.py
index 212f25a88..e65f6739a 100644
--- a/framework/tests/base_tests.py
+++ b/framework/tests/base_tests.py
@@ -70,7 +70,7 @@ def test_timeout():
test = _Test(['sleep', '60'])
test.timeout = 1
test.run()
- nt.eq_(test.result['result'], 'timeout')
+ nt.eq_(test.result.result, 'timeout')
def test_timeout_pass():
@@ -79,14 +79,14 @@ def test_timeout_pass():
utils.binary_check('true')
def helper():
- if (test.result['returncode'] == 0):
- test.result['result'] = "pass"
+ if (test.result.returncode == 0):
+ test.result.result = "pass"
test = TestTest(['true'])
test.test_interpret_result = helper
test.timeout = 1
test.run()
- nt.eq_(test.result['result'], 'pass')
+ nt.eq_(test.result.result, 'pass')
def test_WindowResizeMixin_rerun():
@@ -99,15 +99,15 @@ def test_WindowResizeMixin_rerun():
self.__return_spurious = True
def _run_command(self):
- self.result['returncode'] = None
+ self.result.returncode = None
# IF this is run only once we'll have "got spurious window resize"
- # in result['out'], if it runs multiple times we'll get 'all good'
+ # in result.out, if it runs multiple times we'll get 'all good'
if self.__return_spurious:
- self.result['out'] = "Got spurious window resize"
+ self.result.out = "Got spurious window resize"
self.__return_spurious = False
else:
- self.result['out'] = 'all good'
+ self.result.out = 'all good'
class Test_(WindowResizeMixin, Mixin, Test):
def interpret_result(self):
@@ -115,7 +115,7 @@ def test_WindowResizeMixin_rerun():
test = Test_(['foo'])
test.run()
- nt.assert_equal(test.result['out'], 'all good')
+ nt.assert_equal(test.result.out, 'all good')
def test_run_command_early():
@@ -195,9 +195,9 @@ class TestValgrindMixinRun(object):
def test(status, expected):
test = self.test(['foo'])
test.OPTS.valgrind = True
- test.result['result'] = status
+ test.result.result = status
test.run()
- nt.eq_(test.result['result'], expected)
+ nt.eq_(test.result.result, expected)
desc = ('test.base.ValgrindMixin.run: '
'when status is "{}" it is changed to "{}"')
@@ -212,9 +212,9 @@ class TestValgrindMixinRun(object):
def test(status):
test = self.test(['foo'])
test.OPTS.valgrind = False
- test.result['result'] = status
+ test.result.result = status
test.run()
- nt.eq_(test.result['result'], status)
+ nt.eq_(test.result.result, status)
desc = ('test.base.ValgrindMixin.run: when status is "{}" '
'it is not changed when not running valgrind.')
@@ -228,17 +228,17 @@ class TestValgrindMixinRun(object):
"""
test = self.test(['foo'])
test.OPTS.valgrind = True
- test.result['result'] = 'pass'
- test.result['returncode'] = 0
+ test.result.result = 'pass'
+ test.result.returncode = 0
test.run()
- nt.eq_(test.result['result'], 'pass')
+ nt.eq_(test.result.result, 'pass')
def test_fallthrough(self):
"""test.base.ValgrindMixin.run: when a test is 'pass' but returncode is not 0 it's 'fail'
"""
test = self.test(['foo'])
test.OPTS.valgrind = True
- test.result['result'] = 'pass'
- test.result['returncode'] = 1
+ test.result.result = 'pass'
+ test.result.returncode = 1
test.run()
- nt.eq_(test.result['result'], 'fail')
+ nt.eq_(test.result.result, 'fail')
diff --git a/framework/tests/compressed_backend_tests.py b/framework/tests/compressed_backend_tests.py
index 19f15f3d8..42d5ab90e 100644
--- a/framework/tests/compressed_backend_tests.py
+++ b/framework/tests/compressed_backend_tests.py
@@ -32,6 +32,7 @@ import functools
import nose.tools as nt
from nose.plugins.skip import SkipTest
+from framework import results
from framework.tests import utils
from framework.backends import compression, abstract
@@ -111,7 +112,7 @@ def _test_extension():
obj = _TestBackend(d)
obj.initialize()
with obj.write_test('foo') as t:
- t({'result': 'foo'})
+ t(results.TestResult('pass'))
obj.finalize()
diff --git a/framework/tests/deqp_tests.py b/framework/tests/deqp_tests.py
index efd9dec24..d9327ec27 100644
--- a/framework/tests/deqp_tests.py
+++ b/framework/tests/deqp_tests.py
@@ -141,21 +141,21 @@ def test_DEQPBaseTest_interpret_result_returncode():
"""deqp.DEQPBaseTest.interpret_result: if returncode is not 0 result is fail
"""
test = _DEQPTestTest('a.deqp.test')
- test.result['returncode'] = 1
+ test.result.returncode = 1
test.interpret_result()
- nt.eq_(test.result['result'], 'fail')
+ nt.eq_(test.result.result, 'fail')
def test_DEQPBaseTest_interpret_result_fallthrough():
"""deqp.DEQPBaseTest.interpret_result: if no case is hit set to fail
"""
test = _DEQPTestTest('a.deqp.test')
- test.result['returncode'] = 0
- test.result['out'] = ''
+ test.result.returncode = 0
+ test.result.out = ''
test.interpret_result()
- nt.eq_(test.result['result'], 'fail')
+ nt.eq_(test.result.result, 'fail')
@utils.nose_generator
@@ -163,10 +163,10 @@ def test_DEQPBaseTest_interpret_result_status():
"""generate tests for each status possiblility."""
def test(status, expected):
inst = _DEQPTestTest('a.deqp.test')
- inst.result['returncode'] = 0
- inst.result['out'] = status
+ inst.result.returncode = 0
+ inst.result.out = status
inst.interpret_result()
- nt.eq_(inst.result['result'], expected)
+ nt.eq_(inst.result.result, expected)
desc = ('deqp.DEQPBaseTest.interpret_result: '
'when "{}" in stdout status is set to "{}"')
diff --git a/framework/tests/dmesg_tests.py b/framework/tests/dmesg_tests.py
index 1888f0cfc..618994c0c 100644
--- a/framework/tests/dmesg_tests.py
+++ b/framework/tests/dmesg_tests.py
@@ -27,7 +27,6 @@ don't want to run them use '-e sudo' with nosetests
from __future__ import print_function, absolute_import
import os
-import sys
import subprocess
import re
@@ -38,7 +37,7 @@ except ImportError:
import nose.tools as nt
from nose.plugins.skip import SkipTest
-import framework.dmesg as dmesg
+from framework import dmesg, status
import framework.core
import framework.test
import framework.backends
@@ -251,25 +250,24 @@ def test_update_result_replace():
def create_test_result(res):
result = framework.results.TestResult()
- result['result'] = res
- result['subtest'] = {}
- result['subtest']['test'] = res
+ result.result = res
+ result.subtests['test'] = res
return result
dmesg = TestDmesg()
-
- for res in ['pass', 'fail', 'crash', 'warn', 'skip', 'notrun']:
+ for res in [status.status_lookup(x) for x in
+ ['pass', 'fail', 'crash', 'warn', 'skip', 'notrun']]:
dmesg.regex = None
dmesg._new_messages = ['add', 'some', 'stuff']
new_result = dmesg.update_result(create_test_result(res))
check_update_result.description = \
"dmesg.Dmesg.update_result: '{0}' replaced correctly".format(res)
- yield check_update_result, new_result['result'], res
+ yield check_update_result, new_result.result, res
check_update_result.description = \
"dmesg.Dmesg.update_result: subtest '{0}' replaced correctly".format(res)
- yield check_update_result, new_result['subtest']['test'], res
+ yield check_update_result, new_result.subtests['test'], res
@utils.nose_generator
@@ -278,13 +276,13 @@ def test_update_result_no_match_regex():
def create_test_result(res):
result = framework.results.TestResult()
- result['result'] = res
- result['subtest'] = {}
- result['subtest']['test'] = res
+ result.result = res
+ result.subtests['test'] = res
return result
dmesg = TestDmesg()
- for res in ['pass', 'fail', 'crash', 'warn', 'skip', 'notrun']:
+ for res in [status.status_lookup(x) for x in
+ ['pass', 'fail', 'crash', 'warn', 'skip', 'notrun']]:
# check that the status is not updated when Dmesg.regex is set and does
# not match the dmesg output
dmesg.regex = re.compile("(?!)")
@@ -293,7 +291,7 @@ def test_update_result_no_match_regex():
check_equal_result.description = \
"dmesg.Dmesg.update_result: with non-matching regex '{0}'".format(res)
- yield check_equal_result, new_result['result'], res
+ yield check_equal_result, new_result.result, res
@utils.nose_generator
@@ -302,13 +300,13 @@ def test_update_result_match_regex():
def create_test_result(res):
result = framework.results.TestResult()
- result['result'] = res
- result['subtest'] = {}
- result['subtest']['test'] = res
+ result.result = res
+ result.subtests['test'] = res
return result
dmesg = TestDmesg()
- for res in ['pass', 'fail', 'crash', 'warn', 'skip', 'notrun']:
+ for res in [status.status_lookup(x) for x in
+ ['pass', 'fail', 'crash', 'warn', 'skip', 'notrun']]:
# check that the status is updated when Dmesg.regex is set and matches
# the dmesg output
dmesg.regex = re.compile("piglit.*test")
@@ -317,7 +315,7 @@ def test_update_result_match_regex():
check_update_result.description = \
"dmesg.Dmesg.update_result: with matching regex '{0}'".format(res)
- yield check_update_result, new_result['result'], res
+ yield check_update_result, new_result.result, res
def check_equal_result(result, status):
@@ -355,17 +353,18 @@ def check_update_result(result, status):
def test_update_result_add_dmesg():
- """dmesg.Dmesgupdate_result: sets the dmesg attribute"""
+ """dmesg.Dmesg.update_result: sets the dmesg attribute"""
test = TestDmesg()
result = framework.results.TestResult()
- result['result'] = 'pass'
+ result.result = 'pass'
+ messages = ['some', 'new', 'messages']
- test._new_messages = ['some', 'new', 'messages']
+ test._new_messages = messages
result = test.update_result(result)
- nt.assert_in('dmesg', result,
- msg="result does not have dmesg member but should")
+ nt.eq_(result.dmesg, '\n'.join(messages),
+ msg="result does not have dmesg member but should")
def test_json_serialize_updated_result():
@@ -373,7 +372,7 @@ def test_json_serialize_updated_result():
test = TestDmesg()
result = framework.results.TestResult()
- result['result'] = 'pass'
+ result.result = 'pass'
test._new_messages = ['some', 'new', 'messages']
result = test.update_result(result)
@@ -424,5 +423,5 @@ def check_classes_dmesg(test_class, test_args):
test.execute(None, DummyLog(), json, test)
- nt.assert_in(json.result['result'], ['dmesg-warn', 'dmesg-fail'],
+ nt.assert_in(json.result.result, ['dmesg-warn', 'dmesg-fail'],
msg="{0} did not update status with dmesg".format(type(test)))
diff --git a/framework/tests/gleantest_tests.py b/framework/tests/gleantest_tests.py
index b817e92d7..f755c8edd 100644
--- a/framework/tests/gleantest_tests.py
+++ b/framework/tests/gleantest_tests.py
@@ -64,9 +64,9 @@ def test_bad_returncode():
"""
test = GleanTest('basic')
- test.result['returncode'] = 1
+ test.result.returncode = 1
test.interpret_result()
- nt.assert_equal(test.result['result'], 'fail')
+ nt.assert_equal(test.result.result, 'fail')
@nt.raises(TestIsSkip)
diff --git a/framework/tests/json_backend_tests.py b/framework/tests/json_backend_tests.py
index 1b4a037e3..26f5db975 100644
--- a/framework/tests/json_backend_tests.py
+++ b/framework/tests/json_backend_tests.py
@@ -68,13 +68,13 @@ def test_json_initialize_metadata():
class TestJSONTestMethod(utils.StaticDirectory):
@classmethod
def setup_class(cls):
- cls.test_name = 'a/test/group/test1'
- cls.result = results.TestResult({
- 'time': 1.2345,
- 'result': 'pass',
- 'out': 'this is stdout',
- 'err': 'this is stderr',
- })
+ cls.test_name = grouptools.join('a', 'test', 'group', 'test1')
+ cls.result = results.TestResult()
+ cls.result.time = 1.2345
+ cls.result.result = 'pass'
+ cls.result.out = 'this is stdout'
+ cls.result.err = 'this is stderr'
+
super(TestJSONTestMethod, cls).setup_class()
test = backends.json.JSONBackend(cls.tdir)
test.initialize(BACKEND_INITIAL_META)
@@ -104,12 +104,8 @@ class TestJSONTestFinalize(utils.StaticDirectory):
@classmethod
def setup_class(cls):
cls.test_name = grouptools.join('a', 'test', 'group', 'test1')
- cls.result = results.TestResult({
- 'time': 1.2345,
- 'result': 'pass',
- 'out': 'this is stdout',
- 'err': 'this is stderr',
- })
+ cls.result = results.TestResult('pass')
+
super(TestJSONTestFinalize, cls).setup_class()
test = backends.json.JSONBackend(cls.tdir)
test.initialize(BACKEND_INITIAL_META)
@@ -144,7 +140,7 @@ def test_update_results_current():
with utils.tempdir() as d:
with open(os.path.join(d, 'main'), 'w') as f:
- json.dump(data, f)
+ json.dump(data, f, default=backends.json.piglit_encoder)
with open(os.path.join(d, 'main'), 'r') as f:
base = backends.json._load(f)
@@ -195,11 +191,11 @@ def test_resume_load_valid():
backend = backends.json.JSONBackend(f)
backend.initialize(BACKEND_INITIAL_META)
with backend.write_test("group1/test1") as t:
- t({'result': 'fail'})
+ t(results.TestResult('fail'))
with backend.write_test("group1/test2") as t:
- t({'result': 'pass'})
+ t(results.TestResult('pass'))
with backend.write_test("group2/test3") as t:
- t({'result': 'fail'})
+ t(results.TestResult('fail'))
test = backends.json._resume(f)
@@ -215,11 +211,11 @@ def test_resume_load_invalid():
backend = backends.json.JSONBackend(f)
backend.initialize(BACKEND_INITIAL_META)
with backend.write_test("group1/test1") as t:
- t({'result': 'fail'})
+ t(results.TestResult('fail'))
with backend.write_test("group1/test2") as t:
- t({'result': 'pass'})
+ t(results.TestResult('pass'))
with backend.write_test("group2/test3") as t:
- t({'result': 'fail'})
+ t(results.TestResult('fail'))
with open(os.path.join(f, 'tests', 'x.json'), 'w') as w:
w.write('foo')
@@ -243,13 +239,13 @@ def test_resume_load_incomplete():
backend = backends.json.JSONBackend(f)
backend.initialize(BACKEND_INITIAL_META)
with backend.write_test("group1/test1") as t:
- t({'result': 'fail'})
+ t(results.TestResult('fail'))
with backend.write_test("group1/test2") as t:
- t({'result': 'pass'})
+ t(results.TestResult('pass'))
with backend.write_test("group2/test3") as t:
- t({'result': 'crash'})
+ t(results.TestResult('crash'))
with backend.write_test("group2/test4") as t:
- t({'result': 'incomplete'})
+ t(results.TestResult('incomplete'))
test = backends.json._resume(f)
@@ -266,7 +262,8 @@ def test_load_results_folder_as_main():
"""
with utils.tempdir() as tdir:
with open(os.path.join(tdir, 'main'), 'w') as tfile:
- tfile.write(json.dumps(utils.JSON_DATA))
+ tfile.write(json.dumps(utils.JSON_DATA,
+ default=backends.json.piglit_encoder))
backends.json.load_results(tdir, 'none')
@@ -276,7 +273,8 @@ def test_load_results_folder():
"""backends.json.load_results: takes a folder with a file named results.json"""
with utils.tempdir() as tdir:
with open(os.path.join(tdir, 'results.json'), 'w') as tfile:
- tfile.write(json.dumps(utils.JSON_DATA))
+ tfile.write(json.dumps(utils.JSON_DATA,
+ default=backends.json.piglit_encoder))
backends.json.load_results(tdir, 'none')
@@ -293,7 +291,7 @@ def test_load_json():
with utils.tempdir() as tdir:
filename = os.path.join(tdir, 'results.json')
with open(filename, 'w') as f:
- json.dump(utils.JSON_DATA, f)
+ json.dump(utils.JSON_DATA, f, default=backends.json.piglit_encoder)
result = backends.load(filename)
@@ -301,13 +299,21 @@ def test_load_json():
nt.assert_in('sometest', result.tests)
-def test_piglit_decoder():
- """backends.json.piglit_decoder: Works correctly"""
- test = json.loads('{"foo": {"result": "pass"}}',
+def test_piglit_decoder_result():
+ """backends.json.piglit_decoder: turns results into TestResults"""
+ test = json.loads('{"foo": {"result": "pass", "__type__": "TestResult"}}',
object_hook=backends.json.piglit_decoder)
nt.assert_is_instance(test['foo'], results.TestResult)
+def test_piglit_decoder_old_result():
+ """backends.json.piglit_decoder: does not turn old results into TestResults
+ """
+ test = json.loads('{"foo": {"result": "pass"}}',
+ object_hook=backends.json.piglit_decoder)
+ nt.assert_is_instance(test['foo'], dict)
+
+
@nt.raises(exceptions.PiglitFatalError)
def test_load_bad_json():
"""backends.json._load: Raises fatal error if json is corrupt"""
diff --git a/framework/tests/json_results_update_tests.py b/framework/tests/json_results_update_tests.py
index 510a2026c..fd20e100f 100644
--- a/framework/tests/json_results_update_tests.py
+++ b/framework/tests/json_results_update_tests.py
@@ -159,9 +159,8 @@ class TestV0toV1(object):
nt.ok_(self.RESULT.tests.get('group1/groupA/test'))
def test_subtests_test_is_testresult(self):
- """backends.json.update_results (0 -> 1): The result of the new test is a TestResult Instance"""
- nt.ok_(isinstance(self.RESULT.tests['group1/groupA/test'],
- results.TestResult))
+ """backends.json.update_results (0 -> 1): The result of the new test is a dict Instance"""
+ nt.ok_(isinstance(self.RESULT.tests['group1/groupA/test'], dict))
def test_info_delete(self):
"""backends.json.update_results (0 -> 1): Remove the info name from results"""
@@ -233,7 +232,8 @@ class TestV0toV1(object):
data = self.DATA
try:
- with utils.tempfile(json.dumps(data)) as t:
+ with utils.tempfile(
+ json.dumps(data, default=backends.json.piglit_encoder)) as t:
result = backends.json.load_results(t, 'none')
except OSError as e:
# There is the potential that the file will be renamed. In that event
@@ -253,7 +253,7 @@ class TestV0toV1(object):
"""
result = self._load_with_update()
- nt.assert_equal(result.tests['sometest']['dmesg'], 'this\nis\ndmesg')
+ nt.assert_equal(result.tests['sometest'].dmesg, 'this\nis\ndmesg')
def test_load_results_v0(self):
"""backends.json.load_results: Loads results v0 and updates correctly.
@@ -265,7 +265,7 @@ class TestV0toV1(object):
data['results_version'] = 0
result = self._load_with_update(data)
- nt.assert_equal(result.tests['sometest']['dmesg'], 'this\nis\ndmesg')
+ nt.assert_equal(result.tests['sometest'].dmesg, 'this\nis\ndmesg')
def test_info_split(self):
"""backends.json.update_results (0 -> 1): info can split into any number of elements"""
@@ -273,7 +273,8 @@ class TestV0toV1(object):
data['tests']['sometest']['info'] = \
'Returncode: 1\n\nErrors:stderr\n\nOutput: stdout\n\nmore\n\nstuff'
- with utils.tempfile(json.dumps(data)) as t:
+ with utils.tempfile(
+ json.dumps(data, default=backends.json.piglit_encoder)) as t:
with open(t, 'r') as f:
backends.json._update_zero_to_one(backends.json._load(f))
@@ -317,7 +318,8 @@ class TestV2Update(object):
}
}
- with utils.tempfile(json.dumps(data)) as t:
+ with utils.tempfile(
+ json.dumps(data, default=backends.json.piglit_encoder)) as t:
with open(t, 'r') as f:
cls.result = backends.json._update_one_to_two(
backends.json._load(f))
@@ -384,7 +386,8 @@ class TestV2NoUpdate(object):
}
}
- with utils.tempfile(json.dumps(data)) as t:
+ with utils.tempfile(
+ json.dumps(data, default=backends.json.piglit_encoder)) as t:
with open(t, 'r') as f:
cls.result = backends.json._update_one_to_two(
backends.json._load(f))
@@ -460,7 +463,8 @@ class TestV2toV3(object):
}
}
- with utils.tempfile(json.dumps(data)) as t:
+ with utils.tempfile(
+ json.dumps(data, default=backends.json.piglit_encoder)) as t:
with open(t, 'r') as f:
# pylint: disable=protected-access
cls.RESULT = backends.json._update_two_to_three(backends.json._load(f))
@@ -525,7 +529,8 @@ class TestV3toV4(object):
@staticmethod
def _make_result(data):
"""Write data to a file and return a result.TestrunResult object."""
- with utils.tempfile(json.dumps(data)) as t:
+ with utils.tempfile(
+ json.dumps(data, default=backends.json.piglit_encoder)) as t:
with open(t, 'r') as f:
# pylint: disable=protected-access
return backends.json._update_three_to_four(backends.json._load(f))
@@ -614,7 +619,8 @@ class TestV4toV5(object):
"has@windows",
]
- with utils.tempfile(json.dumps(cls.DATA)) as t:
+ with utils.tempfile(
+ json.dumps(cls.DATA, default=backends.json.piglit_encoder)) as t:
with open(t, 'r') as f:
cls.result = backends.json._update_four_to_five(backends.json._load(f))
@@ -636,12 +642,63 @@ class TestV4toV5(object):
for new in self.new:
nt.assert_dict_equal(self.result.tests[new], self.TEST_DATA)
+
+class TestV5toV6(object):
+ TEST_DATA = {
+ 'returncode': 0,
+ 'err': None,
+ 'environment': None,
+ 'command': 'foo',
+ 'result': 'skip',
+ 'time': 0.123,
+ 'out': None,
+ }
+
+ DATA = {
+ "results_version": 4,
+ "name": "test",
+ "options": {
+ "profile": ['quick'],
+ "dmesg": False,
+ "verbose": False,
+ "platform": "gbm",
+ "sync": False,
+ "valgrind": False,
+ "filter": [],
+ "concurrent": "all",
+ "test_count": 0,
+ "exclude_tests": [],
+ "exclude_filter": [],
+ "env": {
+ "lspci": "stuff",
+ "uname": "more stuff",
+ "glxinfo": "and stuff",
+ "wglinfo": "stuff"
+ }
+ },
+ "tests": {
+ 'a@test': TEST_DATA,
+ }
+ }
+
+ @classmethod
+ def setup_class(cls):
+ """Class setup. Create a TestrunResult with v4 data."""
+ with utils.tempfile(
+ json.dumps(cls.DATA, default=backends.json.piglit_encoder)) as t:
+ with open(t, 'r') as f:
+ cls.result = backends.json._update_five_to_six(backends.json._load(f))
+
+ def test_result_is_TestResult(self):
+ """backends.json.update_results (5 -> 6): A test result is converted to a TestResult instance"""
+ nt.ok_(isinstance(self.result.tests['a@test'], results.TestResult))
+
def test_load_results(self):
- """backends.json.update_results (4 -> 5): load_results properly updates."""
+ """backends.json.update_results (5 -> 6): load_results properly updates."""
with utils.tempdir() as d:
tempfile = os.path.join(d, 'results.json')
with open(tempfile, 'w') as f:
- json.dump(self.DATA, f)
+ json.dump(self.DATA, f, default=backends.json.piglit_encoder)
with open(tempfile, 'r') as f:
result = backends.json.load_results(tempfile, 'none')
- nt.assert_equal(result.results_version, 5)
+ nt.assert_equal(result.results_version, 6)
diff --git a/framework/tests/json_tests.py b/framework/tests/json_tests.py
index 7819ca612..be76f3bc2 100644
--- a/framework/tests/json_tests.py
+++ b/framework/tests/json_tests.py
@@ -35,8 +35,8 @@ try:
except ImportError:
import json
-import framework.core as core
import framework.tests.utils as utils
+from framework import core, results
from framework.backends.json import JSONBackend
from framework.programs.run import _create_metadata
@@ -74,7 +74,7 @@ class TestJsonOutput(utils.StaticDirectory):
backend = JSONBackend(cls.tdir, file_fsync=True)
backend.initialize(_create_metadata(args, 'test', core.Options()))
with backend.write_test('result') as t:
- t({'result': 'pass'})
+ t(results.TestResult('pass'))
backend.finalize({'time_elapsed': 1.22})
with open(os.path.join(cls.tdir, 'results.json'), 'r') as f:
cls.json = json.load(f)
diff --git a/framework/tests/junit_backends_tests.py b/framework/tests/junit_backends_tests.py
index 7e82f0101..764f4c35d 100644
--- a/framework/tests/junit_backends_tests.py
+++ b/framework/tests/junit_backends_tests.py
@@ -87,17 +87,18 @@ class TestJUnitSingleTest(TestJunitNoTests):
def setup_class(cls):
super(TestJUnitSingleTest, cls).setup_class()
cls.test_file = os.path.join(cls.tdir, 'results.xml')
+
+ result = results.TestResult()
+ result.time = 1.2345
+ result.result = 'pass'
+ result.out = 'this is stdout'
+ result.err = 'this is stderr'
+ result.command = 'foo'
+
test = backends.junit.JUnitBackend(cls.tdir)
test.initialize(BACKEND_INITIAL_META)
with test.write_test(grouptools.join('a', 'test', 'group', 'test1')) as t:
- t(results.TestResult({
- 'time': 1.2345,
- 'result': 'pass',
- 'out': 'this is stdout',
- 'err': 'this is stderr',
- 'command': 'foo',
- })
- )
+ t(result)
test.finalize()
def test_xml_well_formed(self):
@@ -117,27 +118,24 @@ class TestJUnitMultiTest(TestJUnitSingleTest):
@classmethod
def setup_class(cls):
super(TestJUnitMultiTest, cls).setup_class()
+
+ result = results.TestResult()
+ result.time = 1.2345
+ result.result = 'pass'
+ result.out = 'this is stdout'
+ result.err = 'this is stderr'
+ result.command = 'foo'
+
cls.test_file = os.path.join(cls.tdir, 'results.xml')
test = backends.junit.JUnitBackend(cls.tdir)
test.initialize(BACKEND_INITIAL_META)
+ with test.write_test(grouptools.join('a', 'test', 'group', 'test1')) as t:
+ t(result)
+
+ result.result = 'fail'
with test.write_test(
- grouptools.join('a', 'test', 'group', 'test1')) as t:
- t(results.TestResult({
- 'time': 1.2345,
- 'result': 'pass',
- 'out': 'this is stdout',
- 'err': 'this is stderr',
- 'command': 'foo',
- }))
- with test.write_test(
- grouptools.join('a', 'different', 'test', 'group', 'test2')) as t:
- t(results.TestResult({
- 'time': 1.2345,
- 'result': 'fail',
- 'out': 'this is stdout',
- 'err': 'this is stderr',
- 'command': 'foo',
- }))
+ grouptools.join('a', 'different', 'test', 'group', 'test2')) as t:
+ t(result)
test.finalize()
def test_xml_well_formed(self):
@@ -153,16 +151,17 @@ class TestJUnitMultiTest(TestJUnitSingleTest):
def test_junit_replace():
"""backends.junit.JUnitBackend.write_test(): '{separator}' is replaced with '.'"""
with utils.tempdir() as tdir:
+ result = results.TestResult()
+ result.time = 1.2345
+ result.result = 'pass'
+ result.out = 'this is stdout'
+ result.err = 'this is stderr'
+ result.command = 'foo'
+
test = backends.junit.JUnitBackend(tdir)
test.initialize(BACKEND_INITIAL_META)
with test.write_test(grouptools.join('a', 'test', 'group', 'test1')) as t:
- t(results.TestResult({
- 'time': 1.2345,
- 'result': 'pass',
- 'out': 'this is stdout',
- 'err': 'this is stderr',
- 'command': 'foo',
- }))
+ t(result)
test.finalize()
test_value = etree.parse(os.path.join(tdir, 'results.xml')).getroot()
@@ -175,17 +174,17 @@ def test_junit_replace():
def test_junit_skips_bad_tests():
"""backends.junit.JUnitBackend: skips illformed tests"""
with utils.tempdir() as tdir:
+ result = results.TestResult()
+ result.time = 1.2345
+ result.result = 'pass'
+ result.out = 'this is stdout'
+ result.err = 'this is stderr'
+ result.command = 'foo'
+
test = backends.junit.JUnitBackend(tdir)
test.initialize(BACKEND_INITIAL_META)
with test.write_test(grouptools.join('a', 'test', 'group', 'test1')) as t:
- t(results.TestResult({
- 'time': 1.2345,
- 'result': 'pass',
- 'out': 'this is stdout',
- 'err': 'this is stderr',
- 'command': 'foo',
- })
- )
+ t(result)
with open(os.path.join(tdir, 'tests', '1.xml'), 'w') as f:
f.write('bad data')
@@ -232,28 +231,28 @@ class TestJUnitLoad(utils.StaticDirectory):
def test_status_instance(self):
"""backends.junit._load: a status is found and loaded."""
- nt.assert_is_instance(self.xml().tests[self.testname]['result'],
+ nt.assert_is_instance(self.xml().tests[self.testname].result,
status.Status)
def test_time(self):
"""backends.junit._load: Time is loaded correctly."""
- time = self.xml().tests[self.testname]['time']
+ time = self.xml().tests[self.testname].time
nt.assert_is_instance(time, float)
nt.assert_equal(time, 1.12345)
def test_command(self):
"""backends.junit._load: command is loaded correctly."""
- test = self.xml().tests[self.testname]['command']
+ test = self.xml().tests[self.testname].command
nt.assert_equal(test, 'this/is/a/command')
def test_out(self):
"""backends.junit._load: stdout is loaded correctly."""
- test = self.xml().tests[self.testname]['out']
+ test = self.xml().tests[self.testname].out
nt.assert_equal(test, 'This is stdout')
def test_err(self):
"""backends.junit._load: stderr is loaded correctly."""
- test = self.xml().tests[self.testname]['err']
+ test = self.xml().tests[self.testname].err
nt.assert_equal(test, 'this is stderr')
@utils.no_error
diff --git a/framework/tests/piglit_test_tests.py b/framework/tests/piglit_test_tests.py
index 5e3583969..db4c8b06e 100644
--- a/framework/tests/piglit_test_tests.py
+++ b/framework/tests/piglit_test_tests.py
@@ -43,36 +43,36 @@ def test_initialize_piglitcltest():
def test_piglittest_interpret_result():
- """test.piglit_test.PiglitBaseTest.interpret_result(): works no subtests"""
+ """test.piglit_test.PiglitBaseTest.interpret_result(): works with no subtests"""
test = PiglitBaseTest(['foo'])
- test.result['out'] = 'PIGLIT: {"result": "pass"}\n'
- test.result['returncode'] = 0
+ test.result.out = 'PIGLIT: {"result": "pass"}\n'
+ test.result.returncode = 0
test.interpret_result()
- nt.eq_(test.result['result'], 'pass')
+ nt.eq_(test.result.result, 'pass')
def test_piglittest_interpret_result_subtest():
"""test.piglit_test.PiglitBaseTest.interpret_result(): works with subtests"""
test = PiglitBaseTest(['foo'])
- test.result['out'] = ('PIGLIT: {"result": "pass"}\n'
+ test.result.out = ('PIGLIT: {"result": "pass"}\n'
'PIGLIT: {"subtest": {"subtest": "pass"}}\n')
- test.result['returncode'] = 0
+ test.result.returncode = 0
test.interpret_result()
- nt.eq_(test.result['subtest']['subtest'], 'pass')
+ nt.eq_(test.result.subtests['subtest'], 'pass')
def test_piglitest_no_clobber():
"""test.piglit_test.PiglitBaseTest.interpret_result(): does not clobber subtest entires"""
test = PiglitBaseTest(['a', 'command'])
- test.result['out'] = (
+ test.result.out = (
'PIGLIT: {"result": "pass"}\n'
'PIGLIT: {"subtest": {"test1": "pass"}}\n'
'PIGLIT: {"subtest": {"test2": "pass"}}\n'
)
- test.result['returncode'] = 0
+ test.result.returncode = 0
test.interpret_result()
- nt.assert_dict_equal(test.result['subtest'],
+ nt.assert_dict_equal(test.result.subtests,
{'test1': 'pass', 'test2': 'pass'})
diff --git a/framework/tests/results_tests.py b/framework/tests/results_tests.py
index 9ecffa02d..cf266f6bb 100644
--- a/framework/tests/results_tests.py
+++ b/framework/tests/results_tests.py
@@ -25,7 +25,7 @@ from __future__ import print_function, absolute_import
import nose.tools as nt
-from framework import results, status
+from framework import results, status, exceptions
import framework.tests.utils as utils
@@ -49,8 +49,235 @@ def test_generate_initialize():
yield check, target
-def test_testresult_load_to_status():
- """results.TestResult: an initial status value is converted to a Status object"""
- result = results.TestResult.load({'result': 'pass'})
- nt.ok_(isinstance(result['result'], status.Status),
- msg="Result key not converted to a status object")
+def test_Subtests_convert():
+ """results.Subtests.__setitem__: converts strings to statues"""
+ test = results.Subtests()
+ test['foo'] = 'pass'
+ nt.assert_is(test['foo'], status.PASS)
+
+
+def test_Subtests_to_json():
+ """results.Subtests.to_json: sets values properly"""
+ baseline = {
+ 'foo': status.PASS,
+ 'bar': status.CRASH,
+ '__type__': 'Subtests',
+ }
+
+ test = results.Subtests()
+ test['foo'] = status.PASS
+ test['bar'] = status.CRASH
+
+ nt.assert_dict_equal(baseline, test.to_json())
+
+
+def test_Subtests_from_dict():
+ """results.Subtests.from_dict: restores values properly"""
+ baseline = results.Subtests()
+ baseline['foo'] = status.PASS
+ baseline['bar'] = status.CRASH
+
+ test = results.Subtests.from_dict(baseline.to_json())
+
+ nt.assert_dict_equal(baseline, test)
+
+
+def test_Subtests_from_dict_instance():
+ """results.Subtests.from_dict: restores values properly"""
+ baseline = results.Subtests()
+ baseline['foo'] = status.PASS
+
+ test = results.Subtests.from_dict(baseline.to_json())
+
+ nt.assert_is(test['foo'], status.PASS)
+
+
+def test_TestResult_from_dict_inst():
+ """results.TestResult.from_dict: returns a TestResult"""
+ test = results.TestResult.from_dict({'result': 'pass'})
+ nt.ok_(isinstance(test, results.TestResult))
+
+
+class TestTestResultFromDictAttributes(object):
+ """A series of tests to show that each attribute is sucessfully populated.
+ """
+ @classmethod
+ def setup_class(cls):
+ dict_ = {
+ 'returncode': 10,
+ 'err': 'stderr',
+ 'out': 'stdout',
+ 'time': 1.2345,
+ 'command': 'this is a command',
+ 'environment': 'environment variables',
+ 'result': 'pass',
+ 'dmesg': 'this is some dmesg',
+ }
+
+ cls.test = results.TestResult.from_dict(dict_)
+
+ def test_returncode(self):
+ """results.TestResult.from_dict: sets returncode correctly"""
+ nt.eq_(self.test.returncode, 10)
+
+ def test_err(self):
+ """results.TestResult.from_dict: sets err correctly"""
+ nt.eq_(self.test.err, 'stderr')
+
+ def test_out(self):
+ """results.TestResult.from_dict: sets out correctly"""
+ nt.eq_(self.test.out, 'stdout')
+
+ def test_time(self):
+ """results.TestResult.from_dict: sets time correctly"""
+ nt.eq_(self.test.time, 1.2345)
+
+ def test_command(self):
+ """results.TestResult.from_dict: sets command correctly"""
+ nt.eq_(self.test.command, 'this is a command')
+
+ def test_environment(self):
+ """results.TestResult.from_dict: sets environment correctly"""
+ nt.eq_(self.test.environment, 'environment variables')
+
+ def test_result(self):
+ """results.TestResult.from_dict: sets result correctly"""
+ nt.eq_(self.test.result, 'pass')
+
+ def test_dmesg(self):
+ """dmesgs.TestResult.from_dict: sets dmesg correctly"""
+ nt.eq_(self.test.dmesg, 'this is some dmesg')
+
+
+def test_TestResult_result_getter():
+ """results.TestResult.result: Getter returns the result when there are no subtests"""
+ test = results.TestResult('pass')
+ nt.eq_(test.result, 'pass')
+
+
+def test_TestResult_result_setter():
+ """results.TestResult.result: setter makes the result a status"""
+ test = results.TestResult('pass')
+ test.result = 'fail'
+ nt.ok_(isinstance(test.result, status.Status))
+ nt.eq_(test.result, 'fail')
+
+
+@nt.raises(exceptions.PiglitFatalError)
+def test_TestResult_result_setter_invalid():
+ """results.TestResult.result: setter raises PiglitFatalError for invalid values"""
+ test = results.TestResult('pass')
+ test.result = 'poop'
+
+
+class TestTestResult_to_json(object):
+ """Tests for the attributes of the to_json method."""
+ @classmethod
+ def setup_class(cls):
+ cls.dict = {
+ 'returncode': 100,
+ 'err': 'this is an err',
+ 'out': 'this is some text',
+ 'time': 0.5,
+ 'environment': 'some env stuff',
+ 'subtests': {
+ 'a': 'pass',
+ 'b': 'fail',
+ },
+ 'result': 'crash',
+ }
+
+ test = results.TestResult.from_dict(cls.dict)
+
+ cls.json = test.to_json()
+
+ def test_returncode(self):
+ """results.TestResult.to_json: sets the returncode correctly"""
+ nt.eq_(self.dict['returncode'], self.json['returncode'])
+
+ def test_err(self):
+ """results.TestResult.to_json: sets the err correctly"""
+ nt.eq_(self.dict['err'], self.json['err'])
+
+ def test_out(self):
+ """results.TestResult.to_json: sets the out correctly"""
+ nt.eq_(self.dict['out'], self.json['out'])
+
+ def test_time(self):
+ """results.TestResult.to_json: sets the time correctly"""
+ nt.eq_(self.dict['time'], self.json['time'])
+
+ def test_environment(self):
+ """results.TestResult.to_json: sets the environment correctly"""
+ nt.eq_(self.dict['environment'], self.json['environment'])
+
+ def test_subtests(self):
+ """results.TestResult.to_json: sets the subtests correctly"""
+ nt.eq_(self.dict['subtests'], self.json['subtests'])
+
+ def test_type(self):
+ """results.TestResult.to_json: adds the __type__ hint"""
+ nt.eq_(self.json['__type__'], 'TestResult')
+
+
+class TestTestResult_from_dict(object):
+ """Tests for the from_dict method."""
+ @classmethod
+ def setup_class(cls):
+ cls.dict = {
+ 'returncode': 100,
+ 'err': 'this is an err',
+ 'out': 'this is some text',
+ 'time': 0.5,
+ 'environment': 'some env stuff',
+ 'subtests': {
+ 'a': 'pass',
+ 'b': 'fail',
+ },
+ 'result': 'crash',
+ }
+
+ cls.test = results.TestResult.from_dict(cls.dict)
+
+ def test_returncode(self):
+ """results.TestResult.from_dict: sets returncode properly"""
+ nt.eq_(self.test.returncode, self.dict['returncode'])
+
+ def test_err(self):
+ """results.TestResult.from_dict: sets err properly"""
+ nt.eq_(self.test.err, self.dict['err'])
+
+ def test_out(self):
+ """results.TestResult.from_dict: sets out properly"""
+ nt.eq_(self.test.out, self.dict['out'])
+
+ def test_time(self):
+ """results.TestResult.from_dict: sets time properly"""
+ nt.eq_(self.test.time, self.dict['time'])
+
+ def test_environment(self):
+ """results.TestResult.from_dict: sets environment properly"""
+ nt.eq_(self.test.environment, self.dict['environment'])
+
+ def test_subtests(self):
+ """results.TestResult.from_dict: sets subtests properly"""
+ nt.eq_(self.test.subtests, self.dict['subtests'])
+
+ def test_subtests_type(self):
+ """results.TestResult.from_dict: subtests are Status instances"""
+ nt.assert_is(self.test.subtests['a'], status.PASS)
+ nt.assert_is(self.test.subtests['b'], status.FAIL)
+
+
+def test_TestResult_update():
+ """results.TestResult.update: result is updated"""
+ test = results.TestResult('pass')
+ test.update({'result': 'incomplete'})
+ nt.eq_(test.result, 'incomplete')
+
+
+def test_TestResult_update_subtests():
+ """results.TestResult.update: subests are updated"""
+ test = results.TestResult('pass')
+ test.update({'subtest': {'result': 'incomplete'}})
+ nt.eq_(test.subtests['result'], 'incomplete')
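
Taken together, the tests above pin down Subtests as a dict that validates values on assignment and round-trips through a '__type__'-tagged plain dict. A rough sketch of that contract (the hard-coded string set here is an assumption standing in for the Status coercion the real class performs):

    class Subtests(dict):
        """Sketch of the container contract exercised above; the real
        class coerces values to framework.status.Status objects rather
        than using the plain-string check below."""

        _VALID = frozenset(['pass', 'fail', 'warn', 'crash', 'skip',
                            'incomplete', 'notrun'])

        def __setitem__(self, name, value):
            if value not in self._VALID:
                raise ValueError('invalid status: {0}'.format(value))
            super(Subtests, self).__setitem__(name, value)

        def to_json(self):
            ret = dict(self)
            ret['__type__'] = 'Subtests'  # type hint for the JSON decoder
            return ret

        @classmethod
        def from_dict(cls, dict_):
            inst = cls()
            for name, value in dict_.items():
                if name != '__type__':
                    inst[name] = value
            return inst
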
diff --git a/framework/tests/summary_tests.py b/framework/tests/summary_tests.py
index 0c9542dfc..51ec392c7 100644
--- a/framework/tests/summary_tests.py
+++ b/framework/tests/summary_tests.py
@@ -32,6 +32,7 @@ import nose.tools as nt
import framework.summary as summary
import framework.tests.utils as utils
+from framework.backends.json import piglit_encoder
@utils.no_error
@@ -72,10 +73,12 @@ def test_summary_add_to_set():
def check_sets(old, ostat, new, nstat, set_):
""" Check that the statuses are added to the correct set """
old['tests']['sometest']['result'] = ostat
+ old['tests']['sometest']['__type__'] = 'TestResult'
new['tests']['sometest']['result'] = nstat
+ new['tests']['sometest']['__type__'] = 'TestResult'
- with utils.tempfile(json.dumps(old)) as ofile:
- with utils.tempfile(json.dumps(new)) as nfile:
+ with utils.tempfile(json.dumps(old, default=piglit_encoder)) as ofile:
+ with utils.tempfile(json.dumps(new, default=piglit_encoder)) as nfile:
summ = summary.Summary([ofile, nfile])
print(summ.tests)
@@ -88,22 +91,22 @@ class TestSubtestHandling(object):
@classmethod
def setup_class(cls):
data = copy.deepcopy(utils.JSON_DATA)
+ data['tests']['sometest']['__type__'] = 'TestResult'
data['tests']['with_subtests']['result'] = 'pass'
+ data['tests']['with_subtests']['__type__'] = 'TestResult'
- data['tests']['with_subtests']['subtest']['subtest1'] = 'fail'
- data['tests']['with_subtests']['subtest']['subtest2'] = 'warn'
- data['tests']['with_subtests']['subtest']['subtest3'] = 'crash'
+ data['tests']['with_subtests']['subtests']['subtest1'] = 'fail'
+ data['tests']['with_subtests']['subtests']['subtest2'] = 'warn'
+ data['tests']['with_subtests']['subtests']['subtest3'] = 'crash'
data['tests']['is_skip']['result'] = 'skip'
+ data['tests']['is_skip']['__type__'] = 'TestResult'
- with utils.tempfile(json.dumps(data)) as sumfile:
+ with utils.tempfile(json.dumps(data, default=piglit_encoder)) as sumfile:
cls.summ = summary.Summary([sumfile])
def test_subtests_are_tests(self):
"""summary.Summary: Subtests should be treated as full tests"""
- nt.assert_equal(
- self.summ.fractions['fake-tests']['with_subtests'], (0, 3),
- msg="Summary.fraction['fake-tests']['with_subtests'] should "
- "be (0, 3), but isn't")
+ nt.eq_(self.summ.fractions['fake-tests']['with_subtests'], (0, 3))
def test_tests_w_subtests_are_groups(self):
"""summary.Summary: Tests with subtests should be a group
@@ -112,21 +115,13 @@ class TestSubtestHandling(object):
will be 'crash' if it has. (since we set the data that way)
"""
- nt.assert_equal(
- self.summ.status['fake-tests']['with_subtests'], 'crash',
- msg="Summary.status['fake-tests']['with_subtests'] should "
- "be crash, but isn't")
+ nt.eq_(self.summ.status['fake-tests']['with_subtests'], 'crash')
def test_removed_from_all(self):
"""summary.Summary: Tests with subtests should not be in the all results
"""
- nt.assert_not_in(
- 'with_subtests', self.summ.tests['all'],
- msg="Test with subtests should have been removed from "
- "self.tests['all'], but wasn't")
+ nt.assert_not_in('with_subtests', self.summ.tests['all'])
def subtest_not_skip_notrun(self):
"""summary.Summary: skips are not changed to notruns"""
- nt.eq_(
- self.summ.status['fake-tests']['is_skip'], 'skip',
- msg="Status should be skip but was changed")
+ nt.eq_(self.summ.status['fake-tests']['is_skip'], 'skip')
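
The behaviour these assertions encode is that a test with subtests contributes the fraction of passing subtests and rolls its group status up to the worst subtest status. A toy version, under the assumption that severity orders pass < warn < fail < crash:

    SEVERITY = {'pass': 0, 'warn': 1, 'fail': 2, 'crash': 3}

    def rollup(subtests):
        """Return ((passed, total), worst) for a group of subtests."""
        passed = sum(1 for s in subtests.values() if s == 'pass')
        worst = max(subtests.values(), key=SEVERITY.get)
        return (passed, len(subtests)), worst

    fraction, worst = rollup({'subtest1': 'fail',
                              'subtest2': 'warn',
                              'subtest3': 'crash'})
    assert fraction == (0, 3) and worst == 'crash'
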
diff --git a/framework/tests/utils.py b/framework/tests/utils.py
index dbcedcba1..007771622 100644
--- a/framework/tests/utils.py
+++ b/framework/tests/utils.py
@@ -28,6 +28,7 @@ in a single place.
from __future__ import print_function, absolute_import
import os
import sys
+import copy
import shutil
import tempfile as tempfile_
import functools
@@ -41,7 +42,7 @@ except ImportError:
import json
from nose.plugins.skip import SkipTest
-from framework import test, backends, results, core
+from framework import test, backends, core, results
__all__ = [
'tempfile',
@@ -74,10 +75,10 @@ JSON_DATA = {
"lspci": "fake",
"glxinfo": "fake",
"tests": _Tree({
- "sometest": results.TestResult({
- "result": "pass",
- "time": 0.01
- })
+ "sometest": {
+ 'result': 'pass',
+ 'time': 1.2,
+ }
})
}
@@ -230,12 +231,15 @@ class GeneratedTestWrapper(object):
@contextmanager
def resultfile():
""" Create a stringio with some json in it and pass that as results """
- with tempfile_.NamedTemporaryFile(delete=False) as output:
- json.dump(JSON_DATA, output)
+ data = copy.deepcopy(JSON_DATA)
+ data['tests']['sometest'] = results.TestResult('pass')
+ data['tests']['sometest'].time = 1.2
+ with tempfile_.NamedTemporaryFile(delete=False) as f:
+ json.dump(data, f, default=backends.json.piglit_encoder)
- yield output
+ yield f
- os.remove(output.name)
+ os.remove(f.name)
@contextmanager