author | Kenneth Graunke <kenneth@whitecape.org> | 2011-07-29 15:08:58 -0700
committer | Kenneth Graunke <kenneth@whitecape.org> | 2011-07-29 17:35:54 -0700
commit | caa35ef4e584d5a254f0079b67ca34998ae27230 (patch)
tree | 5a31e268a1ef403222455bac1669d295158f3e73
parent | 8704cec42889a9d592e15dfaa77ad1498f30dc09 (diff)
Refactor PlainExecTest, GleanTest, and GTFTest to share code.
Each of these classes is essentially the same, except for:
1. How to determine whether the test passed/failed
2. Setting up the command and options
All the logic for running the command and capturing its output can be
shared, as can the logic that determines whether the test trapped, aborted, or crashed.
This patch creates a new 'ExecTest' base class. Subclasses can set the
exact command to run and options in their __init__ method, and must
supply an 'interpretResult' method which reads the output and sets
results['result'] to pass/fail (along with other desired annotations).
v2: Update for JSON-related changes and ensure that the output stays the
same. (Piglit and Glean are identical; GTF now includes the command
that was run but is otherwise equivalent.)
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
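
For illustration only (this sketch is not part of the patch), a new test type would hook into the refactored framework by subclassing ExecTest: pass the command to the base __init__ and implement interpretResult() to fill in results['result']. The FooTest name, its '-auto' flag, and the 'PASS' output marker below are hypothetical.

# Illustrative sketch -- not part of this patch.
from exectest import ExecTest

class FooTest(ExecTest):
    def __init__(self, binary):
        # ExecTest.run() executes self.command, captures stdout/stderr,
        # calls interpretResult(), and then overrides the result for
        # trap/abort/crash return codes.
        ExecTest.__init__(self, [binary, '-auto'])   # '-auto' is hypothetical

    def interpretResult(self, out, results):
        # Decide pass/fail from the captured output; returncode, command,
        # environment, and info are recorded by the base class.
        results['result'] = 'pass' if 'PASS' in out else 'fail'
        return out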
-rw-r--r-- | framework/exectest.py | 71
-rw-r--r-- | framework/gleantest.py | 70
-rw-r--r-- | tests/gtf.tests | 52
3 files changed, 71 insertions, 122 deletions
diff --git a/framework/exectest.py b/framework/exectest.py
index e41cd21a..91a674e0 100644
--- a/framework/exectest.py
+++ b/framework/exectest.py
@@ -26,24 +26,21 @@
 import subprocess
 
 from core import Test, testBinDir, TestResult
 
-def add_plain_test(group, name):
-    group[name] = PlainExecTest([name, '-auto'])
-
 #############################################################################
-##### PlainExecTest: Simply run an executable
-##### Expect one line prefixed PIGLIT: in the output, which contains a
-##### result dictionary. The plain output is appended to this dictionary
+##### ExecTest: A shared base class for tests that simply run an executable.
 #############################################################################
-class PlainExecTest(Test):
+
+class ExecTest(Test):
     def __init__(self, command):
         Test.__init__(self)
         self.command = command
-        # Prepend testBinDir to the path.
-        self.command[0] = testBinDir + self.command[0]
         self.env = {}
 
-    def run(self):
+    def interpretResult(self, out, results):
+        raise NotImplementedError
+        return out
+    def run(self):
         fullenv = os.environ.copy()
         for e in self.env:
             fullenv[e] = str(self.env[e])
@@ -58,21 +55,9 @@ class PlainExecTest(Test):
             )
 
         out, err = proc.communicate()
 
-            outlines = out.split('\n')
-            outpiglit = map(lambda s: s[7:], filter(lambda s: s.startswith('PIGLIT:'), outlines))
-
            results = TestResult()
-            if len(outpiglit) > 0:
-                try:
-                    results.update(eval(''.join(outpiglit), {}))
-                    out = '\n'.join(filter(lambda s: not s.startswith('PIGLIT:'), outlines))
-                except:
-                    results['result'] = 'fail'
-                    results['note'] = 'Failed to parse result string'
-
-            if 'result' not in results:
-                results['result'] = 'fail'
+            out = self.interpretResult(out, results)
 
             if proc.returncode == -5:
                 results['result'] = 'trap'
@@ -92,11 +77,17 @@ class PlainExecTest(Test):
                 results['result'] = 'fail'
                 results['note'] = 'Returncode was %d' % (proc.returncode)
 
-            self.handleErr(results, err)
-
+            env = ''
+            for key in self.env:
+                env = env + key + '="' + self.env[key] + '" '
+            if env:
+                results['environment'] = env
             results['info'] = "Returncode: %d\n\nErrors:\n%s\n\nOutput:\n%s" % (proc.returncode, err, out)
             results['returncode'] = proc.returncode
             results['command'] = ' '.join(self.command)
+
+            self.handleErr(results, err)
+
         else:
             results = TestResult()
             if 'result' not in results:
@@ -105,3 +96,33 @@
 
         return results
 
+
+def add_plain_test(group, name):
+    group[name] = PlainExecTest([name, '-auto'])
+
+#############################################################################
+##### PlainExecTest: Run a "native" piglit test executable
+##### Expect one line prefixed PIGLIT: in the output, which contains a
+##### result dictionary. The plain output is appended to this dictionary
+#############################################################################
+class PlainExecTest(ExecTest):
+    def __init__(self, command):
+        ExecTest.__init__(self, command)
+        # Prepend testBinDir to the path.
+        self.command[0] = testBinDir + self.command[0]
+
+    def interpretResult(self, out, results):
+        outlines = out.split('\n')
+        outpiglit = map(lambda s: s[7:], filter(lambda s: s.startswith('PIGLIT:'), outlines))
+
+        if len(outpiglit) > 0:
+            try:
+                results.update(eval(''.join(outpiglit), {}))
+                out = '\n'.join(filter(lambda s: not s.startswith('PIGLIT:'), outlines))
+            except:
+                results['result'] = 'fail'
+                results['note'] = 'Failed to parse result string'
+
+        if 'result' not in results:
+            results['result'] = 'fail'
+        return out
diff --git a/framework/gleantest.py b/framework/gleantest.py
index 0b02491a..254cfcc3 100644
--- a/framework/gleantest.py
+++ b/framework/gleantest.py
@@ -25,6 +25,7 @@ import os
 import subprocess
 
 from core import checkDir, testBinDir, Test, TestResult
+from exectest import ExecTest
 
 #############################################################################
 ##### GleanTest: Execute a sub-test of Glean
@@ -35,68 +36,27 @@ def gleanExecutable():
 def gleanResultDir():
     return os.path.join('.', 'results', 'glean')
 
-class GleanTest(Test):
+class GleanTest(ExecTest):
     globalParams = []
 
     def __init__(self, name):
-        Test.__init__(self)
-        self.name = name
-        self.command = \
-            [gleanExecutable(), "-r", os.path.join(gleanResultDir(), self.name),
+        ExecTest.__init__(self, \
+            [gleanExecutable(), "-r", os.path.join(gleanResultDir(), name),
             "-o",
             "-v", "-v", "-v",
-            "-t", "+"+self.name]
-        self.env = {}
-
-    def run(self):
-        results = TestResult()
-
-        fullenv = os.environ.copy()
-        for e in self.env:
-            fullenv[e] = str(self.env[e])
+            "-t", "+"+name])
 
-        checkDir(os.path.join(gleanResultDir(), self.name), False)
+        checkDir(os.path.join(gleanResultDir(), name), False)
 
-        glean = subprocess.Popen(
-            self.command + GleanTest.globalParams,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            env=fullenv,
-            universal_newlines=True
-        )
-
-        out, err = glean.communicate()
+        self.name = name
 
-        results['result'] = 'pass'
+    def run(self):
+        self.command += GleanTest.globalParams
+        return ExecTest.run(self)
 
-        if glean.returncode == -5:
-            results['result'] = 'trap'
-        elif glean.returncode == -6:
-            results['result'] = 'abort'
-        elif glean.returncode in (-8, -10, -11):
-            results['result'] = 'crash'
-        elif glean.returncode == -1073741819:
-            # 0xc0000005
-            # Windows EXCEPTION_ACCESS_VIOLATION
-            results['result'] = 'crash'
-        elif glean.returncode == -1073741676:
-            # 0xc0000094
-            # Windows EXCEPTION_INT_DIVIDE_BY_ZERO
-            results['result'] = 'crash'
-        elif glean.returncode != 0 or out.find('FAIL') >= 0:
+    def interpretResult(self, out, results):
+        if out.find('FAIL') >= 0:
             results['result'] = 'fail'
-
-        results['returncode'] = glean.returncode
-        results['command'] = ' '.join(self.command + GleanTest.globalParams)
-
-        env = ''
-        for key in self.env:
-            env = env + key + '="' + self.env[key] + '" ';
-        results['environment'] = env
-
-        self.handleErr(results, err)
-
-        results['info'] = "Returncode: %d\n\nErrors:\n%s\n\nOutput:\n%s" % (glean.returncode, err, out)
-
-        return results
-
+        else:
+            results['result'] = 'pass'
+        return out
diff --git a/tests/gtf.tests b/tests/gtf.tests
index 323e39f4..cc5f59b5 100644
--- a/tests/gtf.tests
+++ b/tests/gtf.tests
@@ -42,50 +42,18 @@ if not os.path.exists(os.path.join(testBinDir, 'GTF')):
 # Chase the piglit/bin/GTF symlink to find where the tests really live.
 gtfroot = path.dirname(path.realpath(path.join(testBinDir, 'GTF')))
 
-class GTFTest(PlainExecTest):
+class GTFTest(ExecTest):
     pass_re = re.compile(r'Regression PASSED all 1 tests')
 
-    def __init__(self, command):
-        PlainExecTest.__init__(self, command)
-        self.env = dict()
+    def __init__(self, testpath):
+        ExecTest.__init__(self, [path.join(testBinDir, 'GTF'), '-noimagefileio', '-id=7', '-run=' + testpath])
 
-    def run(self):
-        fullenv = os.environ.copy()
-        for e in self.env:
-            fullenv[e] = str(self.env[e])
-
-        if self.command is not None:
-            proc = subprocess.Popen(
-                self.command,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE,
-                env=fullenv,
-                universal_newlines=True
-            )
-            out, err = proc.communicate()
-
-            test_pass = GTFTest.pass_re.search(out) is not None
-
-            results = TestResult()
-            if test_pass:
-                results['result'] = 'pass'
-            else:
-                results['result'] = 'fail'
-            if proc.returncode != 0:
-                results['result'] = 'fail'
-                results['note'] = 'Returncode was %d' % (proc.returncode)
-
-            self.handleErr(results, err)
-
-            results['info'] = "Returncode: %d\n\nErrors:\n%s\n\nOutput:\n%s" % (proc.returncode, err, out)
-            results['returncode'] = proc.returncode
-
-        else:
-            results = TestResult()
-            if 'result' not in results:
-                results['result'] = 'skip'
-
-        return results
+    def interpretResult(self, out, results):
+        if self.pass_re.search(out) is not None:
+            results['result'] = 'pass'
+        else:
+            results['result'] = 'fail'
+        return out
 
 # Populate a group with tests in the given directory:
 #
@@ -105,7 +73,7 @@ def populateTests(group, directory):
             name = entry[:-5]
             # Don't add example.test...it's not a real test.
             if name != 'example':
-                group[name] = GTFTest(['GTF', '-noimagefileio', '-id=7', '-run=' + pathname])
+                group[name] = GTFTest(pathname)
 
 # Create a new top-level 'gtf' category for all Khronos ES 2.0 tests
 gtf = Group()
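
As a rough usage sketch (not from this patch; the group dictionary and test names below are made up), profiles keep driving the refactored classes the same way as before:

# Hypothetical profile snippet -- test names are examples only.
from exectest import add_plain_test
from gleantest import GleanTest

group = {}
add_plain_test(group, 'fbo-clear')          # becomes PlainExecTest(['fbo-clear', '-auto'])
group['glean-basic'] = GleanTest('basic')   # GleanTest appends globalParams in run()
# GTFTest instances are built by tests/gtf.tests, one per .test file it finds.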