diff options
author | Kenneth Graunke <kenneth@whitecape.org> | 2012-08-29 22:15:35 -0700 |
---|---|---|
committer | Eric Anholt <eric@anholt.net> | 2013-03-22 13:41:46 -0700 |
commit | 97969b875c112d4bd8dfb76f10eddde462abecd2 (patch) | |
tree | 8403c520f4f311d56c67c7c1b05fc8bfd993652c | |
parent | c0d4d1595db4ba5fa53fe41bb1d673351d88a9a8 (diff) |
run.py: Process shaders concurrently.
Now that we've accumulated a non-trivial amount of shaders, running
shader-db takes far too long. Parsing the shaders in parallel is
embarrassingly easy and gives roughly a 4x speed-up on my dual-core
hyperthreaded Ivybridge system.
This may not be the most efficient way to implement the thread pools,
but it seems to work well enough. Requires Python 3.2.
-rwxr-xr-x | run.py | 18 |
1 file changed, 10 insertions, 8 deletions
@@ -4,6 +4,8 @@ from getopt import getopt, GetoptError import re import sys, os import subprocess +from concurrent.futures import ThreadPoolExecutor +from multiprocessing import cpu_count def usage(): USAGE = """\ @@ -34,8 +36,7 @@ def run_test(filename): stderr=subprocess.PIPE, env=env) except: - print(filename + " FAIL") - return + return filename + " FAIL" try: (stdout, stderr) = p.communicate() @@ -43,8 +44,7 @@ def run_test(filename): except KeyboardInterrupt: exit(1) except: - print(filename + " FAIL ") - return + return filename + " FAIL" with open(filename + '.out', 'w') as file: file.write(results) @@ -70,10 +70,11 @@ def run_test(filename): elif (re.search(re_align, line)): counts[current_type] = counts[current_type] + 1 + out = '' for t in counts: if counts[t] != 0: - print(filename + " " + t + ": " + str(counts[t])) - sys.stdout.flush() + out += "".join([filename, " ", t, ": ", str(counts[t]), "\n"]) + return out def main(): try: @@ -91,8 +92,9 @@ def main(): if len(args) < 1: usage() - for filename in args: - run_test(filename) + executor = ThreadPoolExecutor(cpu_count()) + for t in executor.map(run_test, args): + sys.stdout.write(t) if __name__ == "__main__": main() |