From 4f206f743c660aece71d0ba09a29fcb24d071835 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 7 Nov 2011 11:53:24 -0200 Subject: intel_gpu_analyze: cross-reference performance analysis Add cross-reference for seconds of execution and specific process, and link perf results from the main execution page. Signed-off-by: Eugeni Dodonov --- tools/intel_gpu_analyze.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/tools/intel_gpu_analyze.py b/tools/intel_gpu_analyze.py index a9edcfe..c2e60e9 100755 --- a/tools/intel_gpu_analyze.py +++ b/tools/intel_gpu_analyze.py @@ -31,6 +31,12 @@ p {

Execution lasted %(duration).2f seconds

""" +PERF_REFERENCE=""" +

+Kernel perf results analysis +

+""" + MINMAXAVG="""

%(descr)s:

@@ -54,8 +60,8 @@ PERF_ITERATION="""

Second %(sec)d

-Processes in execution: %(processes)s -
+ +Processes in execution: """ PERF_SECOND_TITLE=""" @@ -63,8 +69,14 @@ PERF_SECOND_TITLE="""
""" +PERF_PROCESS_REF=""" +%(process)s +""" + PERF_PROCESS=""" +

Process %(process)s

+
""" PERF_TOP=""" @@ -174,7 +186,9 @@ def analyse_perf(logfile, out_dir, perf="perf.html"): for sec in seconds: print >>output, PERF_ITERATION % {'sec': sec, 'processes': ', '.join(results[sec].keys())} for process in results[sec]: - print >>output, PERF_PROCESS % {'process': process} + print >>output, PERF_PROCESS_REF % {'sec': sec, 'process': process} + for process in results[sec]: + print >>output, PERF_PROCESS % {'sec': sec, 'process': process} # let's sort functions functions_by_time = sorted(results[sec][process], key=lambda key: results[sec][process][key], reverse=True) top = "" @@ -184,7 +198,7 @@ def analyse_perf(logfile, out_dir, perf="perf.html"): print >>output, TAIL output.close() -def analyse(results, title, out_dir, summary="index.html"): +def analyse(results, title, out_dir, perf_logfile=None, summary="index.html"): """Analyses intel_gpu_top results""" # calculate min/max/avg values keys = results.keys() @@ -231,6 +245,10 @@ def analyse(results, title, out_dir, summary="index.html"): 'avg': avgval_s, } + # Do we have perf results? + if perf_logfile: + print >>output, PERF_REFERENCE % {'perf': 'perf.html'} + # graphics try: import pylab @@ -461,6 +479,6 @@ if __name__ == "__main__": pass if logfile: results = collect(logfile) - analyse(results, title, output) + analyse(results, title, output, perf_logfile=perf_logfile) if perf_logfile: analyse_perf(perf_logfile, output) -- cgit v1.2.3