author    Martin Peres <martin.peres@linux.intel.com>  2015-07-21 23:27:24 +0300
committer Martin Peres <martin.peres@linux.intel.com>  2015-07-21 23:27:24 +0300
commit    9300aa3720d85d3ee9a57608bcbed73163612879 (patch)
tree      a37b84fad6ba90b00efdad8ecfef155dc9393e22 /stats
parent    b2e0a768663f6b31bea08908ba22bd689a5834ea (diff)
stats: add tools to generate offline reports
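
Usage sketch (assuming a log folder produced by an ezbench run, i.e. a
directory containing a commit_list file and one <sha1>_bench_<name> CSV
file per commit/benchmark pair):

    ./stats/gen_report.py <log_folder>

gen_report.py averages the per-commit samples, shells out to
test_report.R to render one PNG per result, and writes index.html into
the log folder.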
Diffstat (limited to 'stats')
-rwxr-xr-x  stats/gen_report.py  287
-rwxr-xr-x  stats/test_report.R   27
2 files changed, 314 insertions, 0 deletions
diff --git a/stats/gen_report.py b/stats/gen_report.py
new file mode 100755
index 0000000..38a7c8d
--- /dev/null
+++ b/stats/gen_report.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python3
+
+import matplotlib
+matplotlib.use('Agg')  # render off-screen; reports are generated without a display
+import matplotlib.pyplot as plt
+import numpy as np
+import subprocess
+import argparse
+import glob
+import csv
+import sys
+import os
+
+# constants
+html_name = "index.html"
+report_folder = "ezbench_report/"
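+# NOTE: both paths are relative to the log folder, which the script chdirs into below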
+
+class Benchmark:
+    def __init__(self, full_name):
+        self.full_name = full_name
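+        # Mean value of this benchmark on the previous commit, used for the %-diff column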
+        self.prevValue = -1
+
+class BenchResult:
+    def __init__(self, commit, benchmark, data_raw_file, img_src_name):
+        self.commit = commit
+        self.benchmark = benchmark
+        self.data_raw_file = data_raw_file
+        self.img_src_name = img_src_name
+        self.data = []  # raw samples read from data_raw_file, one float per CSV row
+
+class Commit:
+    def __init__(self, sha1, full_name, compile_log):
+        self.sha1 = sha1
+        self.full_name = full_name
+        self.compile_log = compile_log
+        self.results = []
+
+benchmarks = []
+commits = []
+
+# parse the options
+parser = argparse.ArgumentParser()
+parser.add_argument("log_folder")
+args = parser.parse_args()
+
+# Look for the commit_list file
+os.chdir(args.log_folder)
+
+try:
+    with open("commit_list", "r") as f:
+        commitsLines = f.readlines()
+except IOError:
+    sys.stderr.write("The log folder '{0}' does not contain a commit_list file\n".format(args.log_folder))
+    sys.exit(1)
+
+# Check that there are commits
+if len(commitsLines) == 0:
+    sys.stderr.write("The commit_list file is empty\n")
+    sys.exit(2)
+
+# Gather all the information from the commits
+for commitLine in commitsLines:
+    full_name = commitLine.strip(' \t\n\r')
+    sha1 = commitLine.split()[0]
+    compile_log = sha1 + "_compile_log"
+    commit = Commit(sha1, full_name, compile_log)
+
+    # find all the benchmarks run on this commit
+    benchFiles = glob.glob("{sha1}_bench_*".format(sha1=commit.sha1))
+    for benchFile in benchFiles:
+        # Get the bench name
+        bench_name = benchFile.replace("{sha1}_bench_".format(sha1=commit.sha1), "")
+        if bench_name.endswith(".png"):
+            continue
+
+        # Find the right Benchmark or create one if none are found
+        try:
+            benchmark = next(b for b in benchmarks if b.full_name == bench_name)
+        except StopIteration:
+            benchmark = Benchmark(bench_name)
+            benchmarks.append(benchmark)
+
+        # Create the result object
+        result = BenchResult(commit, benchmark, benchFile, report_folder + benchFile + ".png")
+
+        # Read the data
+        with open(benchFile, 'rt') as f:
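+            # Skip the first line when the CSV sniffer detects a header row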
+            if csv.Sniffer().has_header(f.read(1024)):
+                f.seek(0)
+                next(f)
+            else:
+                f.seek(0)
+            reader = csv.reader(f)
+            try:
+                for row in reader:
+                    result.data.append(float(row[0]))
+            except csv.Error as e:
+                sys.stderr.write('file %s, line %d: %s\n' % (benchFile, reader.line_num, e))
+                sys.exit(3)
+
+        # Add the result to the commit's results
+        commit.results.append(result)
+
+    # Sort the results by benchmark name and add the commit to the list of commits
+    commit.results.sort(key=lambda res: res.benchmark.full_name)
+    commits.append(commit)
+
+# Sort the list of benchmarks
+benchmarks = sorted(benchmarks, key=lambda bench: bench.full_name)
+
+def getResultsBenchmarkDiffs(benchmark):
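+    """Return a list of [commit index, % diff vs the previous commit] pairs for one benchmark."""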
+    prevValue = -1
+    results = []
+
+    # Walk the commits in order; each point is the %-change of this commit's
+    # mean relative to the previous commit that has data for this benchmark
+    i = 0
+    for commit in commits:
+        for result in commit.results:
+            if result.benchmark != benchmark:
+                continue
+
+            value = np.array(result.data).mean()
+
+            if prevValue >= 0:
+                diff = (value * 100.0 / prevValue) - 100.0
+            else:
+                diff = 0
+            prevValue = value
+
+            results.append([i, diff])
+            i += 1
+
+    return results
+
+# Create a folder for the report images (it may already exist from a previous run)
+os.makedirs(report_folder, exist_ok=True)
+
+# Generate the trend graph
+plt.figure(figsize=(15,3))
+plt.xlabel('Commit #')
+plt.ylabel('Perf. diff. with the prev. commit (%)')
+plt.grid(True)
+for benchmark in benchmarks:
+    data = getResultsBenchmarkDiffs(benchmark)
+
+    x_val = [x[0] for x in data]
+    y_val = [x[1] for x in data]
+
+    plt.plot(x_val, y_val, label=benchmark.full_name)
+
+plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
+           ncol=3, mode="expand", borderaxespad=0.)
+plt.savefig(report_folder + 'overview.svg', bbox_inches='tight')
+
+# Generate the images by calling the R script on each result (HACK, do that in python!)
+for commit in commits:
+    for result in commit.results:
+        subprocess.call(['../../stats/test_report.R', result.data_raw_file, result.img_src_name])
+
+
+# Generate the report
+html_template="""
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+    <head>
+        <title>Performance report on the run named '{run_name}'</title>
+    </head>
+
+    <body>
+        <h1>Performance report on the run named '{run_name}'</h1>
+
+        <h2>Trends</h2>
+
+        <center><img src="{report_folder}/overview.svg" alt="Trends"/></center>
+
+        <h2>Stats</h2>
+
+        <table border="1">
+            <tr>
+                <th>Commit SHA1</th>
+                <th>Geometric mean</th>
+                {tbl_hdr_benchmarks}
+            </tr>
+            {tbl_entries}
+        </table>
+
+        <h2>Commits</h2>
+        {commits}
+    </body>
+
+</html>
+"""
+
+table_commit_template="""
+    <tr>
+        <td><a href="#commit_{sha1}">{sha1}</a></td>
+        <td>{geom_mean}</td>
+        {tbl_res_benchmarks}
+    </tr>
+"""
+
+table_entry_template="""
+<td bgcolor="{color}">
+    <a href="#commit_{sha1}_bench_{bench_name}">{value:.2f} ({diff:.2f} %)</a>
+</td>"""
+
+commit_template="""
+    <h3 id="commit_{sha1}">{commit}</h3>
+    Below are the <a href="{compile_log}">compilation logs</a> and the benchmarks found for commit {sha1}:
+    {benchs}"""
+
+bench_template="""
+    <h4 id="commit_{sha1}_bench_{bench_name}">{bench_name}</h4>
+
+    <a href="{raw_data_file}">Original data</a>
+
+    <img src="{img_src}" alt="Test's time series and density of probability" />"""
+
+# Generate the HTML for all commits
+commits_txt = ""
+tbl_entries_txt = ""
+for commit in commits:
+    benchs_txt = ""
+    tbl_res_benchmarks = ""
+    for result in commit.results:
+        value = np.array(result.data).mean()
+
+        if result.benchmark.prevValue > 0:
+            diff = (value * 100.0 / result.benchmark.prevValue) - 100.0
+        else:
+            diff = 0
+        result.benchmark.prevValue = value
+
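+        # Color the cell: red for a regression beyond 1.5%, green for an improvement beyond 1.5%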
+        if diff < -1.5:
+            color = "#FF0000"
+        elif diff > 1.5:
+            color = "#00FF00"
+        else:
+            color = "#FFFFFF"
+
+        # Generate the html
+        benchs_txt += bench_template.format(sha1=commit.sha1,
+                                            bench_name=result.benchmark.full_name,
+                                            img_src=result.img_src_name,
+                                            raw_data_file=result.data_raw_file)
+
+        tbl_res_benchmarks += table_entry_template.format(sha1=commit.sha1,
+                                                          bench_name=result.benchmark.full_name,
+                                                          value=value,
+                                                          diff=diff,
+                                                          color=color)
+
+    # Generate the table row and the commit section (geom_mean is still a placeholder)
+    tbl_entries_txt += table_commit_template.format(sha1=commit.sha1, geom_mean=0,
+                                                    tbl_res_benchmarks=tbl_res_benchmarks)
+    commits_txt += commit_template.format(commit=commit.full_name,
+                                          sha1=commit.sha1,
+                                          benchs=benchs_txt,
+                                          compile_log=commit.compile_log)
+
+# generate the table's header
+tbl_hdr_benchmarks = ""
+for benchmark in benchmarks:
+    tbl_hdr_benchmarks += "<th>{benchmark}</th>\n".format(benchmark=benchmark.full_name)
+
+# Generate the final html file
+html = html_template.format(run_name=args.log_folder,
+                            commits=commits_txt,
+                            tbl_entries=tbl_entries_txt,
+                            tbl_hdr_benchmarks=tbl_hdr_benchmarks,
+                            report_folder=report_folder)
+
+with open(html_name, 'w') as f:
+    f.write(html)
+    print("Output HTML generated at: {0}/{1}".format(os.getcwd(), html_name))
diff --git a/stats/test_report.R b/stats/test_report.R
new file mode 100755
index 0000000..73bc144
--- /dev/null
+++ b/stats/test_report.R
@@ -0,0 +1,27 @@
+#!/usr/bin/Rscript
+
+args <- commandArgs(trailingOnly = TRUE)
+if(length(args) != 2) {
+    cat("Usage:\n\t./test_report.R input.csv output.png\n\n")
+    q(save="no", status=1)  # exit with an error code on bad usage
+}
+
+data <- read.csv(args[1])
+
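+# Kernel density estimate of the FPS samples (plotted in the right pane)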
+d <- density(data[[1]])
+
+png(args[2], width = 1900, height = 200)
+
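+# Two panes side by side: the FPS time series (wider, left) and its density plot (right)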
+layout(matrix(c(1,2), 1), c(3.5,1), c(1,3))
+par(mar=c(4.3, 4.3, 2.0, 0.1))
+
+plot(data[[1]], ylab="FPS", xlab="FPS sample", main="Time series of the FPS",
+     cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)
+lines(data[[1]], type="l")
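+# Horizontal lines at the mean (blue, col=4) and the median (red, col=2)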
+abline(h=mean(data[[1]]),col=4)
+abline(h=median(data[[1]]),col=2)
+
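+# Density pane, with vertical lines at the mean (blue) and the median (red)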
+plot(d, xlab="FPS", main="Density function of the FPS", cex.lab=1.5,
+     cex.axis=1.5, cex.main=1.5, cex.sub=1.5)
+abline(v=mean(data[[1]]),col=4)
+abline(v=median(data[[1]]),col=2)