RawSpeed
fast raw decoding library
rsbench.py
'''Test module to collect google benchmark results.'''
from litsupport import shellcommand
from litsupport import testplan
import json
import lit.Test


# This is largely duplicated/copy-pasted from LLVM
# test-suite/litsupport/modules/microbenchmark.py

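# Rewrites one benchmark invocation so that Google Benchmark emits
# machine-readable output: "--benchmark_format=json" is appended to the
# arguments and stdout is redirected into a per-test "<tmpBase>.bench.json"
# file, which is remembered on the context for the collector below. As an
# illustration only, a hypothetical RUN line such as "rsbench some.raw"
# would effectively become
# "rsbench some.raw --benchmark_format=json > <tmpBase>.bench.json".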
def _mutateCommandLine(context, commandline):
    cmd = shellcommand.parse(commandline)
    context.rawfilename = cmd.arguments[0]
    cmd.arguments.append("--benchmark_format=json")
    # We need stdout ourselves to get the benchmark JSON data.
    if cmd.stdout is not None:
        raise Exception("Rerouting stdout not allowed for microbenchmarks")
    benchfile = context.tmpBase + '.bench.json'
    cmd.stdout = benchfile
    context.microbenchfiles.append(benchfile)

    return cmd.toCommandline()

def _mutateScript(context, script):
    return testplan.mutateScript(context, script, _mutateCommandLine)

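# Parses each captured .bench.json file and turns every benchmark entry into
# a lit micro-test result. For reference, the --benchmark_format=json payload
# is assumed to look roughly like this (the raw file name "some.raw", the
# "decode" suffix and the numbers are made up; the field names are the ones
# consumed below):
#
#   {
#     "benchmarks": [
#       {
#         "name": "some.raw/decode",
#         "real_time": 12.3,
#         "cpu_time": 12.1,
#         "time_unit": "ms",
#         "WallTime,s": 0.0123
#       }
#     ]
#   }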
def _collectMicrobenchmarkTime(context, microbenchfiles):
    for f in microbenchfiles:
        content = context.read_result_file(context, f)
        data = json.loads(content)

        # Create a micro result for each benchmark.
        for benchmark in data['benchmarks']:
            # Name for the MicroBenchmark.
            benchmarkname = benchmark['name']
            # Drop the raw file name from the name we will report.
            assert benchmarkname.startswith(context.rawfilename + '/')
            benchmarkname = benchmarkname[len(context.rawfilename + '/'):]
            # Create a Result object with PASS.
            microBenchmark = lit.Test.Result(lit.Test.PASS)
            # Report the wall time.
            exec_time_metric = lit.Test.toMetricValue(benchmark['WallTime,s'])
            microBenchmark.addMetric('exec_time', exec_time_metric)
            # Propagate the perf profile to the microbenchmark.
            if hasattr(context, 'profilefile'):
                microBenchmark.addMetric(
                    'profile', lit.Test.toMetricValue(context.profilefile))
            # Propagate the remaining benchmark fields as metrics.
            for field in benchmark.keys():
                if field in ['real_time', 'cpu_time', 'time_unit']:
                    continue
                metric = lit.Test.toMetricValue(benchmark[field])
                microBenchmark.addMetric(field, metric)
            # Record the micro result under the shortened name.
            context.micro_results[benchmarkname] = microBenchmark

    # Return the number of microbenchmarks collected as a metric for the
    # base test.
    return {
        'rsbench': lit.Test.toMetricValue(len(context.micro_results))
    }

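# Module entry point, following the litsupport convention from the LLVM
# test-suite: wires the command-line rewrite into the run script and
# registers the collector that produces the per-benchmark micro results.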
def mutatePlan(context, plan):
    context.microbenchfiles = []
    plan.runscript = _mutateScript(context, plan.runscript)
    plan.metric_collectors.append(
        lambda context: _collectMicrobenchmarkTime(context,
                                                   context.microbenchfiles)
    )