benchmarks: changeset e77ed7d18a68
--- a/perf.py
+++ b/perf.py
@@ -420,7 +420,17 @@ class RawData(object):
         self.inst_output = inst_output


-class BenchmarkResult(object):
+class BaseBenchmarkResult(object):
+
+
+class BenchmarkResult(BaseBenchmarkResult):
     """An object representing data from a succesful benchmark run."""

     def __init__(self, min_base, min_changed, delta_min, avg_base,
@@ -458,11 +468,9 @@ class BenchmarkResult(object):
         return ["%f" % self.min_base, "%f" % self.min_changed]


-class BenchmarkError(object):
+class BenchmarkError(BaseBenchmarkResult):
     """Object representing the error from a failed benchmark run."""

-
     def __init__(self, e):
         self.msg = str(e)

@@ -470,11 +478,9 @@ class BenchmarkError(object):
         return self.msg


-class MemoryUsageResult(object):
+class MemoryUsageResult(BaseBenchmarkResult):
     """Memory usage data from a successful benchmark run."""

-
     def __init__(self, max_base, max_changed, delta_max, timeline_link):
         self.max_base = max_base
         self.max_changed = max_changed
@@ -496,11 +502,9 @@ class MemoryUsageResult(object):
         return ["%.3f" % self.max_base, "%.3f" % self.max_changed]


-class SimpleBenchmarkResult(object):
+class SimpleBenchmarkResult(BaseBenchmarkResult):
     """Object representing result data from a successful benchmark run."""

-
     def __init__(self, base_time, changed_time, time_delta):
         self.base_time = base_time
         self.changed_time = changed_time
@@ -515,11 +519,9 @@ class SimpleBenchmarkResult(object):
         return ["%f" % self.base_time, "%f" % self.changed_time]


-class InstrumentationResult(object):
+class InstrumentationResult(BaseBenchmarkResult):
     """Object respresenting a --diff_instrumentation result."""

-
     def __init__(self, inst_diff, options):
         self.inst_diff = inst_diff
         self._control_label = options.control_label
@@ -599,6 +601,33 @@ def _FormatPerfDataForTable(base_label,
     return table


+def _FormatPyBenchDataForTable(base_label, changed_label, results):
+    """Prepare pybench benchmark data for tabular output.
+
+    Args:
+        base_label: label for the control binary.
+        changed_label: label for the experimental binary.
+        results: iterable of (bench_name, result) 2-tuples where bench_name is
+            the name of the benchmark being reported; and result is a
+            PyBenchBenchmarkResult object.
+
+    Returns:
+        A list of 4-tuples, where each tuple corresponds to a row in the output
+        table, and each item in the tuples corresponds to a cell in the output
+        table.
+    """
+    table = [("Benchmark", base_label, changed_label, "Change")]
+    for (bench_name, result) in results:
+        table.append((bench_name,
+                      # Limit the precision for conciseness in the table.
+                      str(round(result.avg_base, 2)),
+                      str(round(result.avg_changed, 2)),
+                      result.delta_avg))
+
+    return table
+
+
 def _FormatMemoryUsageForTable(base_label, changed_label, results):
     """Prepare memory usage data for tabular output.
@@ -644,6 +673,8 @@ def FormatOutputAsTable(base_label, chan
         table = _FormatPerfDataForTable(base_label, changed_label, results)
     elif isinstance(results[0][1], MemoryUsageResult):
         table = _FormatMemoryUsageForTable(base_label, changed_label, results)
+    elif isinstance(results[0][1], PyBenchBenchmarkResult):
+        table = _FormatPyBenchDataForTable(base_label, changed_label, results)
     else:
         raise TypeError("Unknown result type: %r" % type(results[0][1]))
@@ -1183,7 +1214,7 @@ def VersionRange(minver=None, maxver=Non
     return deco


-class PyBenchBenchmarkResult(object):
+class PyBenchBenchmarkResult(BaseBenchmarkResult):

     def __init__(self, min_base, min_changed, delta_min, avg_base,
                  avg_changed, delta_avg):
@@ -1199,6 +1230,10 @@ class PyBenchBenchmarkResult(object):
                  "Avg: %(avg_base)d -> %(avg_changed)d: %(delta_avg)s")
                 % self.__dict__)

+    def as_csv(self):
+        # Min base, min changed
+        return ["%f" % self.min_base, "%f" % self.min_changed]
+

 _PY_BENCH_TOTALS_LINE = re.compile("""
     Totals:\s+(?P<min_base>\d+)ms\s+