⬆️ upgraded Catch and Google Benchmark
- Catch 1.12.0 -> 1.12.2
- Google Benchmark 1.3.0 -> 1.4.1
parent daeb48b01a
commit 06731b14d7
76 changed files with 2828 additions and 341 deletions
102  benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json  (vendored executable file)
@@ -0,0 +1,102 @@
{
  "context": {
    "date": "2016-08-02 17:44:46",
    "num_cpus": 4,
    "mhz_per_cpu": 4228,
    "cpu_scaling_enabled": false,
    "library_build_type": "release"
  },
  "benchmarks": [
    {
      "name": "BM_SameTimes",
      "iterations": 1000,
      "real_time": 10,
      "cpu_time": 10,
      "time_unit": "ns"
    },
    {
      "name": "BM_2xFaster",
      "iterations": 1000,
      "real_time": 50,
      "cpu_time": 50,
      "time_unit": "ns"
    },
    {
      "name": "BM_2xSlower",
      "iterations": 1000,
      "real_time": 50,
      "cpu_time": 50,
      "time_unit": "ns"
    },
    {
      "name": "BM_1PercentFaster",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_1PercentSlower",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_10PercentFaster",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_10PercentSlower",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_100xSlower",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_100xFaster",
      "iterations": 1000,
      "real_time": 10000,
      "cpu_time": 10000,
      "time_unit": "ns"
    },
    {
      "name": "BM_10PercentCPUToTime",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_ThirdFaster",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_BadTimeUnit",
      "iterations": 1000,
      "real_time": 0.4,
      "cpu_time": 0.5,
      "time_unit": "s"
    },
    {
      "name": "BM_DifferentTimeUnit",
      "iterations": 1,
      "real_time": 1,
      "cpu_time": 1,
      "time_unit": "s"
    }
  ]
}
102  benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json  (vendored executable file)
@@ -0,0 +1,102 @@
{
  "context": {
    "date": "2016-08-02 17:44:46",
    "num_cpus": 4,
    "mhz_per_cpu": 4228,
    "cpu_scaling_enabled": false,
    "library_build_type": "release"
  },
  "benchmarks": [
    {
      "name": "BM_SameTimes",
      "iterations": 1000,
      "real_time": 10,
      "cpu_time": 10,
      "time_unit": "ns"
    },
    {
      "name": "BM_2xFaster",
      "iterations": 1000,
      "real_time": 25,
      "cpu_time": 25,
      "time_unit": "ns"
    },
    {
      "name": "BM_2xSlower",
      "iterations": 20833333,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_1PercentFaster",
      "iterations": 1000,
      "real_time": 98.9999999,
      "cpu_time": 98.9999999,
      "time_unit": "ns"
    },
    {
      "name": "BM_1PercentSlower",
      "iterations": 1000,
      "real_time": 100.9999999,
      "cpu_time": 100.9999999,
      "time_unit": "ns"
    },
    {
      "name": "BM_10PercentFaster",
      "iterations": 1000,
      "real_time": 90,
      "cpu_time": 90,
      "time_unit": "ns"
    },
    {
      "name": "BM_10PercentSlower",
      "iterations": 1000,
      "real_time": 110,
      "cpu_time": 110,
      "time_unit": "ns"
    },
    {
      "name": "BM_100xSlower",
      "iterations": 1000,
      "real_time": 1.0000e+04,
      "cpu_time": 1.0000e+04,
      "time_unit": "ns"
    },
    {
      "name": "BM_100xFaster",
      "iterations": 1000,
      "real_time": 100,
      "cpu_time": 100,
      "time_unit": "ns"
    },
    {
      "name": "BM_10PercentCPUToTime",
      "iterations": 1000,
      "real_time": 110,
      "cpu_time": 90,
      "time_unit": "ns"
    },
    {
      "name": "BM_ThirdFaster",
      "iterations": 1000,
      "real_time": 66.665,
      "cpu_time": 66.664,
      "time_unit": "ns"
    },
    {
      "name": "BM_BadTimeUnit",
      "iterations": 1000,
      "real_time": 0.04,
      "cpu_time": 0.6,
      "time_unit": "s"
    },
    {
      "name": "BM_DifferentTimeUnit",
      "iterations": 1,
      "real_time": 1,
      "cpu_time": 1,
      "time_unit": "ns"
    }
  ]
}
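Editor's note: test1_run1.json and test1_run2.json form a before/after pair consumed by the diff-report unit tests in report.py (added below in this commit). A minimal sketch of comparing them by hand with that module; it assumes the working directory is benchmarks/thirdparty/benchmark/tools so that the gbench package and its Inputs directory resolve:

# Sketch: diff the two vendored test inputs with gbench's report module.
import json

from gbench.report import generate_difference_report

with open('gbench/Inputs/test1_run1.json') as f:
    run1 = json.load(f)
with open('gbench/Inputs/test1_run2.json') as f:
    run2 = json.load(f)

# Each row reports the relative change in real and CPU time; e.g.
# BM_2xFaster drops from 50ns to 25ns and is printed as -0.5000.
print('\n'.join(generate_difference_report(run1, run2, use_color=False)))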
81  benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json  (vendored executable file)
@@ -0,0 +1,81 @@
{
  "context": {
    "date": "2016-08-02 17:44:46",
    "num_cpus": 4,
    "mhz_per_cpu": 4228,
    "cpu_scaling_enabled": false,
    "library_build_type": "release"
  },
  "benchmarks": [
    {
      "name": "BM_Hi",
      "iterations": 1234,
      "real_time": 42,
      "cpu_time": 24,
      "time_unit": "ms"
    },
    {
      "name": "BM_Zero",
      "iterations": 1000,
      "real_time": 10,
      "cpu_time": 10,
      "time_unit": "ns"
    },
    {
      "name": "BM_Zero/4",
      "iterations": 4000,
      "real_time": 40,
      "cpu_time": 40,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_Zero",
      "iterations": 2000,
      "real_time": 20,
      "cpu_time": 20,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_Zero/3",
      "iterations": 3000,
      "real_time": 30,
      "cpu_time": 30,
      "time_unit": "ns"
    },
    {
      "name": "BM_One",
      "iterations": 5000,
      "real_time": 5,
      "cpu_time": 5,
      "time_unit": "ns"
    },
    {
      "name": "BM_One/4",
      "iterations": 2000,
      "real_time": 20,
      "cpu_time": 20,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_One",
      "iterations": 1000,
      "real_time": 10,
      "cpu_time": 10,
      "time_unit": "ns"
    },
    {
      "name": "Prefix/BM_One/3",
      "iterations": 1500,
      "real_time": 15,
      "cpu_time": 15,
      "time_unit": "ns"
    },
    {
      "name": "BM_Bye",
      "iterations": 5321,
      "real_time": 11,
      "cpu_time": 63,
      "time_unit": "ns"
    }
  ]
}
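Editor's note: test2_run.json holds two benchmark families (BM_Zero* and BM_One*) in a single run; the family-comparison test in report.py strips each family's name down to a common form with filter_benchmark so the two can be diffed against each other. A hedged sketch of that flow, under the same working-directory assumption as above:

# Sketch: compare the BM_Zero family against the BM_One family from one run,
# mirroring TestReportDifferenceBetweenFamilies in report.py below.
import json

from gbench.report import filter_benchmark, generate_difference_report

with open('gbench/Inputs/test2_run.json') as f:
    run = json.load(f)

# Rewrite each family's names to a common form ('.') so they line up:
zeros = filter_benchmark(run, 'BM_Z.ro', '.')  # e.g. BM_Zero/4 -> ./4
ones = filter_benchmark(run, 'BM_O.e', '.')    # e.g. BM_One/4  -> ./4
print('\n'.join(generate_difference_report(zeros, ones, use_color=False)))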
8  benchmarks/thirdparty/benchmark/tools/gbench/__init__.py  (vendored executable file)
@@ -0,0 +1,8 @@
"""Google Benchmark tooling"""

__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'

__all__ = []
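Editor's note: for reference, the version string assembled above evaluates to '0.5.0dev' (the 'dev' suffix is appended with no separator):

# What gbench.__version__ works out to:
versioninfo = (0, 5, 0)
assert '.'.join(str(v) for v in versioninfo) + 'dev' == '0.5.0dev'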
208  benchmarks/thirdparty/benchmark/tools/gbench/report.py  (vendored executable file)
@@ -0,0 +1,208 @@
"""report.py - Utilities for reporting statistics about benchmark results
"""
import os
import re
import copy


class BenchmarkColor(object):
    def __init__(self, name, code):
        self.name = name
        self.code = code

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.code))

    def __format__(self, format):
        return self.code


# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')


def color_format(use_color, fmt_str, *args, **kwargs):
    """
    Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
    'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
    is False then all color codes in 'args' and 'kwargs' are replaced with
    the empty string.
    """
    assert use_color is True or use_color is False
    if not use_color:
        args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                for arg in args]
        kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                  for key, arg in kwargs.items()}
    return fmt_str.format(*args, **kwargs)


def find_longest_name(benchmark_list):
    """
    Return the length of the longest benchmark name in a given list of
    benchmark JSON objects.
    """
    longest_name = 1
    for bc in benchmark_list:
        if len(bc['name']) > longest_name:
            longest_name = len(bc['name'])
    return longest_name


def calculate_change(old_val, new_val):
    """
    Return a float representing the decimal change between old_val and new_val.
    """
    if old_val == 0 and new_val == 0:
        return 0.0
    if old_val == 0:
        return float(new_val - old_val) / (float(old_val + new_val) / 2)
    return float(new_val - old_val) / abs(old_val)


def filter_benchmark(json_orig, family, replacement=""):
    """
    Apply a filter to the json, keeping only the 'family' of benchmarks.
    """
    regex = re.compile(family)
    filtered = {}
    filtered['benchmarks'] = []
    for be in json_orig['benchmarks']:
        if not regex.search(be['name']):
            continue
        filteredbench = copy.deepcopy(be)  # Do NOT modify the old name!
        filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
        filtered['benchmarks'].append(filteredbench)
    return filtered


def generate_difference_report(json1, json2, use_color=True):
    """
    Calculate and report the difference between each test of two benchmark
    runs specified as 'json1' and 'json2'.
    """
    first_col_width = find_longest_name(json1['benchmarks'])

    def find_test(name):
        for b in json2['benchmarks']:
            if b['name'] == name:
                return b
        return None

    first_col_width = max(first_col_width, len('Benchmark'))
    first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
        'Benchmark', 12 + first_col_width)
    output_strs = [first_line, '-' * len(first_line)]

    gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
    for bn in gen:
        other_bench = find_test(bn['name'])
        if not other_bench:
            continue

        if bn['time_unit'] != other_bench['time_unit']:
            continue

        def get_color(res):
            if res > 0.05:
                return BC_FAIL
            elif res > -0.07:
                return BC_WHITE
            else:
                return BC_CYAN

        fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
        tres = calculate_change(bn['real_time'], other_bench['real_time'])
        cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
        output_strs += [color_format(use_color, fmt_str,
                                     BC_HEADER, bn['name'], first_col_width,
                                     get_color(tres), tres, get_color(cpures), cpures,
                                     bn['real_time'], other_bench['real_time'],
                                     bn['cpu_time'], other_bench['cpu_time'],
                                     endc=BC_ENDC)]
    return output_strs


###############################################################################
# Unit tests

import unittest


class TestReportDifference(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
        testOutput1 = os.path.join(testInputs, 'test1_run1.json')
        testOutput2 = os.path.join(testInputs, 'test1_run2.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_basic(self):
        expect_lines = [
            ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
            ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
            ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
            ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
            ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
            ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
            ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
            ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(parts, expect_lines[i])


class TestReportDifferenceBetweenFamilies(unittest.TestCase):
    def load_result(self):
        import json
        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
        testOutput = os.path.join(testInputs, 'test2_run.json')
        with open(testOutput, 'r') as f:
            json = json.load(f)
        return json

    def test_basic(self):
        expect_lines = [
            ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
            ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
            ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
            ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
        ]
        json = self.load_result()
        json1 = filter_benchmark(json, "BM_Z.ro", ".")
        json2 = filter_benchmark(json, "BM_O.e", ".")
        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(parts, expect_lines[i])


if __name__ == '__main__':
    unittest.main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
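Editor's note: the expectations in TestReportDifference follow directly from calculate_change above: for a nonzero old value the change is (new - old) / |old|, so halving a time is -0.5 and a 100x slowdown is +99. A quick sanity check against the test data, assuming gbench.report is importable as in the sketches above:

# Worked examples of calculate_change, matching the unit-test expectations.
from gbench.report import calculate_change

assert calculate_change(50, 25) == -0.5      # BM_2xFaster:  50ns -> 25ns
assert calculate_change(50, 100) == 1.0      # BM_2xSlower:  50ns -> 100ns
assert calculate_change(100, 10000) == 99.0  # BM_100xSlower: 100ns -> 10000ns
# BM_ThirdFaster: 100ns -> 66.665ns, printed as -0.3333 under '{:+16.4f}'.
assert abs(calculate_change(100, 66.665) - (-0.33335)) < 1e-9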
159  benchmarks/thirdparty/benchmark/tools/gbench/util.py  (vendored executable file)
@@ -0,0 +1,159 @@
"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import tempfile
import subprocess
import sys

# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2

_num_magic_bytes = 2 if sys.platform.startswith('win') else 4


def is_executable_file(filename):
    """
    Return 'True' if 'filename' names a valid file which is likely
    an executable. A file is considered an executable if it starts with the
    magic bytes for an EXE, Mach-O, or ELF file.
    """
    if not os.path.isfile(filename):
        return False
    with open(filename, mode='rb') as f:
        magic_bytes = f.read(_num_magic_bytes)
    if sys.platform == 'darwin':
        return magic_bytes in [
            b'\xfe\xed\xfa\xce',  # MH_MAGIC
            b'\xce\xfa\xed\xfe',  # MH_CIGAM
            b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
            b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
            b'\xca\xfe\xba\xbe',  # FAT_MAGIC
            b'\xbe\xba\xfe\xca'   # FAT_CIGAM
        ]
    elif sys.platform.startswith('win'):
        return magic_bytes == b'MZ'
    else:
        return magic_bytes == b'\x7FELF'


def is_json_file(filename):
    """
    Returns 'True' if 'filename' names a valid JSON output file.
    'False' otherwise.
    """
    try:
        with open(filename, 'r') as f:
            json.load(f)
        return True
    except:
        pass
    return False


def classify_input_file(filename):
    """
    Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
    """
    ftype = IT_Invalid
    err_msg = None
    if not os.path.exists(filename):
        err_msg = "'%s' does not exist" % filename
    elif not os.path.isfile(filename):
        err_msg = "'%s' does not name a file" % filename
    elif is_executable_file(filename):
        ftype = IT_Executable
    elif is_json_file(filename):
        ftype = IT_JSON
    else:
        err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
    return ftype, err_msg


def check_input_file(filename):
    """
    Classify the file named by 'filename' and return the classification.
    If the file is classified as 'IT_Invalid' print an error message and exit
    the program.
    """
    ftype, msg = classify_input_file(filename)
    if ftype == IT_Invalid:
        print("Invalid input file: %s" % msg)
        sys.exit(1)
    return ftype


def find_benchmark_flag(prefix, benchmark_flags):
    """
    Search the specified list of flags for a flag matching `<prefix><arg>` and
    if it is found return the arg it specifies. If specified more than once the
    last value is returned. If the flag is not found None is returned.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    result = None
    for f in benchmark_flags:
        if f.startswith(prefix):
            result = f[len(prefix):]
    return result


def remove_benchmark_flags(prefix, benchmark_flags):
    """
    Return a new list containing the specified benchmark_flags except those
    with the specified prefix.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    return [f for f in benchmark_flags if not f.startswith(prefix)]


def load_benchmark_results(fname):
    """
    Read benchmark output from a file and return the JSON object.
    REQUIRES: 'fname' names a file containing JSON benchmark output.
    """
    with open(fname, 'r') as f:
        return json.load(f)


def run_benchmark(exe_name, benchmark_flags):
    """
    Run a benchmark specified by 'exe_name' with the specified
    'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
    real time console output.
    RETURNS: A JSON object representing the benchmark output
    """
    output_name = find_benchmark_flag('--benchmark_out=',
                                      benchmark_flags)
    is_temp_output = False
    if output_name is None:
        is_temp_output = True
        thandle, output_name = tempfile.mkstemp()
        os.close(thandle)
        benchmark_flags = list(benchmark_flags) + \
            ['--benchmark_out=%s' % output_name]

    cmd = [exe_name] + benchmark_flags
    print("RUNNING: %s" % ' '.join(cmd))
    exitCode = subprocess.call(cmd)
    if exitCode != 0:
        print('TEST FAILED...')
        sys.exit(exitCode)
    json_res = load_benchmark_results(output_name)
    if is_temp_output:
        os.unlink(output_name)
    return json_res


def run_or_load_benchmark(filename, benchmark_flags):
    """
    Get the results for a specified benchmark. If 'filename' specifies
    an executable benchmark then the results are generated by running the
    benchmark. Otherwise 'filename' must name a valid JSON output file,
    which is loaded and the result returned.
    """
    ftype = check_input_file(filename)
    if ftype == IT_JSON:
        return load_benchmark_results(filename)
    elif ftype == IT_Executable:
        return run_benchmark(filename, benchmark_flags)
    else:
        assert False  # This branch is unreachable
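Editor's note: a hedged sketch of how these utilities compose. The binary path and filter flag below are hypothetical placeholders, and the gbench package is assumed to be importable (e.g. when run from the tools directory):

# Sketch: classify an input, then either run the binary or load its JSON.
from gbench import util

# An executable is detected by its magic bytes and run with a --benchmark_out
# flag appended (a temp file, unless the caller already passed one); a JSON
# file is simply loaded. Anything else prints an error and exits.
results = util.run_or_load_benchmark(
    './my_benchmarks',                    # hypothetical benchmark binary
    ['--benchmark_filter=BM_SameTimes'])  # extra flags passed through as-is
for bench in results['benchmarks']:
    print(bench['name'], bench['real_time'], bench['time_unit'])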