From 7a91ac4753b3a37184a4713a8f852beb9243f056 Mon Sep 17 00:00:00 2001 From: Eric Fiselier Date: Fri, 14 Dec 2018 03:37:13 +0000 Subject: [PATCH] Update google benchmark version llvm-svn: 349126 --- libcxx/utils/google-benchmark/BUILD.bazel | 42 --------------- libcxx/utils/google-benchmark/CONTRIBUTORS | 1 + libcxx/utils/google-benchmark/README.md | 24 ++++++++- .../google-benchmark/include/benchmark/benchmark.h | 10 ++++ libcxx/utils/google-benchmark/src/benchmark.cc | 6 +-- .../google-benchmark/src/benchmark_register.cc | 11 ++-- libcxx/utils/google-benchmark/src/complexity.cc | 4 +- libcxx/utils/google-benchmark/src/json_reporter.cc | 2 + libcxx/utils/google-benchmark/src/reporter.cc | 3 +- libcxx/utils/google-benchmark/src/string_util.h | 6 ++- libcxx/utils/google-benchmark/src/sysinfo.cc | 38 +++++++++++++ .../google-benchmark/test/output_test_helper.cc | 44 ++++++++++++++- .../google-benchmark/test/reporter_output_test.cc | 13 +++++ .../google-benchmark/test/string_util_gtest.cc | 62 +++++++++++----------- libcxx/utils/google-benchmark/tools/compare.py | 6 +-- .../utils/google-benchmark/tools/gbench/report.py | 7 ++- libcxx/utils/google-benchmark/tools/gbench/util.py | 15 ++++-- 17 files changed, 194 insertions(+), 100 deletions(-) delete mode 100644 libcxx/utils/google-benchmark/BUILD.bazel diff --git a/libcxx/utils/google-benchmark/BUILD.bazel b/libcxx/utils/google-benchmark/BUILD.bazel deleted file mode 100644 index 6ee69f2..0000000 --- a/libcxx/utils/google-benchmark/BUILD.bazel +++ /dev/null @@ -1,42 +0,0 @@ -licenses(["notice"]) - -config_setting( - name = "windows", - values = { - "cpu": "x64_windows", - }, - visibility = [":__subpackages__"], -) - -cc_library( - name = "benchmark", - srcs = glob( - [ - "src/*.cc", - "src/*.h", - ], - exclude = ["src/benchmark_main.cc"], - ), - hdrs = ["include/benchmark/benchmark.h"], - linkopts = select({ - ":windows": ["-DEFAULTLIB:shlwapi.lib"], - "//conditions:default": ["-pthread"], - }), - strip_include_prefix = "include", - visibility = ["//visibility:public"], -) - -cc_library( - name = "benchmark_main", - srcs = ["src/benchmark_main.cc"], - hdrs = ["include/benchmark/benchmark.h"], - strip_include_prefix = "include", - visibility = ["//visibility:public"], - deps = [":benchmark"], -) - -cc_library( - name = "benchmark_internal_headers", - hdrs = glob(["src/*.h"]), - visibility = ["//test:__pkg__"], -) diff --git a/libcxx/utils/google-benchmark/CONTRIBUTORS b/libcxx/utils/google-benchmark/CONTRIBUTORS index f727bd1..ee74ff8 100644 --- a/libcxx/utils/google-benchmark/CONTRIBUTORS +++ b/libcxx/utils/google-benchmark/CONTRIBUTORS @@ -27,6 +27,7 @@ Arne Beer Billy Robert O'Neal III Chris Kennelly Christopher Seymour +Cyrille Faucheux David Coeurjolly Deniz Evrenci Dominic Hamon diff --git a/libcxx/utils/google-benchmark/README.md b/libcxx/utils/google-benchmark/README.md index 3820395..858ea23 100644 --- a/libcxx/utils/google-benchmark/README.md +++ b/libcxx/utils/google-benchmark/README.md @@ -255,7 +255,7 @@ that might be used to customize high-order term calculation. ```c++ BENCHMARK(BM_StringCompare)->RangeMultiplier(2) - ->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; }); + ->Range(1<<10, 1<<18)->Complexity([](int64_t n)->double{return n; }); ``` ### Templated benchmarks @@ -264,7 +264,7 @@ messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the absence of multiprogramming. 
 ```c++
-template <class Q> int BM_Sequential(benchmark::State& state) {
+template <class Q> void BM_Sequential(benchmark::State& state) {
   Q q;
   typename Q::value_type v;
   for (auto _ : state) {
@@ -428,6 +428,26 @@ BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
 
 Without `UseRealTime`, CPU time is used by default.
 
+## Controlling timers
+Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
+is measured. But sometimes, it is necessary to do some work inside that
+loop, on every iteration, without counting that time towards the benchmark time.
+That is possible, although it is not recommended, since it has high overhead.
+
+```c++
+static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
+  std::set<int> data;
+  for (auto _ : state) {
+    state.PauseTiming(); // Stop timers. They will not count until they are resumed.
+    data = ConstructRandomSet(state.range(0)); // Do something that should not be measured
+    state.ResumeTiming(); // And resume timers. They are now counting again.
+    // The rest will be measured.
+    for (int j = 0; j < state.range(1); ++j)
+      data.insert(RandomNumber());
+  }
+}
+BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
+```
 
 ## Manual timing
 For benchmarking something for which neither CPU time nor real-time are
diff --git a/libcxx/utils/google-benchmark/include/benchmark/benchmark.h b/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
index 1f072b1..a0fd7c6 100644
--- a/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
+++ b/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
@@ -1293,6 +1293,15 @@ struct CPUInfo {
   BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
 };
 
+// Struct holding system information (currently just the host name).
+struct SystemInfo {
+  std::string name;
+  static const SystemInfo& Get();
+ private:
+  SystemInfo();
+  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
+};
+
 // Interface for custom benchmark result printers.
 // By default, benchmark reports are printed to stdout. However an application
 // can control the destination of the reports by calling
@@ -1302,6 +1311,7 @@ class BenchmarkReporter {
  public:
   struct Context {
     CPUInfo const& cpu_info;
+    SystemInfo const& sys_info;
     // The number of chars in the longest benchmark name.
     size_t name_field_width;
     static const char* executable_name;
diff --git a/libcxx/utils/google-benchmark/src/benchmark.cc b/libcxx/utils/google-benchmark/src/benchmark.cc
index 410493f..aab0750 100644
--- a/libcxx/utils/google-benchmark/src/benchmark.cc
+++ b/libcxx/utils/google-benchmark/src/benchmark.cc
@@ -57,9 +57,9 @@ DEFINE_bool(benchmark_list_tests, false,
 DEFINE_string(benchmark_filter, ".",
               "A regular expression that specifies the set of benchmarks "
-              "to execute. If this flag is empty, no benchmarks are run. "
-              "If this flag is the string \"all\", all benchmarks linked "
-              "into the process are run.");
+              "to execute. If this flag is empty, or if this flag is the "
+              "string \"all\", all benchmarks linked into the binary are "
+              "run.");
 
 DEFINE_double(benchmark_min_time, 0.5,
               "Minimum number of seconds we should run benchmark before "
diff --git a/libcxx/utils/google-benchmark/src/benchmark_register.cc b/libcxx/utils/google-benchmark/src/benchmark_register.cc
index a85a4b4..f17f5b2 100644
--- a/libcxx/utils/google-benchmark/src/benchmark_register.cc
+++ b/libcxx/utils/google-benchmark/src/benchmark_register.cc
@@ -182,14 +182,19 @@ bool BenchmarkFamilies::FindBenchmarks(
           }
         }
 
-        instance.name += StrFormat("%d", arg);
+        // we know that the args are always non-negative (see 'AddRange()'),
+        // thus print as 'unsigned'. BUT, do a cast due to the 32-bit builds.
+        instance.name += StrFormat("%lu", static_cast<unsigned long>(arg));
         ++arg_i;
       }
 
       if (!IsZero(family->min_time_))
         instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
-      if (family->iterations_ != 0)
-        instance.name += StrFormat("/iterations:%d", family->iterations_);
+      if (family->iterations_ != 0) {
+        instance.name +=
+            StrFormat("/iterations:%lu",
+                      static_cast<unsigned long>(family->iterations_));
+      }
       if (family->repetitions_ != 0)
         instance.name += StrFormat("/repeats:%d", family->repetitions_);
 
diff --git a/libcxx/utils/google-benchmark/src/complexity.cc b/libcxx/utils/google-benchmark/src/complexity.cc
index 0be73e0..6ef1766 100644
--- a/libcxx/utils/google-benchmark/src/complexity.cc
+++ b/libcxx/utils/google-benchmark/src/complexity.cc
@@ -73,8 +73,8 @@ std::string GetBigOString(BigO complexity) {
 //   - time          : Vector containing the times for the benchmark tests.
 //   - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
 
-// For a deeper explanation on the algorithm logic, look the README file at
-// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
+// For a deeper explanation on the algorithm logic, please refer to
+// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
 
 LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                        const std::vector<double>& time,
diff --git a/libcxx/utils/google-benchmark/src/json_reporter.cc b/libcxx/utils/google-benchmark/src/json_reporter.cc
index f599425..7d01e8e 100644
--- a/libcxx/utils/google-benchmark/src/json_reporter.cc
+++ b/libcxx/utils/google-benchmark/src/json_reporter.cc
@@ -77,6 +77,8 @@ bool JSONReporter::ReportContext(const Context& context) {
   std::string walltime_value = LocalDateTimeString();
   out << indent << FormatKV("date", walltime_value) << ",\n";
 
+  out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
+
   if (Context::executable_name) {
     // windows uses backslash for its path separator,
     // which must be escaped in JSON otherwise it blows up conforming JSON
diff --git a/libcxx/utils/google-benchmark/src/reporter.cc b/libcxx/utils/google-benchmark/src/reporter.cc
index 056561d..59bc5f7 100644
--- a/libcxx/utils/google-benchmark/src/reporter.cc
+++ b/libcxx/utils/google-benchmark/src/reporter.cc
@@ -79,7 +79,8 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
 // No initializer because it's already initialized to NULL.
 const char *BenchmarkReporter::Context::executable_name;
 
-BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}
+BenchmarkReporter::Context::Context()
+    : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
 
 std::string BenchmarkReporter::Run::benchmark_name() const {
   std::string name = run_name;
diff --git a/libcxx/utils/google-benchmark/src/string_util.h b/libcxx/utils/google-benchmark/src/string_util.h
index 4a55012..fc5f8b0 100644
--- a/libcxx/utils/google-benchmark/src/string_util.h
+++ b/libcxx/utils/google-benchmark/src/string_util.h
@@ -12,7 +12,11 @@ void AppendHumanReadable(int n, std::string* str);
 
 std::string HumanReadableNumber(double n, double one_k = 1024.0);
 
-std::string StrFormat(const char* format, ...);
+#ifdef __GNUC__
+__attribute__((format(printf, 1, 2)))
+#endif
+std::string
+StrFormat(const char* format, ...);
 
 inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
   return out;
diff --git a/libcxx/utils/google-benchmark/src/sysinfo.cc b/libcxx/utils/google-benchmark/src/sysinfo.cc
index 0ad300e..c0c07e5 100644
--- a/libcxx/utils/google-benchmark/src/sysinfo.cc
+++ b/libcxx/utils/google-benchmark/src/sysinfo.cc
@@ -19,6 +19,7 @@
 #undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
 #include 
 #include 
+#include <codecvt>
 #else
 #include 
 #ifndef BENCHMARK_OS_FUCHSIA
@@ -52,6 +53,7 @@
 #include 
 #include 
 #include 
+#include <locale>
 
 #include "check.h"
 #include "cycleclock.h"
@@ -366,6 +368,35 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
 #endif
 }
 
+std::string GetSystemName() {
+#if defined(BENCHMARK_OS_WINDOWS)
+  std::string str;
+  const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1;
+  TCHAR hostname[COUNT] = {'\0'};
+  DWORD DWCOUNT = COUNT;
+  if (!GetComputerName(hostname, &DWCOUNT))
+    return std::string("");
+#ifndef UNICODE
+  str = std::string(hostname, DWCOUNT);
+#else
+  // Convert the wide-character name via wstring_convert (deprecated in C++17).
+  using convert_type = std::codecvt_utf8<wchar_t>;
+  std::wstring_convert<convert_type, wchar_t> converter;
+  std::wstring wStr(hostname, DWCOUNT);
+  str = converter.to_bytes(wStr);
+#endif
+  return str;
+#else  // defined(BENCHMARK_OS_WINDOWS)
+#ifdef BENCHMARK_OS_MACOSX  // Mac doesn't have HOST_NAME_MAX defined
+#define HOST_NAME_MAX 64
+#endif
+  char hostname[HOST_NAME_MAX];
+  int retVal = gethostname(hostname, HOST_NAME_MAX);
+  if (retVal != 0) return std::string("");
+  return std::string(hostname);
+#endif  // Catch-all POSIX block.
+}
+
 int GetNumCPUs() {
 #ifdef BENCHMARK_HAS_SYSCTL
   int NumCPU = -1;
@@ -609,4 +640,11 @@ CPUInfo::CPUInfo()
       scaling_enabled(CpuScalingEnabled(num_cpus)),
       load_avg(GetLoadAvg()) {}
 
+
+const SystemInfo& SystemInfo::Get() {
+  static const SystemInfo* info = new SystemInfo();
+  return *info;
+}
+
+SystemInfo::SystemInfo() : name(GetSystemName()) {}
 }  // end namespace benchmark
diff --git a/libcxx/utils/google-benchmark/test/output_test_helper.cc b/libcxx/utils/google-benchmark/test/output_test_helper.cc
index f2da752..59c9dfb 100644
--- a/libcxx/utils/google-benchmark/test/output_test_helper.cc
+++ b/libcxx/utils/google-benchmark/test/output_test_helper.cc
@@ -4,6 +4,7 @@
 #include 
 #include 
 #include 
+#include <random>
 #include 
 #include 
 
@@ -207,7 +208,7 @@ void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
 void ResultsChecker::CheckResults(std::stringstream& output) {
   // first reset the stream to the start
   {
-    auto start = std::ios::streampos(0);
+    auto start = std::stringstream::pos_type(0);
     // clear before calling tellg()
     output.clear();
     // seek to zero only when needed
@@ -438,11 +439,50 @@ int SubstrCnt(const std::string& haystack, const std::string& pat) {
   return count;
 }
 
+static char ToHex(int ch) {
+  return ch < 10 ? static_cast<char>('0' + ch)
+                 : static_cast<char>('a' + (ch - 10));
+}
+
+static char RandomHexChar() {
+  static std::mt19937 rd{std::random_device{}()};
+  static std::uniform_int_distribution<int> mrand{0, 15};
+  return ToHex(mrand(rd));
+}
+
+static std::string GetRandomFileName() {
+  std::string model = "test.%%%%%%";
+  for (auto & ch : model) {
+    if (ch == '%')
+      ch = RandomHexChar();
+  }
+  return model;
+}
+
+static bool FileExists(std::string const& name) {
+  std::ifstream in(name.c_str());
+  return in.good();
+}
+
+static std::string GetTempFileName() {
+  // This function attempts to avoid race conditions where two tests
+  // create the same file at the same time. However, it still introduces races
+  // similar to tmpnam.
+ int retries = 3; + while (--retries) { + std::string name = GetRandomFileName(); + if (!FileExists(name)) + return name; + } + std::cerr << "Failed to create unique temporary file name" << std::endl; + std::abort(); +} + std::string GetFileReporterOutput(int argc, char* argv[]) { std::vector new_argv(argv, argv + argc); assert(static_cast(argc) == new_argv.size()); - std::string tmp_file_name = std::tmpnam(nullptr); + std::string tmp_file_name = GetTempFileName(); std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n'; std::string tmp = "--benchmark_out="; diff --git a/libcxx/utils/google-benchmark/test/reporter_output_test.cc b/libcxx/utils/google-benchmark/test/reporter_output_test.cc index 8a45471..8263831 100644 --- a/libcxx/utils/google-benchmark/test/reporter_output_test.cc +++ b/libcxx/utils/google-benchmark/test/reporter_output_test.cc @@ -23,6 +23,7 @@ static int AddContextCases() { {{"^\\{", MR_Default}, {"\"context\":", MR_Next}, {"\"date\": \"", MR_Next}, + {"\"host_name\":", MR_Next}, {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", MR_Next}, {"\"num_cpus\": %int,$", MR_Next}, @@ -220,6 +221,18 @@ ADD_CASES(TC_JSONOut, ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); // ========================================================================= // +// ------------------------ Testing Big Args Output ------------------------ // +// ========================================================================= // + +void BM_BigArgs(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U); +ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, + {"^BM_BigArgs/2147483648 %console_report$"}}); + +// ========================================================================= // // ----------------------- Testing Complexity Output ----------------------- // // ========================================================================= // diff --git a/libcxx/utils/google-benchmark/test/string_util_gtest.cc b/libcxx/utils/google-benchmark/test/string_util_gtest.cc index 4c81734..2c5d073 100644 --- a/libcxx/utils/google-benchmark/test/string_util_gtest.cc +++ b/libcxx/utils/google-benchmark/test/string_util_gtest.cc @@ -9,56 +9,56 @@ namespace { TEST(StringUtilTest, stoul) { { size_t pos = 0; - EXPECT_EQ(0, benchmark::stoul("0", &pos)); - EXPECT_EQ(1, pos); + EXPECT_EQ(0ul, benchmark::stoul("0", &pos)); + EXPECT_EQ(1ul, pos); } { size_t pos = 0; - EXPECT_EQ(7, benchmark::stoul("7", &pos)); - EXPECT_EQ(1, pos); + EXPECT_EQ(7ul, benchmark::stoul("7", &pos)); + EXPECT_EQ(1ul, pos); } { size_t pos = 0; - EXPECT_EQ(135, benchmark::stoul("135", &pos)); - EXPECT_EQ(3, pos); + EXPECT_EQ(135ul, benchmark::stoul("135", &pos)); + EXPECT_EQ(3ul, pos); } #if ULONG_MAX == 0xFFFFFFFFul { size_t pos = 0; EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos)); - EXPECT_EQ(10, pos); + EXPECT_EQ(10ul, pos); } #elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul { size_t pos = 0; EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos)); - EXPECT_EQ(20, pos); + EXPECT_EQ(20ul, pos); } #endif { size_t pos = 0; - EXPECT_EQ(10, benchmark::stoul("1010", &pos, 2)); - EXPECT_EQ(4, pos); + EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2)); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; - EXPECT_EQ(520, benchmark::stoul("1010", &pos, 8)); - EXPECT_EQ(4, pos); + EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8)); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; 
- EXPECT_EQ(1010, benchmark::stoul("1010", &pos, 10)); - EXPECT_EQ(4, pos); + EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10)); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; - EXPECT_EQ(4112, benchmark::stoul("1010", &pos, 16)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16)); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; - EXPECT_EQ(0xBEEF, benchmark::stoul("BEEF", &pos, 16)); - EXPECT_EQ(4, pos); + EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16)); + EXPECT_EQ(4ul, pos); } { ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); @@ -69,42 +69,42 @@ TEST(StringUtilTest, stoi) { { size_t pos = 0; EXPECT_EQ(0, benchmark::stoi("0", &pos)); - EXPECT_EQ(1, pos); + EXPECT_EQ(1ul, pos); } { size_t pos = 0; EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); - EXPECT_EQ(3, pos); + EXPECT_EQ(3ul, pos); } { size_t pos = 0; EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4ul, pos); } { ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); @@ -115,28 +115,28 @@ TEST(StringUtilTest, stod) { { size_t pos = 0; EXPECT_EQ(0.0, benchmark::stod("0", &pos)); - EXPECT_EQ(1, pos); + EXPECT_EQ(1ul, pos); } { size_t pos = 0; EXPECT_EQ(-84.0, benchmark::stod("-84", &pos)); - EXPECT_EQ(3, pos); + EXPECT_EQ(3ul, pos); } { size_t pos = 0; EXPECT_EQ(1234.0, benchmark::stod("1234", &pos)); - EXPECT_EQ(4, pos); + EXPECT_EQ(4ul, pos); } { size_t pos = 0; EXPECT_EQ(1.5, benchmark::stod("1.5", &pos)); - EXPECT_EQ(3, pos); + EXPECT_EQ(3ul, pos); } { size_t pos = 0; /* Note: exactly representable as double */ EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos)); - EXPECT_EQ(8, pos); + EXPECT_EQ(8ul, pos); } { ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); diff --git a/libcxx/utils/google-benchmark/tools/compare.py b/libcxx/utils/google-benchmark/tools/compare.py index 9ff5c14..539ace6 100755 --- a/libcxx/utils/google-benchmark/tools/compare.py +++ b/libcxx/utils/google-benchmark/tools/compare.py @@ -1,5 +1,6 @@ #!/usr/bin/env python +import unittest """ compare.py - versatile benchmark output compare tool """ @@ -244,9 +245,6 @@ def main(): print(ln) -import unittest - - class TestParser(unittest.TestCase): def setUp(self): self.parser = create_parser() @@ -402,7 +400,7 @@ class TestParser(unittest.TestCase): if __name__ == '__main__': - #unittest.main() + # unittest.main() main() # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 diff --git a/libcxx/utils/google-benchmark/tools/gbench/report.py b/libcxx/utils/google-benchmark/tools/gbench/report.py index 28bab34..5085b93 100644 --- a/libcxx/utils/google-benchmark/tools/gbench/report.py +++ b/libcxx/utils/google-benchmark/tools/gbench/report.py @@ -1,3 +1,4 @@ +import unittest """report.py - Utilities for reporting statistics about benchmark results """ import os @@ -270,9 +271,6 @@ def generate_difference_report( # Unit tests -import unittest - - class 
TestGetUniqueBenchmarkNames(unittest.TestCase): def load_results(self): import json @@ -290,7 +288,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase): 'BM_One', 'BM_Two', 'short', # These two are not sorted - 'medium', # These two are not sorted + 'medium', # These two are not sorted ] json = self.load_results() output_lines = get_unique_benchmark_names(json) @@ -300,6 +298,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase): for i in range(0, len(output_lines)): self.assertEqual(expect_lines[i], output_lines[i]) + class TestReportDifference(unittest.TestCase): def load_results(self): import json diff --git a/libcxx/utils/google-benchmark/tools/gbench/util.py b/libcxx/utils/google-benchmark/tools/gbench/util.py index 07c2377..1f8e8e2 100644 --- a/libcxx/utils/google-benchmark/tools/gbench/util.py +++ b/libcxx/utils/google-benchmark/tools/gbench/util.py @@ -7,11 +7,13 @@ import subprocess import sys # Input file type enumeration -IT_Invalid = 0 -IT_JSON = 1 +IT_Invalid = 0 +IT_JSON = 1 IT_Executable = 2 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4 + + def is_executable_file(filename): """ Return 'True' if 'filename' names a valid file which is likely @@ -46,7 +48,7 @@ def is_json_file(filename): with open(filename, 'r') as f: json.load(f) return True - except: + except BaseException: pass return False @@ -84,6 +86,7 @@ def check_input_file(filename): sys.exit(1) return ftype + def find_benchmark_flag(prefix, benchmark_flags): """ Search the specified list of flags for a flag matching `` and @@ -97,6 +100,7 @@ def find_benchmark_flag(prefix, benchmark_flags): result = f[len(prefix):] return result + def remove_benchmark_flags(prefix, benchmark_flags): """ Return a new list containing the specified benchmark_flags except those @@ -105,6 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags): assert prefix.startswith('--') and prefix.endswith('=') return [f for f in benchmark_flags if not f.startswith(prefix)] + def load_benchmark_results(fname): """ Read benchmark output from a file and return the JSON object. @@ -129,7 +134,7 @@ def run_benchmark(exe_name, benchmark_flags): thandle, output_name = tempfile.mkstemp() os.close(thandle) benchmark_flags = list(benchmark_flags) + \ - ['--benchmark_out=%s' % output_name] + ['--benchmark_out=%s' % output_name] cmd = [exe_name] + benchmark_flags print("RUNNING: %s" % ' '.join(cmd)) @@ -156,4 +161,4 @@ def run_or_load_benchmark(filename, benchmark_flags): elif ftype == IT_Executable: return run_benchmark(filename, benchmark_flags) else: - assert False # This branch is unreachable \ No newline at end of file + assert False # This branch is unreachable -- 2.7.4
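
The new `sys_info` member added to `BenchmarkReporter::Context` above is visible to any user-defined reporter, not just the JSON reporter hunk shown earlier. Below is a minimal, illustrative sketch of such a reporter; it is not part of this patch, and the class name `HostNameConsoleReporter` and the `BM_Noop` benchmark are assumed names used only for the example:

```c++
#include "benchmark/benchmark.h"

// Sketch of a custom reporter that prints the host name exposed through the
// new Context::sys_info member, then delegates to the stock ConsoleReporter.
class HostNameConsoleReporter : public benchmark::ConsoleReporter {
 public:
  bool ReportContext(const Context& context) override {
    GetErrorStream() << "Running on host: " << context.sys_info.name << "\n";
    return benchmark::ConsoleReporter::ReportContext(context);
  }
};

static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_Noop);

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  HostNameConsoleReporter reporter;
  benchmark::RunSpecifiedBenchmarks(&reporter);
  return 0;
}
```

The same value also appears automatically as the `host_name` key in JSON output, which is what the `reporter_output_test.cc` change above checks for.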