From fcafd3e6002cb682d737ddf56f1d972358dbfa61 Mon Sep 17 00:00:00 2001
From: Eric Fiselier
Date: Tue, 10 Jul 2018 04:02:00 +0000
Subject: [PATCH] Update google-benchmark to trunk

llvm-svn: 336635
---
 libcxx/utils/google-benchmark/AUTHORS              |   4 +
 libcxx/utils/google-benchmark/CMakeLists.txt       |  55 +++-
 libcxx/utils/google-benchmark/CONTRIBUTORS         |   4 +
 libcxx/utils/google-benchmark/README.md            |  39 ++-
 .../cmake/AddCXXCompilerFlag.cmake                 |  10 +
 .../google-benchmark/cmake/CXXFeatureCheck.cmake   |  36 +--
 .../google-benchmark/cmake/GetGitVersion.cmake     |   3 +
 .../utils/google-benchmark/cmake/HandleGTest.cmake |  78 ++++--
 .../utils/google-benchmark/cmake/split_list.cmake  |   3 +
 .../utils/google-benchmark/docs/AssemblyTests.md   | 147 +++++++++++
 .../google-benchmark/include/benchmark/benchmark.h | 286 +++++++++++++--------
 libcxx/utils/google-benchmark/releasing.md         |  16 ++
 libcxx/utils/google-benchmark/src/CMakeLists.txt   |  25 +-
 libcxx/utils/google-benchmark/src/benchmark.cc     | 206 +++++----------
 .../google-benchmark/src/benchmark_api_internal.h  |   2 +-
 .../utils/google-benchmark/src/benchmark_main.cc   |  17 ++
 .../google-benchmark/src/benchmark_register.cc     |  83 +++---
 .../google-benchmark/src/benchmark_register.h      |  33 +++
 libcxx/utils/google-benchmark/src/check.h          |   5 +-
 .../utils/google-benchmark/src/commandlineflags.cc |   2 +-
 libcxx/utils/google-benchmark/src/complexity.cc    |  25 +-
 libcxx/utils/google-benchmark/src/counter.cc       |  25 +-
 libcxx/utils/google-benchmark/src/counter.h        |   8 +-
 libcxx/utils/google-benchmark/src/csv_reporter.cc  |  20 +-
 libcxx/utils/google-benchmark/src/cycleclock.h     |   7 +-
 .../utils/google-benchmark/src/internal_macros.h   |  20 +-
 libcxx/utils/google-benchmark/src/json_reporter.cc |  46 ++--
 libcxx/utils/google-benchmark/src/log.h            |   3 +-
 libcxx/utils/google-benchmark/src/re.h             |  44 +++-
 libcxx/utils/google-benchmark/src/reporter.cc      |   6 +
 libcxx/utils/google-benchmark/src/statistics.cc    |  46 ++--
 libcxx/utils/google-benchmark/src/string_util.cc   |  95 ++++++-
 libcxx/utils/google-benchmark/src/string_util.h    |  28 +-
 libcxx/utils/google-benchmark/src/sysinfo.cc       |  86 ++++++-
 libcxx/utils/google-benchmark/src/thread_manager.h |  66 +++++
 libcxx/utils/google-benchmark/src/thread_timer.h   |  69 +++++
 libcxx/utils/google-benchmark/src/timers.cc        |  11 +-
 libcxx/utils/google-benchmark/test/BUILD           |  65 +++++
 libcxx/utils/google-benchmark/test/CMakeLists.txt  |  50 +++-
 libcxx/utils/google-benchmark/test/basic_test.cc   |  20 +-
 .../utils/google-benchmark/test/benchmark_gtest.cc |  33 +++
 .../utils/google-benchmark/test/benchmark_test.cc  |  29 ++-
 .../test/clobber_memory_assembly_test.cc           |  64 +++++
 .../utils/google-benchmark/test/complexity_test.cc |  17 +-
 .../test/donotoptimize_assembly_test.cc            | 163 ++++++++++++
 .../google-benchmark/test/donotoptimize_test.cc    |   6 +-
 .../utils/google-benchmark/test/link_main_test.cc  |   8 +
 libcxx/utils/google-benchmark/test/map_test.cc     |  12 +-
 .../google-benchmark/test/multiple_ranges_test.cc  |  33 ++-
 libcxx/utils/google-benchmark/test/output_test.h   |  29 ++-
 .../google-benchmark/test/output_test_helper.cc    |  96 +++----
 .../test/register_benchmark_test.cc                |   2 +
 .../google-benchmark/test/reporter_output_test.cc  |  31 ++-
 .../google-benchmark/test/skip_with_error_test.cc  |  17 +-
 .../google-benchmark/test/state_assembly_test.cc   |  68 +++++
 .../google-benchmark/test/statistics_gtest.cc      |  28 ++
 .../utils/google-benchmark/test/statistics_test.cc |  61 -----
 .../google-benchmark/test/string_util_gtest.cc     | 146 +++++++++++
 .../test/templated_fixture_test.cc                 |   6 +-
 .../test/user_counters_tabular_test.cc             |  66 ++---
 .../google-benchmark/test/user_counters_test.cc    | 209 +++++++++++++--
 libcxx/utils/google-benchmark/tools/compare.py     |  67 ++++-
 .../tools/gbench/Inputs/test3_run0.json            |  39 +++
 .../tools/gbench/Inputs/test3_run1.json            |  39 +++
 .../utils/google-benchmark/tools/gbench/report.py  | 186 ++++++++++++--
 libcxx/utils/google-benchmark/tools/strip_asm.py   | 151 +++++++++++
 66 files changed, 2671 insertions(+), 729 deletions(-)
 create mode 100644 libcxx/utils/google-benchmark/cmake/split_list.cmake
 create mode 100644 libcxx/utils/google-benchmark/docs/AssemblyTests.md
 create mode 100644 libcxx/utils/google-benchmark/releasing.md
 create mode 100644 libcxx/utils/google-benchmark/src/benchmark_main.cc
 create mode 100644 libcxx/utils/google-benchmark/src/benchmark_register.h
 create mode 100644 libcxx/utils/google-benchmark/src/thread_manager.h
 create mode 100644 libcxx/utils/google-benchmark/src/thread_timer.h
 create mode 100644 libcxx/utils/google-benchmark/test/BUILD
 create mode 100644 libcxx/utils/google-benchmark/test/benchmark_gtest.cc
 create mode 100644 libcxx/utils/google-benchmark/test/clobber_memory_assembly_test.cc
 create mode 100644 libcxx/utils/google-benchmark/test/donotoptimize_assembly_test.cc
 create mode 100644 libcxx/utils/google-benchmark/test/link_main_test.cc
 create mode 100644 libcxx/utils/google-benchmark/test/state_assembly_test.cc
 create mode 100644 libcxx/utils/google-benchmark/test/statistics_gtest.cc
 delete mode 100644 libcxx/utils/google-benchmark/test/statistics_test.cc
 create mode 100644 libcxx/utils/google-benchmark/test/string_util_gtest.cc
 create mode 100644 libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
 create mode 100644 libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
 create mode 100755 libcxx/utils/google-benchmark/tools/strip_asm.py

diff --git a/libcxx/utils/google-benchmark/AUTHORS b/libcxx/utils/google-benchmark/AUTHORS
index 4e4c4ed..daea1f6 100644
--- a/libcxx/utils/google-benchmark/AUTHORS
+++ b/libcxx/utils/google-benchmark/AUTHORS
@@ -13,11 +13,13 @@ Arne Beer
 Carto
 Christopher Seymour
 David Coeurjolly
+Deniz Evrenci
 Dirac Research
 Dominik Czarnota
 Eric Fiselier
 Eugene Zhuk
 Evgeny Safronov
+Federico Ficarelli
 Felix Homann
 Google Inc.
 International Business Machines Corporation
@@ -31,6 +33,7 @@ Kishan Kumar
 Lei Xu
 Matt Clarkson
 Maxim Vafin
+MongoDB Inc.
 Nick Hutchinson
 Oleksandr Sochka
 Paul Redmond
@@ -38,6 +41,7 @@ Radoslav Yovchev
 Roman Lebedev
 Shuo Chen
 Steinar H. Gunderson
+Stripe, Inc.
 Yixuan Qiu
 Yusuke Suzuki
 Zbigniew Skowron
diff --git a/libcxx/utils/google-benchmark/CMakeLists.txt b/libcxx/utils/google-benchmark/CMakeLists.txt
index aa08267..8ddacab 100644
--- a/libcxx/utils/google-benchmark/CMakeLists.txt
+++ b/libcxx/utils/google-benchmark/CMakeLists.txt
@@ -27,10 +27,48 @@ option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree buildi
 # in cases where it is not possible to build or find a valid version of gtest.
 option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)

+set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
+function(should_enable_assembly_tests)
+  if(CMAKE_BUILD_TYPE)
+    string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
+    if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
+      # FIXME: The --coverage flag needs to be removed when building assembly
+      # tests for this to work.
+ return() + endif() + endif() + if (MSVC) + return() + elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") + return() + elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8) + # FIXME: Make these work on 32 bit builds + return() + elseif(BENCHMARK_BUILD_32_BITS) + # FIXME: Make these work on 32 bit builds + return() + endif() + find_program(LLVM_FILECHECK_EXE FileCheck) + if (LLVM_FILECHECK_EXE) + set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE) + message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}") + else() + message(STATUS "Failed to find LLVM FileCheck") + return() + endif() + set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE) +endfunction() +should_enable_assembly_tests() + +# This option disables the building and running of the assembly verification tests +option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests" + ${ENABLE_ASSEMBLY_TESTS_DEFAULT}) + # Make sure we can import out CMake functions list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + # Read the git tags to determine the project version include(GetGitVersion) get_git_version(GIT_VERSION) @@ -92,7 +130,6 @@ else() # Turn compiler warnings up to 11 add_cxx_compiler_flag(-Wall) - add_cxx_compiler_flag(-Wextra) add_cxx_compiler_flag(-Wshadow) add_cxx_compiler_flag(-Werror RELEASE) @@ -101,8 +138,20 @@ else() add_cxx_compiler_flag(-pedantic) add_cxx_compiler_flag(-pedantic-errors) add_cxx_compiler_flag(-Wshorten-64-to-32) - add_cxx_compiler_flag(-Wfloat-equal) add_cxx_compiler_flag(-fstrict-aliasing) + # Disable warnings regarding deprecated parts of the library while building + # and testing those parts of the library. + add_cxx_compiler_flag(-Wno-deprecated-declarations) + if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") + # Intel silently ignores '-Wno-deprecated-declarations', + # warning no. 1786 must be explicitly disabled. + # See #631 for rationale. + add_cxx_compiler_flag(-wd1786) + endif() + # Disable deprecation warnings for release builds (when -Werror is enabled). + add_cxx_compiler_flag(-Wno-deprecated RELEASE) + add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO) + add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL) if (NOT BENCHMARK_ENABLE_EXCEPTIONS) add_cxx_compiler_flag(-fno-exceptions) endif() @@ -178,7 +227,7 @@ if (BENCHMARK_USE_LIBCXX) # linker flags appear before all linker inputs and -lc++ must appear after. 
list(APPEND BENCHMARK_CXX_LIBRARIES c++) else() - message(FATAL "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler") + message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler") endif() endif(BENCHMARK_USE_LIBCXX) diff --git a/libcxx/utils/google-benchmark/CONTRIBUTORS b/libcxx/utils/google-benchmark/CONTRIBUTORS index c59134b..2ff2f2a 100644 --- a/libcxx/utils/google-benchmark/CONTRIBUTORS +++ b/libcxx/utils/google-benchmark/CONTRIBUTORS @@ -28,16 +28,19 @@ Billy Robert O'Neal III Chris Kennelly Christopher Seymour David Coeurjolly +Deniz Evrenci Dominic Hamon Dominik Czarnota Eric Fiselier Eugene Zhuk Evgeny Safronov +Federico Ficarelli Felix Homann Ismael Jimenez Martinez Jern-Kuan Leong JianXiong Zhou Joao Paulo Magalhaes +John Millikin Jussi Knuuttila Kai Wolf Kishan Kumar @@ -53,6 +56,7 @@ Pierre Phaneuf Radoslav Yovchev Raul Marin Ray Glover +Robert Guo Roman Lebedev Shuo Chen Tobias Ulvgård diff --git a/libcxx/utils/google-benchmark/README.md b/libcxx/utils/google-benchmark/README.md index 6bd81e7..80e69f6 100644 --- a/libcxx/utils/google-benchmark/README.md +++ b/libcxx/utils/google-benchmark/README.md @@ -14,6 +14,8 @@ IRC channel: https://freenode.net #googlebenchmark [Additional Tooling Documentation](docs/tools.md) +[Assembly Testing Documentation](docs/AssemblyTests.md) + ## Building @@ -21,7 +23,7 @@ The basic steps for configuring and building the library look like this: ```bash $ git clone https://github.com/google/benchmark.git -# Benchmark requires GTest as a dependency. Add the source tree as a subdirectory. +# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory. $ git clone https://github.com/google/googletest.git benchmark/googletest $ mkdir build && cd build $ cmake -G [options] ../benchmark @@ -29,15 +31,13 @@ $ cmake -G [options] ../benchmark $ make ``` -Note that Google Benchmark requires GTest to build and run the tests. This -dependency can be provided three ways: +Note that Google Benchmark requires Google Test to build and run the tests. This +dependency can be provided two ways: -* Checkout the GTest sources into `benchmark/googletest`. +* Checkout the Google Test sources into `benchmark/googletest` as above. * Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during configuration, the library will automatically download and build any required dependencies. -* Otherwise, if nothing is done, CMake will use `find_package(GTest REQUIRED)` - to resolve the required GTest dependency. If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF` to `CMAKE_ARGS`. @@ -59,6 +59,7 @@ Now, let's clone the repository and build it ``` git clone https://github.com/google/benchmark.git cd benchmark +git clone https://github.com/google/googletest.git mkdir build cd build cmake .. -DCMAKE_BUILD_TYPE=RELEASE @@ -71,7 +72,7 @@ We need to install the library globally now sudo make install ``` -Now you have google/benchmark installed in your machine +Now you have google/benchmark installed in your machine Note: Don't forget to link to pthread library while building ## Stable and Experimental Library Versions @@ -86,6 +87,11 @@ to use, test, and provide feedback on the new features are encouraged to try this branch. However, this branch provides no stability guarantees and reserves the right to change and break the API at any time. 
+## Prerequisite knowledge + +Before attempting to understand this framework one should ideally have some familiarity with the structure and format of the Google Test framework, upon which it is based. Documentation for Google Test, including a "Getting Started" (primer) guide, is available here: +https://github.com/google/googletest/blob/master/googletest/docs/primer.md + ## Example usage ### Basic usage @@ -112,7 +118,10 @@ BENCHMARK(BM_StringCopy); BENCHMARK_MAIN(); ``` -Don't forget to inform your linker to add benchmark library e.g. through `-lbenchmark` compilation flag. +Don't forget to inform your linker to add benchmark library e.g. through +`-lbenchmark` compilation flag. Alternatively, you may leave out the +`BENCHMARK_MAIN();` at the end of the source file and link against +`-lbenchmark_main` to get the same default behavior. The benchmark library will reporting the timing for the code within the `for(...)` loop. @@ -821,7 +830,7 @@ BM_SetInsert/1024/10 33157 33648 21431 1.13369M The JSON format outputs human readable json split into two top level attributes. The `context` attribute contains information about the run in general, including information about the CPU and the date. -The `benchmarks` attribute contains a list of ever benchmark run. Example json +The `benchmarks` attribute contains a list of every benchmark run. Example json output looks like: ```json { @@ -893,8 +902,11 @@ If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake cach If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables. ## Linking against the library -When using gcc, it is necessary to link against pthread to avoid runtime exceptions. -This is due to how gcc implements std::thread. + +When the library is built using GCC it is necessary to link with `-pthread`, +due to how GCC implements `std::thread`. + +For GCC 4.x failing to link to pthreads will lead to runtime exceptions, not linker errors. See [issue #67](https://github.com/google/benchmark/issues/67) for more details. ## Compiler Support @@ -928,8 +940,11 @@ sudo cpupower frequency-set --governor powersave # Known Issues -### Windows +### Windows with CMake * Users must manually link `shlwapi.lib`. Failure to do so may result in unresolved symbols. +### Solaris + +* Users must explicitly link with kstat library (-lkstat compilation flag). diff --git a/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake b/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake index 17d5f3d..d0d2099 100644 --- a/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake +++ b/libcxx/utils/google-benchmark/cmake/AddCXXCompilerFlag.cmake @@ -62,3 +62,13 @@ function(add_required_cxx_compiler_flag FLAG) message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") endif() endfunction() + +function(check_cxx_warning_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + # Add -Werror to ensure the compiler generates an error if the warning flag + # doesn't exist. 
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") +endfunction() diff --git a/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake b/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake index b2a8217..c4c4d66 100644 --- a/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake +++ b/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake @@ -27,25 +27,27 @@ function(cxx_feature_check FILE) return() endif() - message("-- Performing Test ${FEATURE}") - if(CMAKE_CROSSCOMPILING) - try_compile(COMPILE_${FEATURE} - ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) - if(COMPILE_${FEATURE}) - message(WARNING - "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") - set(RUN_${FEATURE} 0) + if (NOT DEFINED COMPILE_${FEATURE}) + message("-- Performing Test ${FEATURE}") + if(CMAKE_CROSSCOMPILING) + try_compile(COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + if(COMPILE_${FEATURE}) + message(WARNING + "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") + set(RUN_${FEATURE} 0) + else() + set(RUN_${FEATURE} 1) + endif() else() - set(RUN_${FEATURE} 1) + message("-- Performing Test ${FEATURE}") + try_run(RUN_${FEATURE} COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) endif() - else() - message("-- Performing Test ${FEATURE}") - try_run(RUN_${FEATURE} COMPILE_${FEATURE} - ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) endif() if(RUN_${FEATURE} EQUAL 0) diff --git a/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake b/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake index 8dd9480..88cebe3 100644 --- a/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake +++ b/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake @@ -21,6 +21,7 @@ set(__get_git_version INCLUDED) function(get_git_version var) if(GIT_EXECUTABLE) execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} RESULT_VARIABLE status OUTPUT_VARIABLE GIT_VERSION ERROR_QUIET) @@ -33,9 +34,11 @@ function(get_git_version var) # Work out if the repository is dirty execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_QUIET ERROR_QUIET) execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD -- + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_VARIABLE GIT_DIFF_INDEX ERROR_QUIET) string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) diff --git a/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake b/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake index 77ffc4c..7ce1a63 100644 --- a/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake +++ b/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake @@ -1,7 +1,5 @@ -macro(split_list listname) - string(REPLACE ";" " " ${listname} "${${listname}}") -endmacro() +include(split_list) macro(build_external_gtest) include(ExternalProject) @@ -23,9 +21,22 @@ 
macro(build_external_gtest)
   if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE")
     set(GTEST_BUILD_TYPE "DEBUG")
   endif()
+  # FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where
+  # -Werror=unused-function fires during the build on OS X. This is a temporary
+  # workaround to keep our travis bots from failing. It should be removed
+  # once gtest is fixed.
+  if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+    list(APPEND GTEST_FLAGS "-Wno-unused-function")
+  endif()
   split_list(GTEST_FLAGS)
+  set(EXCLUDE_FROM_ALL_OPT "")
+  set(EXCLUDE_FROM_ALL_VALUE "")
+  if (${CMAKE_VERSION} VERSION_GREATER "3.0.99")
+    set(EXCLUDE_FROM_ALL_OPT "EXCLUDE_FROM_ALL")
+    set(EXCLUDE_FROM_ALL_VALUE "ON")
+  endif()
   ExternalProject_Add(googletest
-      EXCLUDE_FROM_ALL ON
+      ${EXCLUDE_FROM_ALL_OPT} ${EXCLUDE_FROM_ALL_VALUE}
       GIT_REPOSITORY https://github.com/google/googletest.git
       GIT_TAG master
       PREFIX "${CMAKE_BINARY_DIR}/googletest"
@@ -35,45 +46,68 @@ macro(build_external_gtest)
       -DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER}
       -DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER}
       -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
+      -DCMAKE_INSTALL_LIBDIR:PATH=<INSTALL_DIR>/lib
       -DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS}
       -Dgtest_force_shared_crt:BOOL=ON
   )
   ExternalProject_Get_Property(googletest install_dir)
-
-  add_library(gtest UNKNOWN IMPORTED)
-  add_library(gtest_main UNKNOWN IMPORTED)
+  set(GTEST_INCLUDE_DIRS ${install_dir}/include)
+  file(MAKE_DIRECTORY ${GTEST_INCLUDE_DIRS})

   set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}")
   set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")
-
   if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG")
     set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}")
   endif()
-  file(MAKE_DIRECTORY ${install_dir}/include)
-  set_target_properties(gtest PROPERTIES
-    IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}gtest${LIB_SUFFIX}
-    INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
-  )
-  set_target_properties(gtest_main PROPERTIES
-    IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}gtest_main${LIB_SUFFIX}
-    INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
-  )
-  add_dependencies(gtest googletest)
-  add_dependencies(gtest_main googletest)
-  set(GTEST_BOTH_LIBRARIES gtest gtest_main)
-  #set(GTEST_INCLUDE_DIRS ${install_dir}/include)
+
+  # Use gmock_main instead of gtest_main because it initializes gtest as well.
+  # Note: The libraries are listed in reverse order of their dependencies.
+  foreach(LIB gtest gmock gmock_main)
+    add_library(${LIB} UNKNOWN IMPORTED)
+    set_target_properties(${LIB} PROPERTIES
+      IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX}
+      INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS}
+      INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}"
+    )
+    add_dependencies(${LIB} googletest)
+    list(APPEND GTEST_BOTH_LIBRARIES ${LIB})
+  endforeach()
 endmacro(build_external_gtest)

 if (BENCHMARK_ENABLE_GTEST_TESTS)
   if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest)
+    set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest")
     set(INSTALL_GTEST OFF CACHE INTERNAL "")
     set(INSTALL_GMOCK OFF CACHE INTERNAL "")
     add_subdirectory(${CMAKE_SOURCE_DIR}/googletest)
-    set(GTEST_BOTH_LIBRARIES gtest gtest_main)
+    set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main)
+    foreach(HEADER test mock)
+      # CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES, so we
+      # have to add the paths ourselves.
+      set(HFILE g${HEADER}/g${HEADER}.h)
+      set(HPATH ${GTEST_ROOT}/google${HEADER}/include)
+      find_path(HEADER_PATH_${HEADER} ${HFILE}
+          NO_DEFAULT_PATHS
+          HINTS ${HPATH}
+      )
+      if (NOT HEADER_PATH_${HEADER})
+        message(FATAL_ERROR "Failed to find header ${HFILE} in ${HPATH}")
+      endif()
+      list(APPEND GTEST_INCLUDE_DIRS ${HEADER_PATH_${HEADER}})
+    endforeach()
   elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES)
     build_external_gtest()
   else()
     find_package(GTest REQUIRED)
+    find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h
+        HINTS ${GTEST_INCLUDE_DIRS})
+    if (NOT GMOCK_INCLUDE_DIRS)
+      message(FATAL_ERROR "Failed to find header gmock/gmock.h with hint ${GTEST_INCLUDE_DIRS}")
+    endif()
+    set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS})
+    # FIXME: We don't currently require the gmock library to build the tests,
+    # and it's likely we won't find it, so we don't try. As long as we've
+    # found the gmock/gmock.h header and gtest_main that should be good enough.
   endif()
 endif()
diff --git a/libcxx/utils/google-benchmark/cmake/split_list.cmake b/libcxx/utils/google-benchmark/cmake/split_list.cmake
new file mode 100644
index 0000000..67aed3f
--- /dev/null
+++ b/libcxx/utils/google-benchmark/cmake/split_list.cmake
@@ -0,0 +1,3 @@
+macro(split_list listname)
+  string(REPLACE ";" " " ${listname} "${${listname}}")
+endmacro()
diff --git a/libcxx/utils/google-benchmark/docs/AssemblyTests.md b/libcxx/utils/google-benchmark/docs/AssemblyTests.md
new file mode 100644
index 0000000..1fbdc26
--- /dev/null
+++ b/libcxx/utils/google-benchmark/docs/AssemblyTests.md
@@ -0,0 +1,147 @@
+# Assembly Tests
+
+The Benchmark library provides a number of functions whose primary
+purpose is to affect assembly generation, including `DoNotOptimize`
+and `ClobberMemory`. In addition there are other functions,
+such as `KeepRunning`, for which generating good assembly is paramount.
+
+For these functions it's important to have tests that verify the
+correctness and quality of the implementation. This requires testing
+the code generated by the compiler.
+
+This document describes how the Benchmark library tests compiler output,
+as well as how to properly write new tests.
+
+
+## Anatomy of a Test
+
+Writing a test has two steps:
+
+* Write the code you want to generate assembly for.
+* Add `// CHECK` lines to match against the verified assembly.
+
+Example:
+```c++
+
+// CHECK-LABEL: test_add:
+extern "C" int test_add() {
+    extern int ExternInt;
+    return ExternInt + 1;
+
+    // CHECK: movl ExternInt(%rip), %eax
+    // CHECK: addl %eax
+    // CHECK: ret
+}
+
+```
+
+#### LLVM Filecheck
+
+[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
+is used to test the generated assembly against the `// CHECK` lines
+specified in the test's source file. Please see the documentation
+linked above for information on how to write `CHECK` directives.
+
+#### Tips and Tricks:
+
+* Tests should match the minimal amount of output required to establish
+correctness. `CHECK` directives don't have to match on the exact next line
+after the previous match, so tests should omit checks for unimportant
+bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
+can be used to ensure a match occurs exactly after the previous match).
+
+* The tests are compiled with `-O3 -g0`. So we're only testing the
+optimized output.
+
+* The assembly output is further cleaned up using `tools/strip_asm.py`.
+This removes comments, assembler directives, and unused labels before
+the test is run.
+
+* The generated and stripped assembly file for a test is output under
+`<build-output>/test/<test-name>.s`
+
+* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
+to specify lines that should only match in certain situations.
+The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
+are only expected to match Clang or GCC's output respectively. Normal
+`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
+`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
+`CHECK` lines)
+
+* Use `extern "C"` to disable name mangling for specific functions. This
+makes them easier to name in the `CHECK` lines.
+
+
+## Problems Writing Portable Tests
+
+Writing tests which check the code generated by a compiler is
+inherently non-portable. Different compilers and even different compiler
+versions may generate entirely different code. The Benchmark tests
+must tolerate this.
+
+LLVM Filecheck provides a number of mechanisms to help write
+"more portable" tests, including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
+allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
+for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
+
+#### Capturing Variables
+
+For example, say GCC stores a variable in a register but Clang stores
+it in memory. To write a test that tolerates both cases we "capture"
+the destination of the store, and then use the captured expression
+to write the remainder of the test.
+
+```c++
+// CHECK-LABEL: test_div_no_op_into_shr:
+extern "C" int test_div_no_op_into_shr(int value) {
+    int divisor = 2;
+    benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
+    return value / divisor;
+
+    // CHECK: movl $2, [[DEST:.*]]
+    // CHECK: idivl [[DEST]]
+    // CHECK: ret
+}
+```
+
+#### Using Regular Expressions to Match Differing Output
+
+Often tests require testing assembly lines which may subtly differ
+between compilers or compiler versions. A common example of this
+is matching stack frame addresses. In this case regular expressions
+can be used to match the differing bits of output. For example:
+
+```c++
+int ExternInt;
+struct Point { int x, y, z; };
+
+// CHECK-LABEL: test_store_point:
+extern "C" void test_store_point() {
+    Point p{ExternInt, ExternInt, ExternInt};
+    benchmark::DoNotOptimize(p);
+
+    // CHECK: movl ExternInt(%rip), %eax
+    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
+    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
+    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
+    // CHECK: ret
+}
+```
+
+## Current Requirements and Limitations
+
+The tests require Filecheck to be installed along the `PATH` of the
+build machine. Otherwise the tests will be disabled.
+
+Additionally, as mentioned in the previous section, codegen tests are
+inherently non-portable. Currently the tests are limited to:
+
+* x86_64 targets.
+* Compiled with GCC or Clang.
+
+Further work could be done, at least on a limited basis, to extend the
+tests to other architectures and compilers (using `CHECK` prefixes).
+
+Furthermore, the tests fail for builds which specify additional flags
+that modify code generation, including `--coverage` or `-fsanitize=`.
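+
+As a closing sketch of the `CHECK` prefix mechanism described above, a
+prefix-based portable test might look like the following. (The function and
+the exact instruction sequences here are hypothetical, chosen only for
+illustration.)
+
+```c++
+// CHECK-LABEL: test_double_value:
+extern "C" int test_double_value() {
+    extern int ExternInt;
+    return ExternInt * 2;
+
+    // Matched only against Clang's output.
+    // CHECK-CLANG: leal
+    // Matched only against GCC's output.
+    // CHECK-GNU: addl
+    // Plain CHECK lines are matched against every compiler's output.
+    // CHECK: ret
+}
+```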
+ diff --git a/libcxx/utils/google-benchmark/include/benchmark/benchmark.h b/libcxx/utils/google-benchmark/include/benchmark/benchmark.h index 340cbc1..193fffc 100644 --- a/libcxx/utils/google-benchmark/include/benchmark/benchmark.h +++ b/libcxx/utils/google-benchmark/include/benchmark/benchmark.h @@ -164,7 +164,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #ifndef BENCHMARK_BENCHMARK_H_ #define BENCHMARK_BENCHMARK_H_ - // The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer. #if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) #define BENCHMARK_HAS_CXX11 @@ -172,22 +171,23 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #include +#include #include #include #include -#include -#include #include #include +#include +#include #if defined(BENCHMARK_HAS_CXX11) -#include #include +#include #include #endif #if defined(_MSC_VER) -#include // for _ReadWriteBarrier +#include // for _ReadWriteBarrier #endif #ifndef BENCHMARK_HAS_CXX11 @@ -226,13 +226,15 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #define BENCHMARK_INTERNAL_TOSTRING2(x) #x #define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x) -#if defined(__GNUC__) +#if defined(__GNUC__) || defined(__clang__) #define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) #define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) #else #define BENCHMARK_BUILTIN_EXPECT(x, y) x #define BENCHMARK_DEPRECATED_MSG(msg) -#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg)) +#define BENCHMARK_WARNING_MSG(msg) \ + __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \ + __LINE__) ") : warning note: " msg)) #endif #if defined(__GNUC__) && !defined(__clang__) @@ -289,13 +291,11 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); } // namespace internal - #if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \ - defined(EMSCRIPTN) -# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY + defined(__EMSCRIPTEN__) +#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY #endif - // The DoNotOptimize(...) function can be used to prevent a value or // expression from being optimized away by the compiler. This function is // intended to add little to no overhead. @@ -303,14 +303,18 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); #ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY template inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { - // Clang doesn't like the 'X' constraint on `value` and certain GCC versions - // don't like the 'g' constraint. Attempt to placate them both. + asm volatile("" : : "r,m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { #if defined(__clang__) - asm volatile("" : : "g"(value) : "memory"); + asm volatile("" : "+r,m"(value) : : "memory"); #else - asm volatile("" : : "i,r,m"(value) : "memory"); + asm volatile("" : "+m,r"(value) : : "memory"); #endif } + // Force the compiler to flush pending writes to global memory. 
Acts as an // effective read/write barrier inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { @@ -323,9 +327,7 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { _ReadWriteBarrier(); } -inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { - _ReadWriteBarrier(); -} +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); } #else template inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { @@ -334,39 +336,54 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { // FIXME Add ClobberMemory() for non-gnu and non-msvc compilers #endif - - // This class is used for user-defined counters. class Counter { -public: - + public: enum Flags { - kDefaults = 0, + kDefaults = 0, // Mark the counter as a rate. It will be presented divided // by the duration of the benchmark. - kIsRate = 1, + kIsRate = 1U << 0U, // Mark the counter as a thread-average quantity. It will be // presented divided by the number of threads. - kAvgThreads = 2, + kAvgThreads = 1U << 1U, // Mark the counter as a thread-average rate. See above. - kAvgThreadsRate = kIsRate|kAvgThreads + kAvgThreadsRate = kIsRate | kAvgThreads, + // Mark the counter as a constant value, valid/same for *every* iteration. + // When reporting, it will be *multiplied* by the iteration count. + kIsIterationInvariant = 1U << 2U, + // Mark the counter as a constant rate. + // When reporting, it will be *multiplied* by the iteration count + // and then divided by the duration of the benchmark. + kIsIterationInvariantRate = kIsRate | kIsIterationInvariant, + // Mark the counter as a iteration-average quantity. + // It will be presented divided by the number of iterations. + kAvgIterations = 1U << 3U, + // Mark the counter as a iteration-average rate. See above. + kAvgIterationsRate = kIsRate | kAvgIterations }; double value; - Flags flags; + Flags flags; BENCHMARK_ALWAYS_INLINE Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {} - BENCHMARK_ALWAYS_INLINE operator double const& () const { return value; } - BENCHMARK_ALWAYS_INLINE operator double & () { return value; } - + BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; } + BENCHMARK_ALWAYS_INLINE operator double&() { return value; } }; +// A helper for user code to create unforeseen combinations of Flags, without +// having to do this cast manually each time, or providing this operator. +Counter::Flags inline operator|(const Counter::Flags& LHS, + const Counter::Flags& RHS) { + return static_cast(static_cast(LHS) | + static_cast(RHS)); +} + // This is the container for the user-defined counters. typedef std::map UserCounters; - // TimeUnit is passed to a benchmark in order to specify the order of magnitude // for the measured time. enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond }; @@ -379,7 +396,7 @@ enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda }; // BigOFunc is passed to a benchmark in order to specify the asymptotic // computational complexity for the benchmark. 
-typedef double(BigOFunc)(int); +typedef double(BigOFunc)(int64_t); // StatisticsFunc is passed to a benchmark in order to compute some descriptive // statistics over all the measurements of some type @@ -390,7 +407,7 @@ struct Statistics { StatisticsFunc* compute_; Statistics(std::string name, StatisticsFunc* compute) - : name_(name), compute_(compute) {} + : name_(name), compute_(compute) {} }; namespace internal { @@ -399,14 +416,12 @@ class ThreadManager; enum ReportMode #if defined(BENCHMARK_HAS_CXX11) - : unsigned + : unsigned #else #endif - { - RM_Unspecified, // The mode has not been manually specified +{ RM_Unspecified, // The mode has not been manually specified RM_Default, // The mode is user-specified as default. - RM_ReportAggregatesOnly -}; + RM_ReportAggregatesOnly }; } // namespace internal // State is passed to a running Benchmark and contains state for the @@ -429,16 +444,19 @@ class State { // Returns true if the benchmark should continue through another iteration. // NOTE: A benchmark may not return from the test until KeepRunning() has // returned false. - bool KeepRunning() { - if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { - StartKeepRunning(); - } - bool const res = (--total_iterations_ != 0); - if (BENCHMARK_BUILTIN_EXPECT(!res, false)) { - FinishKeepRunning(); - } - return res; - } + bool KeepRunning(); + + // Returns true iff the benchmark should run n more iterations. + // REQUIRES: 'n' > 0. + // NOTE: A benchmark must not return from the test until KeepRunningBatch() + // has returned false. + // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations. + // + // Intended usage: + // while (state.KeepRunningBatch(1000)) { + // // process 1000 elements + // } + bool KeepRunningBatch(size_t n); // REQUIRES: timer is running and 'SkipWithError(...)' has not been called // by the current thread. @@ -505,10 +523,10 @@ class State { // // REQUIRES: a benchmark has exited its benchmarking loop. BENCHMARK_ALWAYS_INLINE - void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; } + void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; } BENCHMARK_ALWAYS_INLINE - size_t bytes_processed() const { return bytes_processed_; } + int64_t bytes_processed() const { return bytes_processed_; } // If this routine is called with complexity_n > 0 and complexity report is // requested for the @@ -516,10 +534,10 @@ class State { // and complexity_n will // represent the length of N. BENCHMARK_ALWAYS_INLINE - void SetComplexityN(int complexity_n) { complexity_n_ = complexity_n; } + void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; } BENCHMARK_ALWAYS_INLINE - int complexity_length_n() { return complexity_n_; } + int64_t complexity_length_n() { return complexity_n_; } // If this routine is called with items > 0, then an items/s // label is printed on the benchmark report line for the currently @@ -528,10 +546,10 @@ class State { // // REQUIRES: a benchmark has exited its benchmarking loop. BENCHMARK_ALWAYS_INLINE - void SetItemsProcessed(size_t items) { items_processed_ = items; } + void SetItemsProcessed(int64_t items) { items_processed_ = items; } BENCHMARK_ALWAYS_INLINE - size_t items_processed() const { return items_processed_; } + int64_t items_processed() const { return items_processed_; } // If this routine is called, the specified label is printed at the // end of the benchmark report line for the currently executing @@ -539,7 +557,7 @@ class State { // static void BM_Compress(benchmark::State& state) { // ... 
// double compress = input_size / output_size; - // state.SetLabel(StringPrintf("compress:%.1f%%", 100.0*compression)); + // state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression)); // } // Produces output that looks like: // BM_Compress 50 50 14115038 compress:27.3% @@ -553,33 +571,51 @@ class State { // Range arguments for this run. CHECKs if the argument has been set. BENCHMARK_ALWAYS_INLINE - int range(std::size_t pos = 0) const { + int64_t range(std::size_t pos = 0) const { assert(range_.size() > pos); return range_[pos]; } BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead") - int range_x() const { return range(0); } + int64_t range_x() const { return range(0); } BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead") - int range_y() const { return range(1); } + int64_t range_y() const { return range(1); } BENCHMARK_ALWAYS_INLINE - size_t iterations() const { return (max_iterations - total_iterations_) + 1; } + size_t iterations() const { + if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { + return 0; + } + return max_iterations - total_iterations_ + batch_leftover_; + } + + private + : // items we expect on the first cache line (ie 64 bytes of the struct) + // When total_iterations_ is 0, KeepRunning() and friends will return false. + // May be larger than max_iterations. + size_t total_iterations_; + + // When using KeepRunningBatch(), batch_leftover_ holds the number of + // iterations beyond max_iters that were run. Used to track + // completed_iterations_ accurately. + size_t batch_leftover_; + + public: + const size_t max_iterations; private: bool started_; bool finished_; - size_t total_iterations_; - - std::vector range_; + bool error_occurred_; - size_t bytes_processed_; - size_t items_processed_; + private: // items we don't need on the first cache line + std::vector range_; - int complexity_n_; + int64_t bytes_processed_; + int64_t items_processed_; - bool error_occurred_; + int64_t complexity_n_; public: // Container for user-defined counters. @@ -588,27 +624,66 @@ class State { const int thread_index; // Number of threads concurrently executing the benchmark. const int threads; - const size_t max_iterations; // TODO(EricWF) make me private - State(size_t max_iters, const std::vector& ranges, int thread_i, + State(size_t max_iters, const std::vector& ranges, int thread_i, int n_threads, internal::ThreadTimer* timer, internal::ThreadManager* manager); private: void StartKeepRunning(); + // Implementation of KeepRunning() and KeepRunningBatch(). + // is_batch must be true unless n is 1. + bool KeepRunningInternal(size_t n, bool is_batch); void FinishKeepRunning(); internal::ThreadTimer* timer_; internal::ThreadManager* manager_; BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State); }; +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() { + return KeepRunningInternal(1, /*is_batch=*/false); +} + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(size_t n) { + return KeepRunningInternal(n, /*is_batch=*/true); +} + +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(size_t n, + bool is_batch) { + // total_iterations_ is set to 0 by the constructor, and always set to a + // nonzero value by StartKepRunning(). + assert(n > 0); + // n must be 1 unless is_batch is true. 
+ assert(is_batch || n == 1); + if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) { + total_iterations_ -= n; + return true; + } + if (!started_) { + StartKeepRunning(); + if (!error_occurred_ && total_iterations_ >= n) { + total_iterations_ -= n; + return true; + } + } + // For non-batch runs, total_iterations_ must be 0 by now. + if (is_batch && total_iterations_ != 0) { + batch_leftover_ = n - total_iterations_; + total_iterations_ = 0; + return true; + } + FinishKeepRunning(); + return false; +} + struct State::StateIterator { struct BENCHMARK_UNUSED Value {}; typedef std::forward_iterator_tag iterator_category; typedef Value value_type; typedef Value reference; typedef Value pointer; + typedef std::ptrdiff_t difference_type; private: friend class State; @@ -670,7 +745,7 @@ class Benchmark { // Run this benchmark once with "x" as the extra argument passed // to the function. // REQUIRES: The function passed to the constructor must accept an arg1. - Benchmark* Arg(int x); + Benchmark* Arg(int64_t x); // Run this benchmark with the given time unit for the generated output report Benchmark* Unit(TimeUnit unit); @@ -678,23 +753,23 @@ class Benchmark { // Run this benchmark once for a number of values picked from the // range [start..limit]. (start and limit are always picked.) // REQUIRES: The function passed to the constructor must accept an arg1. - Benchmark* Range(int start, int limit); + Benchmark* Range(int64_t start, int64_t limit); // Run this benchmark once for all values in the range [start..limit] with // specific step // REQUIRES: The function passed to the constructor must accept an arg1. - Benchmark* DenseRange(int start, int limit, int step = 1); + Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1); // Run this benchmark once with "args" as the extra arguments passed // to the function. // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... - Benchmark* Args(const std::vector& args); + Benchmark* Args(const std::vector& args); // Equivalent to Args({x, y}) // NOTE: This is a legacy C++03 interface provided for compatibility only. // New code should use 'Args'. - Benchmark* ArgPair(int x, int y) { - std::vector args; + Benchmark* ArgPair(int64_t x, int64_t y) { + std::vector args; args.push_back(x); args.push_back(y); return Args(args); @@ -703,7 +778,7 @@ class Benchmark { // Run this benchmark once for a number of values picked from the // ranges [start..limit]. (starts and limits are always picked.) // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... - Benchmark* Ranges(const std::vector >& ranges); + Benchmark* Ranges(const std::vector >& ranges); // Equivalent to ArgNames({name}) Benchmark* ArgName(const std::string& name); @@ -715,8 +790,8 @@ class Benchmark { // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}). // NOTE: This is a legacy C++03 interface provided for compatibility only. // New code should use 'Ranges'. 
- Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2) { - std::vector > ranges; + Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) { + std::vector > ranges; ranges.push_back(std::make_pair(lo1, hi1)); ranges.push_back(std::make_pair(lo2, hi2)); return Ranges(ranges); @@ -823,15 +898,13 @@ class Benchmark { int ArgsCnt() const; - static void AddRange(std::vector* dst, int lo, int hi, int mult); - private: friend class BenchmarkFamilies; std::string name_; ReportMode report_mode_; - std::vector arg_names_; // Args for all benchmark runs - std::vector > args_; // Args for all benchmark runs + std::vector arg_names_; // Args for all benchmark runs + std::vector > args_; // Args for all benchmark runs TimeUnit time_unit_; int range_multiplier_; double min_time_; @@ -1055,7 +1128,7 @@ class Fixture : public internal::Benchmark { class BaseClass##_##Method##_Benchmark : public BaseClass { \ public: \ BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass"<" #a ">/" #Method); \ + this->SetName(#BaseClass "<" #a ">/" #Method); \ } \ \ protected: \ @@ -1066,7 +1139,7 @@ class Fixture : public internal::Benchmark { class BaseClass##_##Method##_Benchmark : public BaseClass { \ public: \ BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \ + this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ } \ \ protected: \ @@ -1078,14 +1151,15 @@ class Fixture : public internal::Benchmark { class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ public: \ BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \ - this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \ + this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \ } \ \ protected: \ virtual void BenchmarkCase(::benchmark::State&); \ }; #else -#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) #endif #define BENCHMARK_DEFINE_F(BaseClass, Method) \ @@ -1105,7 +1179,8 @@ class Fixture : public internal::Benchmark { BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ void BaseClass##_##Method##_Benchmark::BenchmarkCase #else -#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) #endif #define BENCHMARK_REGISTER_F(BaseClass, Method) \ @@ -1132,24 +1207,24 @@ class Fixture : public internal::Benchmark { void BaseClass##_##Method##_Benchmark::BenchmarkCase #ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \ +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) 
\ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ - BENCHMARK_REGISTER_F(BaseClass, Method); \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ void BaseClass##_##Method##_Benchmark::BenchmarkCase #else -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) #endif // Helper macro to create a main routine in a test that runs the benchmarks -#define BENCHMARK_MAIN() \ - int main(int argc, char** argv) { \ - ::benchmark::Initialize(&argc, argv); \ +#define BENCHMARK_MAIN() \ + int main(int argc, char** argv) { \ + ::benchmark::Initialize(&argc, argv); \ if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \ - ::benchmark::RunSpecifiedBenchmarks(); \ - } \ + ::benchmark::RunSpecifiedBenchmarks(); \ + } \ int main(int, char**) - // ------------------------------------------------------ // Benchmark Reporters @@ -1186,7 +1261,7 @@ class BenchmarkReporter { CPUInfo const& cpu_info; // The number of chars in the longest benchmark name. size_t name_field_width; - + static const char* executable_name; Context(); }; @@ -1239,7 +1314,7 @@ class BenchmarkReporter { // Keep track of arguments to compute asymptotic complexity BigO complexity; BigOFunc* complexity_lambda; - int complexity_n; + int64_t complexity_n; // what statistics to compute from the measurements const std::vector* statistics; @@ -1309,17 +1384,19 @@ class BenchmarkReporter { // Simple reporter that outputs benchmark data to the console. This is the // default reporter used by RunSpecifiedBenchmarks(). class ConsoleReporter : public BenchmarkReporter { -public: + public: enum OutputOptions { OO_None = 0, OO_Color = 1, OO_Tabular = 2, - OO_ColorTabular = OO_Color|OO_Tabular, + OO_ColorTabular = OO_Color | OO_Tabular, OO_Defaults = OO_ColorTabular }; explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) - : output_options_(opts_), name_field_width_(0), - prev_counters_(), printed_header_(false) {} + : output_options_(opts_), + name_field_width_(0), + prev_counters_(), + printed_header_(false) {} virtual bool ReportContext(const Context& context); virtual void ReportRuns(const std::vector& reports); @@ -1347,7 +1424,8 @@ class JSONReporter : public BenchmarkReporter { bool first_report_; }; -class CSVReporter : public BenchmarkReporter { +class BENCHMARK_DEPRECATED_MSG("The CSV Reporter will be removed in a future release") + CSVReporter : public BenchmarkReporter { public: CSVReporter() : printed_header_(false) {} virtual bool ReportContext(const Context& context); @@ -1357,7 +1435,7 @@ class CSVReporter : public BenchmarkReporter { void PrintRunData(const Run& report); bool printed_header_; - std::set< std::string > user_counter_names_; + std::set user_counter_names_; }; inline const char* GetTimeUnitString(TimeUnit unit) { @@ -1384,6 +1462,6 @@ inline double GetTimeUnitMultiplier(TimeUnit unit) { } } -} // namespace benchmark +} // namespace benchmark #endif // BENCHMARK_BENCHMARK_H_ diff --git a/libcxx/utils/google-benchmark/releasing.md b/libcxx/utils/google-benchmark/releasing.md new file mode 100644 index 0000000..f0cd701 --- /dev/null +++ b/libcxx/utils/google-benchmark/releasing.md @@ -0,0 +1,16 @@ +# How to release + +* Make sure you're on master and synced to HEAD +* Ensure the project builds and tests run (sanity check only, obviously) + * `parallel -j0 exec ::: test/*_test` can help ensure everything at least + passes +* Prepare release notes + * 
`git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of + commits between the last annotated tag and HEAD + * Pick the most interesting. +* Create a release through github's interface + * Note this will create a lightweight tag. + * Update this to an annotated tag: + * `git pull --tags` + * `git tag -a -f ` + * `git push --force origin` diff --git a/libcxx/utils/google-benchmark/src/CMakeLists.txt b/libcxx/utils/google-benchmark/src/CMakeLists.txt index e22620a..977474f 100644 --- a/libcxx/utils/google-benchmark/src/CMakeLists.txt +++ b/libcxx/utils/google-benchmark/src/CMakeLists.txt @@ -11,6 +11,10 @@ file(GLOB *.cc ${PROJECT_SOURCE_DIR}/include/benchmark/*.h ${CMAKE_CURRENT_SOURCE_DIR}/*.h) +file(GLOB BENCHMARK_MAIN "benchmark_main.cc") +foreach(item ${BENCHMARK_MAIN}) + list(REMOVE_ITEM SOURCE_FILES "${item}") +endforeach() add_library(benchmark ${SOURCE_FILES}) set_target_properties(benchmark PROPERTIES @@ -34,6 +38,23 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") target_link_libraries(benchmark Shlwapi) endif() +# We need extra libraries on Solaris +if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") + target_link_libraries(benchmark kstat) +endif() + +# Benchmark main library +add_library(benchmark_main "benchmark_main.cc") +set_target_properties(benchmark_main PROPERTIES + OUTPUT_NAME "benchmark_main" + VERSION ${GENERIC_LIB_VERSION} + SOVERSION ${GENERIC_LIB_SOVERSION} +) +target_include_directories(benchmark PUBLIC + $ + ) +target_link_libraries(benchmark_main benchmark) + set(include_install_dir "include") set(lib_install_dir "lib/") set(bin_install_dir "bin/") @@ -51,7 +72,7 @@ set(namespace "${PROJECT_NAME}::") include(CMakePackageConfigHelpers) write_basic_package_version_file( - "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion + "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion ) configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) @@ -60,7 +81,7 @@ configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ON if (BENCHMARK_ENABLE_INSTALL) # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) install( - TARGETS benchmark + TARGETS benchmark benchmark_main EXPORT ${targets_export_name} ARCHIVE DESTINATION ${lib_install_dir} LIBRARY DESTINATION ${lib_install_dir} diff --git a/libcxx/utils/google-benchmark/src/benchmark.cc b/libcxx/utils/google-benchmark/src/benchmark.cc index 1a7d218..b14bc62 100644 --- a/libcxx/utils/google-benchmark/src/benchmark.cc +++ b/libcxx/utils/google-benchmark/src/benchmark.cc @@ -17,7 +17,9 @@ #include "internal_macros.h" #ifndef BENCHMARK_OS_WINDOWS +#ifndef BENCHMARK_OS_FUCHSIA #include +#endif #include #include #endif @@ -27,10 +29,10 @@ #include #include #include -#include #include #include #include +#include #include #include "check.h" @@ -44,7 +46,8 @@ #include "re.h" #include "statistics.h" #include "string_util.h" -#include "timers.h" +#include "thread_manager.h" +#include "thread_timer.h" DEFINE_bool(benchmark_list_tests, false, "Print a list of benchmarks. This option overrides all other " @@ -82,7 +85,7 @@ DEFINE_string(benchmark_out_format, "json", "The format to use for file output. Valid values are " "'console', 'json', or 'csv'."); -DEFINE_string(benchmark_out, "", "The file to write additonal output to"); +DEFINE_string(benchmark_out, "", "The file to write additional output to"); DEFINE_string(benchmark_color, "auto", "Whether to use colors in the output. 
Valid values: " @@ -108,119 +111,11 @@ namespace internal { void UseCharPointer(char const volatile*) {} -class ThreadManager { - public: - ThreadManager(int num_threads) - : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} - - Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { - return benchmark_mutex_; - } - - bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { - return start_stop_barrier_.wait(); - } - - void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { - start_stop_barrier_.removeThread(); - if (--alive_threads_ == 0) { - MutexLock lock(end_cond_mutex_); - end_condition_.notify_all(); - } - } - - void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { - MutexLock lock(end_cond_mutex_); - end_condition_.wait(lock.native_handle(), - [this]() { return alive_threads_ == 0; }); - } - - public: - struct Result { - double real_time_used = 0; - double cpu_time_used = 0; - double manual_time_used = 0; - int64_t bytes_processed = 0; - int64_t items_processed = 0; - int complexity_n = 0; - std::string report_label_; - std::string error_message_; - bool has_error_ = false; - UserCounters counters; - }; - GUARDED_BY(GetBenchmarkMutex()) Result results; - - private: - mutable Mutex benchmark_mutex_; - std::atomic alive_threads_; - Barrier start_stop_barrier_; - Mutex end_cond_mutex_; - Condition end_condition_; -}; - -// Timer management class -class ThreadTimer { - public: - ThreadTimer() = default; - - // Called by each thread - void StartTimer() { - running_ = true; - start_real_time_ = ChronoClockNow(); - start_cpu_time_ = ThreadCPUUsage(); - } - - // Called by each thread - void StopTimer() { - CHECK(running_); - running_ = false; - real_time_used_ += ChronoClockNow() - start_real_time_; - // Floating point error can result in the subtraction producing a negative - // time. Guard against that. - cpu_time_used_ += std::max(ThreadCPUUsage() - start_cpu_time_, 0); - } - - // Called by each thread - void SetIterationTime(double seconds) { manual_time_used_ += seconds; } - - bool running() const { return running_; } - - // REQUIRES: timer is not running - double real_time_used() { - CHECK(!running_); - return real_time_used_; - } - - // REQUIRES: timer is not running - double cpu_time_used() { - CHECK(!running_); - return cpu_time_used_; - } - - // REQUIRES: timer is not running - double manual_time_used() { - CHECK(!running_); - return manual_time_used_; - } - - private: - bool running_ = false; // Is the timer running - double start_real_time_ = 0; // If running_ - double start_cpu_time_ = 0; // If running_ - - // Accumulated time so far (does not contain current slice if running_) - double real_time_used_ = 0; - double cpu_time_used_ = 0; - // Manually set iteration time. User sets this with SetIterationTime(seconds). - double manual_time_used_ = 0; -}; - namespace { BenchmarkReporter::Run CreateRunReport( const benchmark::internal::Benchmark::Instance& b, - const internal::ThreadManager::Result& results, size_t iters, - double seconds) { + const internal::ThreadManager::Result& results, double seconds) { // Create report about this benchmark run. BenchmarkReporter::Run report; @@ -228,8 +123,8 @@ BenchmarkReporter::Run CreateRunReport( report.error_occurred = results.has_error_; report.error_message = results.error_message_; report.report_label = results.report_label_; - // Report the total iterations across all threads. - report.iterations = static_cast(iters) * b.threads; + // This is the total iterations across all threads. 
+ report.iterations = results.iterations; report.time_unit = b.time_unit; if (!report.error_occurred) { @@ -255,7 +150,7 @@ BenchmarkReporter::Run CreateRunReport( report.complexity_lambda = b.complexity_lambda; report.statistics = b.statistics; report.counters = results.counters; - internal::Finish(&report.counters, seconds, b.threads); + internal::Finish(&report.counters, results.iterations, seconds, b.threads); } return report; } @@ -268,11 +163,12 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b, internal::ThreadTimer timer; State st(iters, b->arg, thread_id, b->threads, &timer, manager); b->benchmark->Run(st); - CHECK(st.iterations() == st.max_iterations) + CHECK(st.iterations() >= st.max_iterations) << "Benchmark returned before State::KeepRunning() returned false!"; { MutexLock l(manager->GetBenchmarkMutex()); internal::ThreadManager::Result& results = manager->results; + results.iterations += st.iterations(); results.cpu_time_used += timer.cpu_time_used(); results.real_time_used += timer.real_time_used(); results.manual_time_used += timer.manual_time_used(); @@ -337,21 +233,23 @@ std::vector<BenchmarkReporter::Run> RunBenchmark( const double min_time = !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time; + // clang-format off + // turn off clang-format since it mangles prettiness here // Determine if this run should be reported; either it has // run for a sufficient amount of time or an error was reported. const bool should_report = repetition_num > 0 - || has_explicit_iteration_count // An exact iteration count was requested + || has_explicit_iteration_count // An exact iteration count was requested || results.has_error_ - || iters >= kMaxIterations - || seconds >= min_time // the elapsed time is large enough + || iters >= kMaxIterations // No chance to try again, we hit the limit. + || seconds >= min_time // the elapsed time is large enough // CPU time is specified but the elapsed real time greatly exceeds the // minimum time. Note that user provided timers are exempt from this // sanity check. || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time); + // clang-format on if (should_report) { - BenchmarkReporter::Run report = - CreateRunReport(b, results, iters, seconds); + BenchmarkReporter::Run report = CreateRunReport(b, results, seconds); if (!report.error_occurred && b.complexity != oNone) complexity_reports->push_back(report); reports.push_back(report); @@ -394,26 +292,50 @@ std::vector<BenchmarkReporter::Run> RunBenchmark( } // namespace } // namespace internal -State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i, +State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i, int n_threads, internal::ThreadTimer* timer, internal::ThreadManager* manager) - : started_(false), + : total_iterations_(0), + batch_leftover_(0), + max_iterations(max_iters), + started_(false), finished_(false), - total_iterations_(max_iters + 1), + error_occurred_(false), range_(ranges), bytes_processed_(0), items_processed_(0), complexity_n_(0), - error_occurred_(false), counters(), thread_index(thread_i), threads(n_threads), - max_iterations(max_iters), timer_(timer), manager_(manager) { CHECK(max_iterations != 0) << "At least one iteration must be run"; - CHECK(total_iterations_ != 0) << "max iterations wrapped around"; CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; + + // Note: The use of offsetof below is technically undefined until C++17 + // because State is not a standard layout type.
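In isolation, the first-cache-line assertion that the next hunk adds to the constructor looks like the sketch below (`HotState` and its members are invented for illustration; 64 matches the hard-coded `cache_line_size`):

```cpp
#include <cstddef>

struct HotState {
  long total_iterations_;  // hot: touched on every loop iteration
  bool error_occurred_;    // hot: checked when timing starts
  char cold_[256];         // everything else
};

#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
// Fails to compile if error_occurred_ ever drifts past byte 63.
static_assert(offsetof(HotState, error_occurred_) <= (64 - sizeof(bool)),
              "error_occurred_ must stay on the first cache line");
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
```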
However, all compilers + // currently provide well-defined behavior as an extension (which is + // demonstrated since constexpr evaluation must diagnose all undefined + // behavior). However, GCC and Clang also warn about this use of offsetof, + // which must be suppressed. +#if defined(__INTEL_COMPILER) +#pragma warning push +#pragma warning(disable:1875) +#elif defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif + // Offset tests to ensure commonly accessed data is on the first cache line. + const int cache_line_size = 64; + static_assert(offsetof(State, error_occurred_) <= + (cache_line_size - sizeof(error_occurred_)), + ""); +#if defined(__INTEL_COMPILER) +#pragma warning pop +#elif defined(__GNUC__) +#pragma GCC diagnostic pop +#endif } void State::PauseTiming() { @@ -437,7 +359,7 @@ void State::SkipWithError(const char* msg) { manager_->results.has_error_ = true; } } - total_iterations_ = 1; + total_iterations_ = 0; if (timer_->running()) timer_->StopTimer(); } @@ -453,6 +375,7 @@ void State::SetLabel(const char* label) { void State::StartKeepRunning() { CHECK(!started_ && !finished_); started_ = true; + total_iterations_ = error_occurred_ ? 0 : max_iterations; manager_->StartStopBarrier(); if (!error_occurred_) ResumeTiming(); } @@ -462,8 +385,8 @@ void State::FinishKeepRunning() { if (!error_occurred_) { PauseTiming(); } - // Total iterations has now wrapped around zero. Fix this. - total_iterations_ = 1; + // Total iterations has now wrapped around past 0. Fix this. + total_iterations_ = 0; finished_ = true; manager_->StartStopBarrier(); } @@ -472,8 +395,8 @@ namespace internal { namespace { void RunBenchmarks(const std::vector& benchmarks, - BenchmarkReporter* console_reporter, - BenchmarkReporter* file_reporter) { + BenchmarkReporter* console_reporter, + BenchmarkReporter* file_reporter) { // Note the file_reporter can be null. CHECK(console_reporter != nullptr); @@ -486,7 +409,7 @@ void RunBenchmarks(const std::vector& benchmarks, std::max(name_field_width, benchmark.name.size()); has_repetitions |= benchmark.repetitions > 1; - for(const auto& Stat : *benchmark.statistics) + for (const auto& Stat : *benchmark.statistics) stat_field_width = std::max(stat_field_width, Stat.name_.size()); } if (has_repetitions) name_field_width += 1 + stat_field_width; @@ -495,7 +418,7 @@ void RunBenchmarks(const std::vector& benchmarks, BenchmarkReporter::Context context; context.name_field_width = name_field_width; - // Keep track of runing times of all instances of current benchmark + // Keep track of running times of all instances of current benchmark std::vector complexity_reports; // We flush streams after invoking reporter methods that write to them. 
This @@ -554,15 +477,15 @@ ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { } else { output_opts &= ~ConsoleReporter::OO_Color; } - if(force_no_color) { + if (force_no_color) { output_opts &= ~ConsoleReporter::OO_Color; } - if(FLAGS_benchmark_counters_tabular) { + if (FLAGS_benchmark_counters_tabular) { output_opts |= ConsoleReporter::OO_Tabular; } else { output_opts &= ~ConsoleReporter::OO_Tabular; } - return static_cast< ConsoleReporter::OutputOptions >(output_opts); + return static_cast(output_opts); } } // end namespace internal @@ -587,7 +510,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter, std::unique_ptr default_file_reporter; if (!console_reporter) { default_console_reporter = internal::CreateReporter( - FLAGS_benchmark_format, internal::GetOutputOptions()); + FLAGS_benchmark_format, internal::GetOutputOptions()); console_reporter = default_console_reporter.get(); } auto& Out = console_reporter->GetOutputStream(); @@ -653,6 +576,8 @@ void PrintUsageAndExit() { void ParseCommandLineFlags(int* argc, char** argv) { using namespace benchmark; + BenchmarkReporter::Context::executable_name = + (argc && *argc > 0) ? argv[0] : "unknown"; for (int i = 1; i < *argc; ++i) { if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) || @@ -672,7 +597,7 @@ void ParseCommandLineFlags(int* argc, char** argv) { // TODO: Remove this. ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) || ParseBoolFlag(argv[i], "benchmark_counters_tabular", - &FLAGS_benchmark_counters_tabular) || + &FLAGS_benchmark_counters_tabular) || ParseInt32Flag(argv[i], "v", &FLAGS_v)) { for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1]; @@ -706,7 +631,8 @@ void Initialize(int* argc, char** argv) { bool ReportUnrecognizedArguments(int argc, char** argv) { for (int i = 1; i < argc; ++i) { - fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]); + fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], + argv[i]); } return argc > 1; } diff --git a/libcxx/utils/google-benchmark/src/benchmark_api_internal.h b/libcxx/utils/google-benchmark/src/benchmark_api_internal.h index d481dc5..dd7a3ff 100644 --- a/libcxx/utils/google-benchmark/src/benchmark_api_internal.h +++ b/libcxx/utils/google-benchmark/src/benchmark_api_internal.h @@ -17,7 +17,7 @@ struct Benchmark::Instance { std::string name; Benchmark* benchmark; ReportMode report_mode; - std::vector arg; + std::vector arg; TimeUnit time_unit; int range_multiplier; bool use_real_time; diff --git a/libcxx/utils/google-benchmark/src/benchmark_main.cc b/libcxx/utils/google-benchmark/src/benchmark_main.cc new file mode 100644 index 0000000..b3b2478 --- /dev/null +++ b/libcxx/utils/google-benchmark/src/benchmark_main.cc @@ -0,0 +1,17 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
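With `total_iterations_` now zeroed by `SkipWithError` (see the `State` changes above), a benchmark that reports an error simply falls out of its measurement loop. A sketch of the user-facing pattern, using a deliberately failing `fopen` as the error source:

```cpp
#include <cstdio>
#include "benchmark/benchmark.h"

static void BM_OpenFile(benchmark::State& state) {
  std::FILE* f = std::fopen("/nonexistent/path", "r");
  if (f == nullptr) {
    // Records the error and zeroes total_iterations_, so the loop
    // below exits immediately instead of being timed.
    state.SkipWithError("could not open file");
  }
  for (auto _ : state) {
    // benchmark body; skipped once an error has been recorded
  }
  if (f) std::fclose(f);
}
BENCHMARK(BM_OpenFile);
```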
+ +#include "benchmark/benchmark.h" + +BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/src/benchmark_register.cc b/libcxx/utils/google-benchmark/src/benchmark_register.cc index d5746a3..26a8972 100644 --- a/libcxx/utils/google-benchmark/src/benchmark_register.cc +++ b/libcxx/utils/google-benchmark/src/benchmark_register.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "benchmark/benchmark.h" -#include "benchmark_api_internal.h" -#include "internal_macros.h" +#include "benchmark_register.h" #ifndef BENCHMARK_OS_WINDOWS +#ifndef BENCHMARK_OS_FUCHSIA #include +#endif #include #include #endif @@ -34,13 +34,16 @@ #include #include +#include "benchmark/benchmark.h" +#include "benchmark_api_internal.h" #include "check.h" #include "commandlineflags.h" #include "complexity.h" -#include "statistics.h" +#include "internal_macros.h" #include "log.h" #include "mutex.h" #include "re.h" +#include "statistics.h" #include "string_util.h" #include "timers.h" @@ -74,7 +77,7 @@ class BenchmarkFamilies { // Extract the list of benchmark instances that match the specified // regular expression. - bool FindBenchmarks(const std::string& re, + bool FindBenchmarks(std::string re, std::vector* benchmarks, std::ostream* Err); @@ -104,13 +107,18 @@ void BenchmarkFamilies::ClearBenchmarks() { } bool BenchmarkFamilies::FindBenchmarks( - const std::string& spec, std::vector* benchmarks, + std::string spec, std::vector* benchmarks, std::ostream* ErrStream) { CHECK(ErrStream); auto& Err = *ErrStream; // Make regular expression out of command-line flag std::string error_msg; Regex re; + bool isNegativeFilter = false; + if (spec[0] == '-') { + spec.replace(0, 1, ""); + isNegativeFilter = true; + } if (!re.Init(spec, &error_msg)) { Err << "Could not compile benchmark re: " << error_msg << std::endl; return false; @@ -170,20 +178,20 @@ bool BenchmarkFamilies::FindBenchmarks( const auto& arg_name = family->arg_names_[arg_i]; if (!arg_name.empty()) { instance.name += - StringPrintF("%s:", family->arg_names_[arg_i].c_str()); + StrFormat("%s:", family->arg_names_[arg_i].c_str()); } } - - instance.name += StringPrintF("%d", arg); + + instance.name += StrFormat("%d", arg); ++arg_i; } if (!IsZero(family->min_time_)) - instance.name += StringPrintF("/min_time:%0.3f", family->min_time_); + instance.name += StrFormat("/min_time:%0.3f", family->min_time_); if (family->iterations_ != 0) - instance.name += StringPrintF("/iterations:%d", family->iterations_); + instance.name += StrFormat("/iterations:%d", family->iterations_); if (family->repetitions_ != 0) - instance.name += StringPrintF("/repeats:%d", family->repetitions_); + instance.name += StrFormat("/repeats:%d", family->repetitions_); if (family->use_manual_time_) { instance.name += "/manual_time"; @@ -193,10 +201,11 @@ bool BenchmarkFamilies::FindBenchmarks( // Add the number of threads used to the name if (!family->thread_counts_.empty()) { - instance.name += StringPrintF("/threads:%d", instance.threads); + instance.name += StrFormat("/threads:%d", instance.threads); } - if (re.Match(instance.name)) { + if ((re.Match(instance.name) && !isNegativeFilter) || + (!re.Match(instance.name) && isNegativeFilter)) { instance.last_benchmark_instance = (&args == &family->args_.back()); benchmarks->push_back(std::move(instance)); } @@ -244,30 +253,7 @@ Benchmark::Benchmark(const char* name) Benchmark::~Benchmark() {} -void Benchmark::AddRange(std::vector* dst, int lo, int hi, int mult) { - 
CHECK_GE(lo, 0); - CHECK_GE(hi, lo); - CHECK_GE(mult, 2); - - // Add "lo" - dst->push_back(lo); - - static const int kint32max = std::numeric_limits::max(); - - // Now space out the benchmarks in multiples of "mult" - for (int32_t i = 1; i < kint32max / mult; i *= mult) { - if (i >= hi) break; - if (i > lo) { - dst->push_back(i); - } - } - // Add "hi" (if different from "lo") - if (hi != lo) { - dst->push_back(hi); - } -} - -Benchmark* Benchmark::Arg(int x) { +Benchmark* Benchmark::Arg(int64_t x) { CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); args_.push_back({x}); return this; @@ -278,20 +264,21 @@ Benchmark* Benchmark::Unit(TimeUnit unit) { return this; } -Benchmark* Benchmark::Range(int start, int limit) { +Benchmark* Benchmark::Range(int64_t start, int64_t limit) { CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); - std::vector arglist; + std::vector arglist; AddRange(&arglist, start, limit, range_multiplier_); - for (int i : arglist) { + for (int64_t i : arglist) { args_.push_back({i}); } return this; } -Benchmark* Benchmark::Ranges(const std::vector>& ranges) { +Benchmark* Benchmark::Ranges( + const std::vector>& ranges) { CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(ranges.size())); - std::vector> arglists(ranges.size()); + std::vector> arglists(ranges.size()); std::size_t total = 1; for (std::size_t i = 0; i < ranges.size(); i++) { AddRange(&arglists[i], ranges[i].first, ranges[i].second, @@ -302,7 +289,7 @@ Benchmark* Benchmark::Ranges(const std::vector>& ranges) { std::vector ctr(arglists.size(), 0); for (std::size_t i = 0; i < total; i++) { - std::vector tmp; + std::vector tmp; tmp.reserve(arglists.size()); for (std::size_t j = 0; j < arglists.size(); j++) { @@ -334,17 +321,17 @@ Benchmark* Benchmark::ArgNames(const std::vector& names) { return this; } -Benchmark* Benchmark::DenseRange(int start, int limit, int step) { +Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) { CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); CHECK_GE(start, 0); CHECK_LE(start, limit); - for (int arg = start; arg <= limit; arg += step) { + for (int64_t arg = start; arg <= limit; arg += step) { args_.push_back({arg}); } return this; } -Benchmark* Benchmark::Args(const std::vector& args) { +Benchmark* Benchmark::Args(const std::vector& args) { CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(args.size())); args_.push_back(args); return this; @@ -361,7 +348,6 @@ Benchmark* Benchmark::RangeMultiplier(int multiplier) { return this; } - Benchmark* Benchmark::MinTime(double t) { CHECK(t > 0.0); CHECK(iterations_ == 0); @@ -369,7 +355,6 @@ Benchmark* Benchmark::MinTime(double t) { return this; } - Benchmark* Benchmark::Iterations(size_t n) { CHECK(n > 0); CHECK(IsZero(min_time_)); diff --git a/libcxx/utils/google-benchmark/src/benchmark_register.h b/libcxx/utils/google-benchmark/src/benchmark_register.h new file mode 100644 index 0000000..0705e21 --- /dev/null +++ b/libcxx/utils/google-benchmark/src/benchmark_register.h @@ -0,0 +1,33 @@ +#ifndef BENCHMARK_REGISTER_H +#define BENCHMARK_REGISTER_H + +#include + +#include "check.h" + +template +void AddRange(std::vector* dst, T lo, T hi, int mult) { + CHECK_GE(lo, 0); + CHECK_GE(hi, lo); + CHECK_GE(mult, 2); + + // Add "lo" + dst->push_back(lo); + + static const T kmax = std::numeric_limits::max(); + + // Now space out the benchmarks in multiples of "mult" + for (T i = 1; i < kmax / mult; i *= mult) { + if (i >= hi) break; + if (i > lo) { + dst->push_back(i); + } + } + + // Add "hi" (if different from "lo") + if (hi != lo) { + dst->push_back(hi); + } +} 
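A tiny driver showing what the templated `AddRange` above generates, assuming the new benchmark_register.h is on the include path: it emits `lo`, then the powers of `mult` strictly between `lo` and `hi`, then `hi`.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

#include "benchmark_register.h"  // the new header above

int main() {
  std::vector<int64_t> dst;
  AddRange<int64_t>(&dst, 8, 1024, /*mult=*/8);
  for (int64_t v : dst) std::printf("%lld ", (long long)v);
  std::printf("\n");  // prints: 8 64 512 1024
}
```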
+ +#endif // BENCHMARK_REGISTER_H diff --git a/libcxx/utils/google-benchmark/src/check.h b/libcxx/utils/google-benchmark/src/check.h index 73bead2..f5f8253 100644 --- a/libcxx/utils/google-benchmark/src/check.h +++ b/libcxx/utils/google-benchmark/src/check.h @@ -1,9 +1,9 @@ #ifndef CHECK_H_ #define CHECK_H_ +#include #include #include -#include #include "internal_macros.h" #include "log.h" @@ -62,6 +62,8 @@ class CheckHandler { #define CHECK(b) ::benchmark::internal::GetNullLogInstance() #endif +// clang-format off +// preserve whitespacing between operators for alignment #define CHECK_EQ(a, b) CHECK((a) == (b)) #define CHECK_NE(a, b) CHECK((a) != (b)) #define CHECK_GE(a, b) CHECK((a) >= (b)) @@ -75,5 +77,6 @@ class CheckHandler { #define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) #define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) #define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) +//clang-format on #endif // CHECK_H_ diff --git a/libcxx/utils/google-benchmark/src/commandlineflags.cc b/libcxx/utils/google-benchmark/src/commandlineflags.cc index 2fc92517..734e88b 100644 --- a/libcxx/utils/google-benchmark/src/commandlineflags.cc +++ b/libcxx/utils/google-benchmark/src/commandlineflags.cc @@ -45,7 +45,7 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) { // LONG_MAX or LONG_MIN when the input overflows.) result != long_value // The parsed value overflows as an Int32. - ) { + ) { std::cerr << src_text << " is expected to be a 32-bit integer, " << "but actually has value \"" << str << "\", " << "which overflows.\n"; diff --git a/libcxx/utils/google-benchmark/src/complexity.cc b/libcxx/utils/google-benchmark/src/complexity.cc index 8883269..aafd538 100644 --- a/libcxx/utils/google-benchmark/src/complexity.cc +++ b/libcxx/utils/google-benchmark/src/complexity.cc @@ -26,20 +26,23 @@ namespace benchmark { // Internal function to calculate the different scalability forms BigOFunc* FittingCurve(BigO complexity) { + static const double kLog2E = 1.44269504088896340736; switch (complexity) { case oN: - return [](int n) -> double { return n; }; + return [](int64_t n) -> double { return static_cast(n); }; case oNSquared: - return [](int n) -> double { return std::pow(n, 2); }; + return [](int64_t n) -> double { return std::pow(n, 2); }; case oNCubed: - return [](int n) -> double { return std::pow(n, 3); }; + return [](int64_t n) -> double { return std::pow(n, 3); }; case oLogN: - return [](int n) { return log2(n); }; + /* Note: can't use log2 because Android's GNU STL lacks it */ + return [](int64_t n) { return kLog2E * log(static_cast(n)); }; case oNLogN: - return [](int n) { return n * log2(n); }; + /* Note: can't use log2 because Android's GNU STL lacks it */ + return [](int64_t n) { return kLog2E * n * log(static_cast(n)); }; case o1: default: - return [](int) { return 1.0; }; + return [](int64_t) { return 1.0; }; } } @@ -65,15 +68,15 @@ std::string GetBigOString(BigO complexity) { // Find the coefficient for the high-order term in the running time, by // minimizing the sum of squares of relative error, for the fitting curve -// given by the lambda expresion. +// given by the lambda expression. // - n : Vector containing the size of the benchmark tests. // - time : Vector containing the times for the benchmark tests. -// - fitting_curve : lambda expresion (e.g. [](int n) {return n; };). +// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };). 
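The `kLog2E` constant above is just the change-of-base factor, so the oLogN and oNLogN curves stay exact on STLs that lack `log2`:

```latex
\log_2 n \;=\; \frac{\ln n}{\ln 2} \;=\; (\log_2 e)\,\ln n \;\approx\; 1.44269504088896340736\,\ln n
```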
// For a deeper explanation on the algorithm logic, look at the README file at // http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit -LeastSq MinimalLeastSq(const std::vector<int>& n, +LeastSq MinimalLeastSq(const std::vector<int64_t>& n, const std::vector<double>& time, BigOFunc* fitting_curve) { double sigma_gn = 0.0; @@ -117,7 +120,7 @@ LeastSq MinimalLeastSq(const std::vector<int>& n, // - complexity : If different than oAuto, the fitting curve will stick to // this one. If it is oAuto, the best fitting curve will be // calculated. -LeastSq MinimalLeastSq(const std::vector<int>& n, +LeastSq MinimalLeastSq(const std::vector<int64_t>& n, const std::vector<double>& time, const BigO complexity) { CHECK_EQ(n.size(), time.size()); CHECK_GE(n.size(), 2); // Do not compute fitting curve if less than two @@ -157,7 +160,7 @@ std::vector<BenchmarkReporter::Run> ComputeBigO( if (reports.size() < 2) return results; // Accumulators. - std::vector<int> n; + std::vector<int64_t> n; std::vector<double> real_time; std::vector<double> cpu_time; diff --git a/libcxx/utils/google-benchmark/src/counter.cc b/libcxx/utils/google-benchmark/src/counter.cc index ed1aa04..cb604e0 100644 --- a/libcxx/utils/google-benchmark/src/counter.cc +++ b/libcxx/utils/google-benchmark/src/counter.cc @@ -17,7 +17,8 @@ namespace benchmark { namespace internal { -double Finish(Counter const& c, double cpu_time, double num_threads) { +double Finish(Counter const& c, int64_t iterations, double cpu_time, + double num_threads) { double v = c.value; if (c.flags & Counter::kIsRate) { v /= cpu_time; @@ -25,25 +26,31 @@ double Finish(Counter const& c, double cpu_time, double num_threads) { if (c.flags & Counter::kAvgThreads) { v /= num_threads; } + if (c.flags & Counter::kIsIterationInvariant) { + v *= iterations; + } + if (c.flags & Counter::kAvgIterations) { + v /= iterations; + } return v; } -void Finish(UserCounters *l, double cpu_time, double num_threads) { - for (auto &c : *l) { - c.second.value = Finish(c.second, cpu_time, num_threads); +void Finish(UserCounters* l, int64_t iterations, double cpu_time, double num_threads) { + for (auto& c : *l) { + c.second.value = Finish(c.second, iterations, cpu_time, num_threads); } } -void Increment(UserCounters *l, UserCounters const& r) { +void Increment(UserCounters* l, UserCounters const& r) { // add counters present in both or just in *l - for (auto &c : *l) { + for (auto& c : *l) { auto it = r.find(c.first); if (it != r.end()) { c.second.value = c.second + it->second; } } // add counters present in r, but not in *l - for (auto const &tc : r) { + for (auto const& tc : r) { auto it = l->find(tc.first); if (it == l->end()) { (*l)[tc.first] = tc.second; @@ -64,5 +71,5 @@ bool SameNames(UserCounters const& l, UserCounters const& r) { return true; } -} // end namespace internal -} // end namespace benchmark +} // end namespace internal +} // end namespace benchmark diff --git a/libcxx/utils/google-benchmark/src/counter.h b/libcxx/utils/google-benchmark/src/counter.h index dd6865a..d884e50 100644 --- a/libcxx/utils/google-benchmark/src/counter.h +++ b/libcxx/utils/google-benchmark/src/counter.h @@ -18,9 +18,9 @@ namespace benchmark { // these counter-related functions are hidden to reduce API surface.
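The two new flags handled in `Finish` above are user-visible through `benchmark::Counter`. A sketch of how they might be used (flag names taken from this diff; the benchmark body is made up):

```cpp
#include "benchmark/benchmark.h"

static void BM_Compress(benchmark::State& state) {
  for (auto _ : state) {
    // ... work that produces one 4096-byte block per iteration ...
  }
  // Per-iteration-constant value, scaled by the iteration count at
  // report time: total bytes = 4096 * iterations.
  state.counters["Bytes"] =
      benchmark::Counter(4096, benchmark::Counter::kIsIterationInvariant);
  // A one-off total divided by the iteration count at report time.
  state.counters["SetupPerIter"] =
      benchmark::Counter(123.0, benchmark::Counter::kAvgIterations);
}
BENCHMARK(BM_Compress);
```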
namespace internal { -void Finish(UserCounters *l, double time, double num_threads); -void Increment(UserCounters *l, UserCounters const& r); +void Finish(UserCounters* l, int64_t iterations, double time, double num_threads); +void Increment(UserCounters* l, UserCounters const& r); bool SameNames(UserCounters const& l, UserCounters const& r); -} // end namespace internal +} // end namespace internal -} //end namespace benchmark +} // end namespace benchmark diff --git a/libcxx/utils/google-benchmark/src/csv_reporter.cc b/libcxx/utils/google-benchmark/src/csv_reporter.cc index 3551064..4a64190 100644 --- a/libcxx/utils/google-benchmark/src/csv_reporter.cc +++ b/libcxx/utils/google-benchmark/src/csv_reporter.cc @@ -22,9 +22,9 @@ #include #include +#include "check.h" #include "string_util.h" #include "timers.h" -#include "check.h" // File format reference: http://edoceo.com/utilitas/csv-file-format. @@ -42,7 +42,7 @@ bool CSVReporter::ReportContext(const Context& context) { return true; } -void CSVReporter::ReportRuns(const std::vector<Run> & reports) { +void CSVReporter::ReportRuns(const std::vector<Run>& reports) { std::ostream& Out = GetOutputStream(); if (!printed_header_) { @@ -58,7 +58,8 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) { Out << *B++; if (B != elements.end()) Out << ","; } - for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) { + for (auto B = user_counter_names_.begin(); + B != user_counter_names_.end();) { Out << ",\"" << *B++ << "\""; } Out << "\n"; @@ -69,9 +70,9 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) { for (const auto& run : reports) { for (const auto& cnt : run.counters) { CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) - << "All counters must be present in each run. " - << "Counter named \"" << cnt.first - << "\" was not in a run after being added to the header"; + << "All counters must be present in each run. " + << "Counter named \"" << cnt.first + << "\" was not in a run after being added to the header"; } } } @@ -80,10 +81,9 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) { for (const auto& run : reports) { PrintRunData(run); } - } -void CSVReporter::PrintRunData(const Run & run) { +void CSVReporter::PrintRunData(const Run& run) { std::ostream& Out = GetOutputStream(); // Field with embedded double-quote characters must be doubled and the field @@ -135,9 +135,9 @@ void CSVReporter::PrintRunData(const Run & run) { Out << ",,"; // for error_occurred and error_message // Print user counters - for (const auto &ucn : user_counter_names_) { + for (const auto& ucn : user_counter_names_) { auto it = run.counters.find(ucn); - if(it == run.counters.end()) { + if (it == run.counters.end()) { Out << ","; } else { Out << "," << it->second; diff --git a/libcxx/utils/google-benchmark/src/cycleclock.h b/libcxx/utils/google-benchmark/src/cycleclock.h index 4251fe4..00d5764 100644 --- a/libcxx/utils/google-benchmark/src/cycleclock.h +++ b/libcxx/utils/google-benchmark/src/cycleclock.h @@ -121,7 +121,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { // because it provides nanosecond resolution (which is noticeable at // least for PNaCl modules running on x86 Mac & Linux). // Initialize to always return 0 if clock_gettime fails.
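The `CHECK` added to csv_reporter.cc above enforces the CSV format's fixed header: counter columns are chosen from the first batch of runs, and later runs may omit a column (an empty cell is printed) but may not introduce a new one. An illustrative sketch:

```cpp
#include "benchmark/benchmark.h"

static void BM_WithCounter(benchmark::State& state) {
  for (auto _ : state) {}
  state.counters["items"] = 42;  // establishes the "items" column
}
static void BM_WithoutCounter(benchmark::State& state) {
  for (auto _ : state) {}
  // No "items" counter: with --benchmark_format=csv this run prints an
  // empty cell. If the registration order were reversed, "items" would
  // appear only after the header was written and trip the CHECK above.
}
BENCHMARK(BM_WithCounter);
BENCHMARK(BM_WithoutCounter);
```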
- struct timespec ts = { 0, 0 }; + struct timespec ts = {0, 0}; clock_gettime(CLOCK_MONOTONIC, &ts); return static_cast(ts.tv_sec) * 1000000000 + ts.tv_nsec; #elif defined(__aarch64__) @@ -159,6 +159,11 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { struct timeval tv; gettimeofday(&tv, nullptr); return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__s390__) // Covers both s390 and s390x. + // Return the CPU clock. + uint64_t tsc; + asm("stck %0" : "=Q"(tsc) : : "cc"); + return tsc; #else // The soft failover to a generic implementation is automatic only for ARM. // For other platforms the developer is expected to make an attempt to create diff --git a/libcxx/utils/google-benchmark/src/internal_macros.h b/libcxx/utils/google-benchmark/src/internal_macros.h index c34f571..b7e9203 100644 --- a/libcxx/utils/google-benchmark/src/internal_macros.h +++ b/libcxx/utils/google-benchmark/src/internal_macros.h @@ -3,6 +3,11 @@ #include "benchmark/benchmark.h" +/* Needed to detect STL */ +#include + +// clang-format off + #ifndef __has_feature #define __has_feature(x) 0 #endif @@ -39,6 +44,7 @@ #elif defined(_WIN32) #define BENCHMARK_OS_WINDOWS 1 #elif defined(__APPLE__) + #define BENCHMARK_OS_APPLE 1 #include "TargetConditionals.h" #if defined(TARGET_OS_MAC) #define BENCHMARK_OS_MACOSX 1 @@ -50,14 +56,24 @@ #define BENCHMARK_OS_FREEBSD 1 #elif defined(__NetBSD__) #define BENCHMARK_OS_NETBSD 1 +#elif defined(__OpenBSD__) + #define BENCHMARK_OS_OPENBSD 1 #elif defined(__linux__) #define BENCHMARK_OS_LINUX 1 #elif defined(__native_client__) #define BENCHMARK_OS_NACL 1 -#elif defined(EMSCRIPTEN) +#elif defined(__EMSCRIPTEN__) #define BENCHMARK_OS_EMSCRIPTEN 1 #elif defined(__rtems__) #define BENCHMARK_OS_RTEMS 1 +#elif defined(__Fuchsia__) +#define BENCHMARK_OS_FUCHSIA 1 +#elif defined (__SVR4) && defined (__sun) +#define BENCHMARK_OS_SOLARIS 1 +#endif + +#if defined(__ANDROID__) && defined(__GLIBCXX__) +#define BENCHMARK_STL_ANDROID_GNUSTL 1 #endif #if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ @@ -79,4 +95,6 @@ #define BENCHMARK_UNREACHABLE() ((void)0) #endif +// clang-format on + #endif // BENCHMARK_INTERNAL_MACROS_H_ diff --git a/libcxx/utils/google-benchmark/src/json_reporter.cc b/libcxx/utils/google-benchmark/src/json_reporter.cc index b5ae302..611605a 100644 --- a/libcxx/utils/google-benchmark/src/json_reporter.cc +++ b/libcxx/utils/google-benchmark/src/json_reporter.cc @@ -17,12 +17,12 @@ #include #include +#include // for setprecision #include +#include #include #include #include -#include // for setprecision -#include #include "string_util.h" #include "timers.h" @@ -32,15 +32,15 @@ namespace benchmark { namespace { std::string FormatKV(std::string const& key, std::string const& value) { - return StringPrintF("\"%s\": \"%s\"", key.c_str(), value.c_str()); + return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str()); } std::string FormatKV(std::string const& key, const char* value) { - return StringPrintF("\"%s\": \"%s\"", key.c_str(), value); + return StrFormat("\"%s\": \"%s\"", key.c_str(), value); } std::string FormatKV(std::string const& key, bool value) { - return StringPrintF("\"%s\": %s", key.c_str(), value ? "true" : "false"); + return StrFormat("\"%s\": %s", key.c_str(), value ? 
"true" : "false"); } std::string FormatKV(std::string const& key, int64_t value) { @@ -53,7 +53,7 @@ std::string FormatKV(std::string const& key, double value) { std::stringstream ss; ss << '"' << key << "\": "; - const auto max_digits10 = std::numeric_limits::max_digits10; + const auto max_digits10 = std::numeric_limits::max_digits10; const auto max_fractional_digits10 = max_digits10 - 1; ss << std::scientific << std::setprecision(max_fractional_digits10) << value; @@ -77,6 +77,10 @@ bool JSONReporter::ReportContext(const Context& context) { std::string walltime_value = LocalDateTimeString(); out << indent << FormatKV("date", walltime_value) << ",\n"; + if (Context::executable_name) { + out << indent << FormatKV("executable", Context::executable_name) << ",\n"; + } + CPUInfo const& info = context.cpu_info; out << indent << FormatKV("num_cpus", static_cast(info.num_cpus)) << ",\n"; @@ -157,40 +161,30 @@ void JSONReporter::PrintRunData(Run const& run) { } if (!run.report_big_o && !run.report_rms) { out << indent << FormatKV("iterations", run.iterations) << ",\n"; - out << indent - << FormatKV("real_time", run.GetAdjustedRealTime()) - << ",\n"; - out << indent - << FormatKV("cpu_time", run.GetAdjustedCPUTime()); + out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n"; + out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime()); out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); } else if (run.report_big_o) { - out << indent - << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) + out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n"; - out << indent - << FormatKV("real_coefficient", run.GetAdjustedRealTime()) + out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n"; out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); } else if (run.report_rms) { - out << indent - << FormatKV("rms", run.GetAdjustedCPUTime()); + out << indent << FormatKV("rms", run.GetAdjustedCPUTime()); } if (run.bytes_per_second > 0.0) { out << ",\n" - << indent - << FormatKV("bytes_per_second", run.bytes_per_second); + << indent << FormatKV("bytes_per_second", run.bytes_per_second); } if (run.items_per_second > 0.0) { out << ",\n" - << indent - << FormatKV("items_per_second", run.items_per_second); + << indent << FormatKV("items_per_second", run.items_per_second); } - for(auto &c : run.counters) { - out << ",\n" - << indent - << FormatKV(c.first, c.second); + for (auto& c : run.counters) { + out << ",\n" << indent << FormatKV(c.first, c.second); } if (!run.report_label.empty()) { out << ",\n" << indent << FormatKV("label", run.report_label); @@ -198,4 +192,4 @@ void JSONReporter::PrintRunData(Run const& run) { out << '\n'; } -} // end namespace benchmark +} // end namespace benchmark diff --git a/libcxx/utils/google-benchmark/src/log.h b/libcxx/utils/google-benchmark/src/log.h index d06e103..47d0c35 100644 --- a/libcxx/utils/google-benchmark/src/log.h +++ b/libcxx/utils/google-benchmark/src/log.h @@ -66,8 +66,9 @@ inline LogType& GetLogInstanceForLevel(int level) { } // end namespace internal } // end namespace benchmark +// clang-format off #define VLOG(x) \ (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ " ") - +// clang-format on #endif diff --git a/libcxx/utils/google-benchmark/src/re.h b/libcxx/utils/google-benchmark/src/re.h index 01e9736..fbe2503 100644 --- 
a/libcxx/utils/google-benchmark/src/re.h +++ b/libcxx/utils/google-benchmark/src/re.h @@ -17,22 +17,39 @@ #include "internal_macros.h" +// clang-format off + +#if !defined(HAVE_STD_REGEX) && \ + !defined(HAVE_GNU_POSIX_REGEX) && \ + !defined(HAVE_POSIX_REGEX) + // No explicit regex selection; detect based on builtin hints. + #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE) + #define HAVE_POSIX_REGEX 1 + #elif __cplusplus >= 199711L + #define HAVE_STD_REGEX 1 + #endif +#endif + // Prefer C regex libraries when compiling w/o exceptions so that we can // correctly report errors. -#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \ +#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ + defined(BENCHMARK_HAVE_STD_REGEX) && \ (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) -#undef HAVE_STD_REGEX + #undef HAVE_STD_REGEX #endif #if defined(HAVE_STD_REGEX) -#include + #include #elif defined(HAVE_GNU_POSIX_REGEX) -#include + #include #elif defined(HAVE_POSIX_REGEX) -#include + #include #else #error No regular expression backend was found! #endif + +// clang-format on + #include #include "check.h" @@ -72,20 +89,21 @@ class Regex { inline bool Regex::Init(const std::string& spec, std::string* error) { #ifdef BENCHMARK_HAS_NO_EXCEPTIONS - ((void)error); // suppress unused warning + ((void)error); // suppress unused warning #else try { #endif - re_ = std::regex(spec, std::regex_constants::extended); - init_ = true; + re_ = std::regex(spec, std::regex_constants::extended); + init_ = true; #ifndef BENCHMARK_HAS_NO_EXCEPTIONS - } catch (const std::regex_error& e) { - if (error) { - *error = e.what(); - } +} +catch (const std::regex_error& e) { + if (error) { + *error = e.what(); } +} #endif - return init_; +return init_; } inline Regex::~Regex() {} diff --git a/libcxx/utils/google-benchmark/src/reporter.cc b/libcxx/utils/google-benchmark/src/reporter.cc index 5d2fa05..541661a 100644 --- a/libcxx/utils/google-benchmark/src/reporter.cc +++ b/libcxx/utils/google-benchmark/src/reporter.cc @@ -37,6 +37,9 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out, Out << LocalDateTimeString() << "\n"; + if (context.executable_name) + Out << "Running " << context.executable_name << "\n"; + const CPUInfo &info = context.cpu_info; Out << "Run on (" << info.num_cpus << " X " << (info.cycles_per_second / 1000000.0) << " MHz CPU " @@ -64,6 +67,9 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out, #endif } +// No initializer because it's already initialized to NULL. 
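The JSON writer above prints doubles in scientific notation with `max_digits10 - 1` fractional digits, i.e. `max_digits10` significant digits, which is exactly enough for a lossless round-trip. A standalone illustration:

```cpp
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>

int main() {
  const double value = 0.1 + 0.2;  // not exactly 0.3 in binary
  const auto max_digits10 = std::numeric_limits<double>::max_digits10;  // 17
  std::ostringstream ss;
  // One digit before the point plus 16 after = 17 significant digits.
  ss << std::scientific << std::setprecision(max_digits10 - 1) << value;
  std::cout << ss.str() << "\n";  // 3.0000000000000004e-01
}
```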
+const char *BenchmarkReporter::Context::executable_name; + BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {} double BenchmarkReporter::Run::GetAdjustedRealTime() const { diff --git a/libcxx/utils/google-benchmark/src/statistics.cc b/libcxx/utils/google-benchmark/src/statistics.cc index 5932ad4..612dda2 100644 --- a/libcxx/utils/google-benchmark/src/statistics.cc +++ b/libcxx/utils/google-benchmark/src/statistics.cc @@ -17,9 +17,9 @@ #include #include +#include #include #include -#include #include "check.h" #include "statistics.h" @@ -30,22 +30,25 @@ auto StatisticsSum = [](const std::vector<double>& v) { }; double StatisticsMean(const std::vector<double>& v) { - if (v.size() == 0) return 0.0; + if (v.empty()) return 0.0; return StatisticsSum(v) * (1.0 / v.size()); } double StatisticsMedian(const std::vector<double>& v) { if (v.size() < 3) return StatisticsMean(v); - std::vector<double> partial; - // we need roundDown(count/2)+1 slots - partial.resize(1 + (v.size() / 2)); - std::partial_sort_copy(v.begin(), v.end(), partial.begin(), partial.end()); - // did we have odd number of samples? - // if yes, then the last element of partially-sorted vector is the median - // it no, then the average of the last two elements is the median - if(v.size() % 2 == 1) - return partial.back(); - return (partial[partial.size() - 2] + partial[partial.size() - 1]) / 2.0; + std::vector<double> copy(v); + + auto center = copy.begin() + v.size() / 2; + std::nth_element(copy.begin(), center, copy.end()); + + // did we have an odd number of samples? + // if yes, then center is the median + // if no, then we are looking for the average between center and the value + // before + if (v.size() % 2 == 1) return *center; + auto center2 = copy.begin() + v.size() / 2 - 1; + std::nth_element(copy.begin(), center2, copy.end()); + return (*center + *center2) / 2.0; } // Return the sum of the squares of this sample set @@ -62,11 +65,10 @@ auto Sqrt = [](const double dat) { double StatisticsStdDev(const std::vector<double>& v) { const auto mean = StatisticsMean(v); - if (v.size() == 0) return mean; + if (v.empty()) return mean; // Sample standard deviation is undefined for n = 1 - if (v.size() == 1) - return 0.0; + if (v.size() == 1) return 0.0; const double avg_squares = SumSquares(v) * (1.0 / v.size()); return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean))); @@ -105,11 +107,11 @@ std::vector<BenchmarkReporter::Run> ComputeStats( Counter c; std::vector<double> s; }; - std::map< std::string, CounterStat > counter_stats; - for(Run const& r : reports) { - for(auto const& cnt : r.counters) { + std::map<std::string, CounterStat> counter_stats; + for (Run const& r : reports) { + for (auto const& cnt : r.counters) { auto it = counter_stats.find(cnt.first); - if(it == counter_stats.end()) { + if (it == counter_stats.end()) { counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}}); it = counter_stats.find(cnt.first); it->second.s.reserve(reports.size()); @@ -129,7 +131,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats( items_per_second_stat.emplace_back(run.items_per_second); bytes_per_second_stat.emplace_back(run.bytes_per_second); // user counters - for(auto const& cnt : run.counters) { + for (auto const& cnt : run.counters) { auto it = counter_stats.find(cnt.first); CHECK_NE(it, counter_stats.end()); it->second.s.emplace_back(cnt.second); @@ -145,7 +147,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats( } } - for(const auto& Stat : *reports[0].statistics) { + for (const auto& Stat : *reports[0].statistics) { // Get the data from the accumulator to BenchmarkReporter::Run's.
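For reference, the same selection-based median as a standalone function. One subtlety: after the second `nth_element` call above, the standard only guarantees the element at `center2`, so this sketch takes the even-case partner as the maximum of the lower half instead:

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

double Median(std::vector<double> v) {  // take a copy, like the original
  if (v.empty()) return 0.0;
  auto center = v.begin() + v.size() / 2;
  // O(n) selection: *center becomes the upper-middle order statistic.
  std::nth_element(v.begin(), center, v.end());
  if (v.size() % 2 == 1) return *center;
  // Even count: the lower-middle value is the largest element in the
  // partitioned lower half, found without disturbing *center.
  auto center2 = std::max_element(v.begin(), center);
  return (*center + *center2) / 2.0;
}

int main() {
  std::cout << Median({3, 1, 4, 1, 5}) << "\n";  // 3
  std::cout << Median({3, 1, 4, 1}) << "\n";     // 2
}
```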
Run data; data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_; @@ -160,7 +162,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats( data.time_unit = reports[0].time_unit; // user counters - for(auto const& kv : counter_stats) { + for (auto const& kv : counter_stats) { const auto uc_stat = Stat.compute_(kv.second.s); auto c = Counter(uc_stat, counter_stats[kv.first].c.flags); data.counters[kv.first] = c; diff --git a/libcxx/utils/google-benchmark/src/string_util.cc b/libcxx/utils/google-benchmark/src/string_util.cc index 29edb2a..05ac5b4 100644 --- a/libcxx/utils/google-benchmark/src/string_util.cc +++ b/libcxx/utils/google-benchmark/src/string_util.cc @@ -122,7 +122,7 @@ std::string HumanReadableNumber(double n, double one_k) { return ToBinaryStringFullySpecified(n, 1.1, 1, one_k); } -std::string StringPrintFImp(const char* msg, va_list args) { +std::string StrFormatImp(const char* msg, va_list args) { // we might need a second shot at this, so pre-emptively make a copy va_list args_cp; va_copy(args_cp, args); @@ -152,10 +152,10 @@ std::string StringPrintFImp(const char* msg, va_list args) { return std::string(buff_ptr.get()); } -std::string StringPrintF(const char* format, ...) { +std::string StrFormat(const char* format, ...) { va_list args; va_start(args, format); - std::string tmp = StringPrintFImp(format, args); + std::string tmp = StrFormatImp(format, args); va_end(args); return tmp; } @@ -169,4 +169,93 @@ void ReplaceAll(std::string* str, const std::string& from, } } +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +/* + * GNU STL in Android NDK lacks support for some C++11 functions, including + * stoul, stoi, stod. We reimplement them here using C functions strtoul, + * strtol, strtod. Note that reimplemented functions are in benchmark:: + * namespace, not std:: namespace.
+ */ +unsigned long stoul(const std::string& str, size_t* pos, int base) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const unsigned long result = strtoul(strStart, &strEnd, base); + + const int strtoulErrno = errno; + /* Restore previous errno */ + errno = oldErrno; + + /* Check for errors and return */ + if (strtoulErrno == ERANGE) { + throw std::out_of_range( + "stoul failed: " + str + " is outside of range of unsigned long"); + } else if (strEnd == strStart || strtoulErrno != 0) { + throw std::invalid_argument( + "stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return result; +} + +int stoi(const std::string& str, size_t* pos, int base) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const long result = strtol(strStart, &strEnd, base); + + const int strtolErrno = errno; + /* Restore previous errno */ + errno = oldErrno; + + /* Check for errors and return */ + if (strtolErrno == ERANGE || long(int(result)) != result) { + throw std::out_of_range( + "stoul failed: " + str + " is outside of range of int"); + } else if (strEnd == strStart || strtolErrno != 0) { + throw std::invalid_argument( + "stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return int(result); +} + +double stod(const std::string& str, size_t* pos) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const double result = strtod(strStart, &strEnd); + + /* Restore previous errno */ + const int strtodErrno = errno; + errno = oldErrno; + + /* Check for errors and return */ + if (strtodErrno == ERANGE) { + throw std::out_of_range( + "stoul failed: " + str + " is outside of range of int"); + } else if (strEnd == strStart || strtodErrno != 0) { + throw std::invalid_argument( + "stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return result; +} +#endif + } // end namespace benchmark diff --git a/libcxx/utils/google-benchmark/src/string_util.h b/libcxx/utils/google-benchmark/src/string_util.h index c3d53bf..4a55012 100644 --- a/libcxx/utils/google-benchmark/src/string_util.h +++ b/libcxx/utils/google-benchmark/src/string_util.h @@ -12,29 +12,45 @@ void AppendHumanReadable(int n, std::string* str); std::string HumanReadableNumber(double n, double one_k = 1024.0); -std::string StringPrintF(const char* format, ...); +std::string StrFormat(const char* format, ...); -inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { +inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { return out; } template -inline std::ostream& StringCatImp(std::ostream& out, First&& f, - Rest&&... rest) { +inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) { out << std::forward(f); - return StringCatImp(out, std::forward(rest)...); + return StrCatImp(out, std::forward(rest)...); } template inline std::string StrCat(Args&&... 
args) { std::ostringstream ss; - StringCatImp(ss, std::forward(args)...); + StrCatImp(ss, std::forward(args)...); return ss.str(); } void ReplaceAll(std::string* str, const std::string& from, const std::string& to); +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +/* + * GNU STL in Android NDK lacks support for some C++11 functions, including + * stoul, stoi, stod. We reimplement them here using C functions strtoul, + * strtol, strtod. Note that reimplemented functions are in benchmark:: + * namespace, not std:: namespace. + */ +unsigned long stoul(const std::string& str, size_t* pos = nullptr, + int base = 10); +int stoi(const std::string& str, size_t* pos = nullptr, int base = 10); +double stod(const std::string& str, size_t* pos = nullptr); +#else +using std::stoul; +using std::stoi; +using std::stod; +#endif + } // end namespace benchmark #endif // BENCHMARK_STRING_UTIL_H_ diff --git a/libcxx/utils/google-benchmark/src/sysinfo.cc b/libcxx/utils/google-benchmark/src/sysinfo.cc index 2520ad5..73064b9 100644 --- a/libcxx/utils/google-benchmark/src/sysinfo.cc +++ b/libcxx/utils/google-benchmark/src/sysinfo.cc @@ -16,20 +16,26 @@ #ifdef BENCHMARK_OS_WINDOWS #include +#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA #include #include #else #include +#ifndef BENCHMARK_OS_FUCHSIA #include +#endif #include #include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD #include #if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \ - defined BENCHMARK_OS_NETBSD + defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD #define BENCHMARK_HAS_SYSCTL #include #endif #endif +#if defined(BENCHMARK_OS_SOLARIS) +#include +#endif #include #include @@ -130,6 +136,26 @@ struct ValueUnion { }; ValueUnion GetSysctlImp(std::string const& Name) { +#if defined BENCHMARK_OS_OPENBSD + int mib[2]; + + mib[0] = CTL_HW; + if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){ + ValueUnion buff(sizeof(int)); + + if (Name == "hw.ncpu") { + mib[1] = HW_NCPU; + } else { + mib[1] = HW_CPUSPEED; + } + + if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) { + return ValueUnion(); + } + return buff; + } + return ValueUnion(); +#else size_t CurBuffSize = 0; if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1) return ValueUnion(); @@ -138,6 +164,7 @@ ValueUnion GetSysctlImp(std::string const& Name) { if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0) return buff; return ValueUnion(); +#endif } BENCHMARK_MAYBE_UNUSED @@ -198,7 +225,7 @@ int CountSetBitsInCPUMap(std::string Val) { auto CountBits = [](std::string Part) { using CPUMask = std::bitset; Part = "0x" + Part; - CPUMask Mask(std::stoul(Part, nullptr, 16)); + CPUMask Mask(benchmark::stoul(Part, nullptr, 16)); return static_cast(Mask.count()); }; size_t Pos; @@ -303,7 +330,7 @@ std::vector GetCacheSizesWindows() { if (!B.test(0)) continue; CInfo* Cache = &it->Cache; CPUInfo::CacheInfo C; - C.num_sharing = B.count(); + C.num_sharing = static_cast(B.count()); C.level = Cache->Level; C.size = Cache->Size; switch (Cache->Type) { @@ -354,6 +381,15 @@ int GetNumCPUs() { return sysinfo.dwNumberOfProcessors; // number of logical // processors in the current // group +#elif defined(BENCHMARK_OS_SOLARIS) + // Returns -1 in case of a failure. 
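Usage of the renamed helpers (`StringPrintF` → `StrFormat`, `StringCatImp` → `StrCatImp` backing `StrCat`), e.g. for building instance names as benchmark_register.cc does; a sketch assuming the internal header is on the include path:

```cpp
#include <string>
#include "string_util.h"  // internal header, shown for illustration

std::string BuildName() {
  std::string name = "BM_Foo";
  name += benchmark::StrFormat("/repeats:%d", 16);  // printf-style
  return benchmark::StrCat(name, "/threads:", 8);   // stream-style
  // -> "BM_Foo/repeats:16/threads:8"
}
```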
+ int NumCPU = sysconf(_SC_NPROCESSORS_ONLN); + if (NumCPU < 0) { + fprintf(stderr, + "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", + strerror(errno)); + } + return NumCPU; #else int NumCPUs = 0; int MaxID = -1; @@ -372,7 +408,7 @@ int GetNumCPUs() { if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) { NumCPUs++; if (!value.empty()) { - int CurID = std::stoi(value); + int CurID = benchmark::stoi(value); MaxID = std::max(CurID, MaxID); } } @@ -441,16 +477,16 @@ double GetCPUCyclesPerSecond() { std::string value; if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only - // accept postive values. Some environments (virtual machines) report zero, + // accept positive values. Some environments (virtual machines) report zero, // which would cause infinite looping in WallTime_Init. if (startsWithKey(ln, "cpu MHz")) { if (!value.empty()) { - double cycles_per_second = std::stod(value) * 1000000.0; + double cycles_per_second = benchmark::stod(value) * 1000000.0; if (cycles_per_second > 0) return cycles_per_second; } } else if (startsWithKey(ln, "bogomips")) { if (!value.empty()) { - bogo_clock = std::stod(value) * 1000000.0; + bogo_clock = benchmark::stod(value) * 1000000.0; if (bogo_clock < 0.0) bogo_clock = error_value; } } @@ -473,12 +509,17 @@ double GetCPUCyclesPerSecond() { constexpr auto* FreqStr = #if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD) "machdep.tsc_freq"; +#elif defined BENCHMARK_OS_OPENBSD + "hw.cpuspeed"; #else "hw.cpufrequency"; #endif unsigned long long hz = 0; +#if defined BENCHMARK_OS_OPENBSD + if (GetSysctl(FreqStr, &hz)) return hz * 1000000; +#else if (GetSysctl(FreqStr, &hz)) return hz; - +#endif fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", FreqStr, strerror(errno)); @@ -493,6 +534,35 @@ double GetCPUCyclesPerSecond() { "~MHz", nullptr, &data, &data_size))) return static_cast((int64_t)data * (int64_t)(1000 * 1000)); // was mhz +#elif defined (BENCHMARK_OS_SOLARIS) + kstat_ctl_t *kc = kstat_open(); + if (!kc) { + std::cerr << "failed to open /dev/kstat\n"; + return -1; + } + kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0"); + if (!ksp) { + std::cerr << "failed to lookup in /dev/kstat\n"; + return -1; + } + if (kstat_read(kc, ksp, NULL) < 0) { + std::cerr << "failed to read from /dev/kstat\n"; + return -1; + } + kstat_named_t *knp = + (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz"); + if (!knp) { + std::cerr << "failed to lookup data in /dev/kstat\n"; + return -1; + } + if (knp->data_type != KSTAT_DATA_UINT64) { + std::cerr << "current_clock_Hz is of unexpected data type: " + << knp->data_type << "\n"; + return -1; + } + double clock_hz = knp->value.ui64; + kstat_close(kc); + return clock_hz; #endif // If we've fallen through, attempt to roughly estimate the CPU clock rate. 
const int estimate_time_ms = 1000; diff --git a/libcxx/utils/google-benchmark/src/thread_manager.h b/libcxx/utils/google-benchmark/src/thread_manager.h new file mode 100644 index 0000000..82b4d72 --- /dev/null +++ b/libcxx/utils/google-benchmark/src/thread_manager.h @@ -0,0 +1,66 @@ +#ifndef BENCHMARK_THREAD_MANAGER_H +#define BENCHMARK_THREAD_MANAGER_H + +#include + +#include "benchmark/benchmark.h" +#include "mutex.h" + +namespace benchmark { +namespace internal { + +class ThreadManager { + public: + ThreadManager(int num_threads) + : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} + + Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { + return benchmark_mutex_; + } + + bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { + return start_stop_barrier_.wait(); + } + + void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { + start_stop_barrier_.removeThread(); + if (--alive_threads_ == 0) { + MutexLock lock(end_cond_mutex_); + end_condition_.notify_all(); + } + } + + void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { + MutexLock lock(end_cond_mutex_); + end_condition_.wait(lock.native_handle(), + [this]() { return alive_threads_ == 0; }); + } + + public: + struct Result { + int64_t iterations = 0; + double real_time_used = 0; + double cpu_time_used = 0; + double manual_time_used = 0; + int64_t bytes_processed = 0; + int64_t items_processed = 0; + int64_t complexity_n = 0; + std::string report_label_; + std::string error_message_; + bool has_error_ = false; + UserCounters counters; + }; + GUARDED_BY(GetBenchmarkMutex()) Result results; + + private: + mutable Mutex benchmark_mutex_; + std::atomic alive_threads_; + Barrier start_stop_barrier_; + Mutex end_cond_mutex_; + Condition end_condition_; +}; + +} // namespace internal +} // namespace benchmark + +#endif // BENCHMARK_THREAD_MANAGER_H diff --git a/libcxx/utils/google-benchmark/src/thread_timer.h b/libcxx/utils/google-benchmark/src/thread_timer.h new file mode 100644 index 0000000..eaf108e --- /dev/null +++ b/libcxx/utils/google-benchmark/src/thread_timer.h @@ -0,0 +1,69 @@ +#ifndef BENCHMARK_THREAD_TIMER_H +#define BENCHMARK_THREAD_TIMER_H + +#include "check.h" +#include "timers.h" + +namespace benchmark { +namespace internal { + +class ThreadTimer { + public: + ThreadTimer() = default; + + // Called by each thread + void StartTimer() { + running_ = true; + start_real_time_ = ChronoClockNow(); + start_cpu_time_ = ThreadCPUUsage(); + } + + // Called by each thread + void StopTimer() { + CHECK(running_); + running_ = false; + real_time_used_ += ChronoClockNow() - start_real_time_; + // Floating point error can result in the subtraction producing a negative + // time. Guard against that. 
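A hypothetical driver for the two new headers above, mirroring what `RunInThread` in benchmark.cc does: time a slice per thread with a `ThreadTimer`, merge it into the shared `Result` under the mutex, and rendezvous at the barrier (error handling and the real benchmark body omitted):

```cpp
#include <thread>
#include <vector>
#include "thread_manager.h"
#include "thread_timer.h"

void Worker(benchmark::internal::ThreadManager* manager) {
  benchmark::internal::ThreadTimer timer;
  manager->StartStopBarrier();  // all threads start together
  timer.StartTimer();
  // ... run this thread's share of the iterations ...
  timer.StopTimer();
  manager->StartStopBarrier();  // ...and stop together
  {
    benchmark::MutexLock l(manager->GetBenchmarkMutex());
    auto& results = manager->results;
    results.iterations += 1000;  // illustrative count
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
  }
  manager->NotifyThreadComplete();
}

void RunAll(int num_threads) {
  benchmark::internal::ThreadManager manager(num_threads);
  std::vector<std::thread> pool;
  for (int i = 0; i < num_threads; ++i) pool.emplace_back(Worker, &manager);
  manager.WaitForAllThreads();
  for (auto& t : pool) t.join();
}
```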
+ cpu_time_used_ += std::max(ThreadCPUUsage() - start_cpu_time_, 0); + } + + // Called by each thread + void SetIterationTime(double seconds) { manual_time_used_ += seconds; } + + bool running() const { return running_; } + + // REQUIRES: timer is not running + double real_time_used() { + CHECK(!running_); + return real_time_used_; + } + + // REQUIRES: timer is not running + double cpu_time_used() { + CHECK(!running_); + return cpu_time_used_; + } + + // REQUIRES: timer is not running + double manual_time_used() { + CHECK(!running_); + return manual_time_used_; + } + + private: + bool running_ = false; // Is the timer running + double start_real_time_ = 0; // If running_ + double start_cpu_time_ = 0; // If running_ + + // Accumulated time so far (does not contain current slice if running_) + double real_time_used_ = 0; + double cpu_time_used_ = 0; + // Manually set iteration time. User sets this with SetIterationTime(seconds). + double manual_time_used_ = 0; +}; + +} // namespace internal +} // namespace benchmark + +#endif // BENCHMARK_THREAD_TIMER_H diff --git a/libcxx/utils/google-benchmark/src/timers.cc b/libcxx/utils/google-benchmark/src/timers.cc index 817272d..2010e24 100644 --- a/libcxx/utils/google-benchmark/src/timers.cc +++ b/libcxx/utils/google-benchmark/src/timers.cc @@ -17,11 +17,14 @@ #ifdef BENCHMARK_OS_WINDOWS #include +#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA #include #include #else #include +#ifndef BENCHMARK_OS_FUCHSIA #include +#endif #include #include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD #include @@ -74,7 +77,7 @@ double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { static_cast(user.QuadPart)) * 1e-7; } -#else +#elif !defined(BENCHMARK_OS_FUCHSIA) double MakeTime(struct rusage const& ru) { return (static_cast(ru.ru_utime.tv_sec) + static_cast(ru.ru_utime.tv_usec) * 1e-6 + @@ -162,6 +165,10 @@ double ThreadCPUUsage() { // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_SOLARIS) + struct rusage ru; + if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); + DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) 
failed"); #elif defined(CLOCK_THREAD_CPUTIME_ID) struct timespec ts; if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); @@ -186,7 +193,6 @@ std::string DateTimeString(bool local) { std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now)); #else std::tm timeinfo; - std::memset(&timeinfo, 0, sizeof(std::tm)); ::localtime_r(&now, &timeinfo); written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); #endif @@ -195,7 +201,6 @@ std::string DateTimeString(bool local) { written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now)); #else std::tm timeinfo; - std::memset(&timeinfo, 0, sizeof(std::tm)); ::gmtime_r(&now, &timeinfo); written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); #endif diff --git a/libcxx/utils/google-benchmark/test/BUILD b/libcxx/utils/google-benchmark/test/BUILD new file mode 100644 index 0000000..3f174c4 --- /dev/null +++ b/libcxx/utils/google-benchmark/test/BUILD @@ -0,0 +1,65 @@ +TEST_COPTS = [ + "-pedantic", + "-pedantic-errors", + "-std=c++11", + "-Wall", + "-Wextra", + "-Wshadow", +# "-Wshorten-64-to-32", + "-Wfloat-equal", + "-fstrict-aliasing", +] + +PER_SRC_COPTS = ({ + "cxx03_test.cc": ["-std=c++03"], + # Some of the issues with DoNotOptimize only occur when optimization is enabled + "donotoptimize_test.cc": ["-O3"], +}) + + +TEST_ARGS = ["--benchmark_min_time=0.01"] + +PER_SRC_TEST_ARGS = ({ + "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"], +}) + +cc_library( + name = "output_test_helper", + testonly = 1, + srcs = ["output_test_helper.cc"], + hdrs = ["output_test.h"], + copts = TEST_COPTS, + deps = [ + "//:benchmark", + "//:benchmark_internal_headers", + ], +) + +[ + cc_test( + name = test_src[:-len(".cc")], + size = "small", + srcs = [test_src], + args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []), + copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []), + deps = [ + ":output_test_helper", + "//:benchmark", + "//:benchmark_internal_headers", + "@com_google_googletest//:gtest", + ] + ( + ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else [] + ), + # FIXME: Add support for assembly tests to bazel. + # See Issue #556 + # https://github.com/google/benchmark/issues/556 + ) for test_src in glob(["*test.cc"], exclude = ["*_assembly_test.cc", "link_main_test.cc"]) +] + +cc_test( + name = "link_main_test", + size = "small", + srcs = ["link_main_test.cc"], + copts = TEST_COPTS, + deps = ["//:benchmark_main"], +) diff --git a/libcxx/utils/google-benchmark/test/CMakeLists.txt b/libcxx/utils/google-benchmark/test/CMakeLists.txt index efce3ba..f49ca51 100644 --- a/libcxx/utils/google-benchmark/test/CMakeLists.txt +++ b/libcxx/utils/google-benchmark/test/CMakeLists.txt @@ -22,6 +22,12 @@ if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" ) endforeach() endif() +check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) +set(BENCHMARK_O3_FLAG "") +if (BENCHMARK_HAS_O3_FLAG) + set(BENCHMARK_O3_FLAG "-O3") +endif() + # NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise # they will break the configuration check. 
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) @@ -35,6 +41,10 @@ macro(compile_benchmark_test name) target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_benchmark_test) +macro(compile_benchmark_test_with_main name) + add_executable(${name} "${name}.cc") + target_link_libraries(${name} benchmark_main) +endmacro(compile_benchmark_test_with_main) macro(compile_output_test name) add_executable(${name} "${name}.cc" output_test.h) @@ -53,14 +63,23 @@ macro(add_filter_test name filter expect) endmacro(add_filter_test) add_filter_test(filter_simple "Foo" 3) +add_filter_test(filter_simple_negative "-Foo" 2) add_filter_test(filter_suffix "BM_.*" 4) +add_filter_test(filter_suffix_negative "-BM_.*" 1) add_filter_test(filter_regex_all ".*" 5) +add_filter_test(filter_regex_all_negative "-.*" 0) add_filter_test(filter_regex_blank "" 5) +add_filter_test(filter_regex_blank_negative "-" 0) add_filter_test(filter_regex_none "monkey" 0) +add_filter_test(filter_regex_none_negative "-monkey" 5) add_filter_test(filter_regex_wildcard ".*Foo.*" 3) +add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2) add_filter_test(filter_regex_begin "^BM_.*" 4) +add_filter_test(filter_regex_begin_negative "-^BM_.*" 1) add_filter_test(filter_regex_begin2 "^N" 1) +add_filter_test(filter_regex_begin2_negative "-^N" 4) add_filter_test(filter_regex_end ".*Ba$" 1) +add_filter_test(filter_regex_end_negative "-.*Ba$" 4) compile_benchmark_test(options_test) add_test(options_benchmarks options_test --benchmark_min_time=0.01) @@ -94,6 +113,9 @@ add_test(map_test map_test --benchmark_min_time=0.01) compile_benchmark_test(multiple_ranges_test) add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01) +compile_benchmark_test_with_main(link_main_test) +add_test(link_main_test link_main_test --benchmark_min_time=0.01) + compile_output_test(reporter_output_test) add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01) @@ -144,8 +166,11 @@ if (BENCHMARK_ENABLE_GTEST_TESTS) if (TARGET googletest) add_dependencies(${name} googletest) endif() + if (GTEST_INCLUDE_DIRS) + target_include_directories(${name} PRIVATE ${GTEST_INCLUDE_DIRS}) + endif() target_link_libraries(${name} benchmark - "${GTEST_BOTH_LIBRARIES}" ${CMAKE_THREAD_LIBS_INIT}) + ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_gtest) macro(add_gtest name) @@ -153,9 +178,30 @@ if (BENCHMARK_ENABLE_GTEST_TESTS) add_test(${name} ${name}) endmacro() - add_gtest(statistics_test) + add_gtest(benchmark_gtest) + add_gtest(statistics_gtest) + add_gtest(string_util_gtest) endif(BENCHMARK_ENABLE_GTEST_TESTS) +############################################################################### +# Assembly Unit Tests +############################################################################### + +if (BENCHMARK_ENABLE_ASSEMBLY_TESTS) + if (NOT LLVM_FILECHECK_EXE) + message(FATAL_ERROR "LLVM FileCheck is required when including this file") + endif() + include(AssemblyTests.cmake) + add_filecheck_test(donotoptimize_assembly_test) + add_filecheck_test(state_assembly_test) + add_filecheck_test(clobber_memory_assembly_test) +endif() + + + +############################################################################### +# Code Coverage Configuration +############################################################################### # Add the coverage command(s) if(CMAKE_BUILD_TYPE) diff --git a/libcxx/utils/google-benchmark/test/basic_test.cc b/libcxx/utils/google-benchmark/test/basic_test.cc index 3348781..d07fbc0 100644 
--- a/libcxx/utils/google-benchmark/test/basic_test.cc +++ b/libcxx/utils/google-benchmark/test/basic_test.cc @@ -99,13 +99,25 @@ BENCHMARK(BM_empty_stop_start)->ThreadPerCpu(); void BM_KeepRunning(benchmark::State& state) { size_t iter_count = 0; + assert(iter_count == state.iterations()); while (state.KeepRunning()) { ++iter_count; } - assert(iter_count == state.max_iterations); + assert(iter_count == state.iterations()); } BENCHMARK(BM_KeepRunning); +void BM_KeepRunningBatch(benchmark::State& state) { + // Choose a prime batch size to avoid evenly dividing max_iterations. + const size_t batch_size = 101; + size_t iter_count = 0; + while (state.KeepRunningBatch(batch_size)) { + iter_count += batch_size; + } + assert(state.iterations() == iter_count); +} +BENCHMARK(BM_KeepRunningBatch); + void BM_RangedFor(benchmark::State& state) { size_t iter_count = 0; for (auto _ : state) { @@ -115,4 +127,10 @@ void BM_RangedFor(benchmark::State& state) { } BENCHMARK(BM_RangedFor); +// Ensure that StateIterator provides all the necessary typedefs required to +// instantiate std::iterator_traits. +static_assert(std::is_same< + typename std::iterator_traits<benchmark::State::StateIterator>::value_type, + typename benchmark::State::StateIterator::value_type>::value, ""); + BENCHMARK_MAIN(); diff --git a/libcxx/utils/google-benchmark/test/benchmark_gtest.cc b/libcxx/utils/google-benchmark/test/benchmark_gtest.cc new file mode 100644 index 0000000..10683b4 --- /dev/null +++ b/libcxx/utils/google-benchmark/test/benchmark_gtest.cc @@ -0,0 +1,33 @@ +#include <vector> + +#include "../src/benchmark_register.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace { + +TEST(AddRangeTest, Simple) { + std::vector<int> dst; + AddRange(&dst, 1, 2, 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2)); } + +TEST(AddRangeTest, Simple64) { + std::vector<int64_t> dst; + AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2)); } + +TEST(AddRangeTest, Advanced) { + std::vector<int> dst; + AddRange(&dst, 5, 15, 2); + EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); } + +TEST(AddRangeTest, Advanced64) { + std::vector<int64_t> dst; + AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2); + EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); } + +} // end namespace diff --git a/libcxx/utils/google-benchmark/test/benchmark_test.cc b/libcxx/utils/google-benchmark/test/benchmark_test.cc index 78802c8..3cd4f55 100644 --- a/libcxx/utils/google-benchmark/test/benchmark_test.cc +++ b/libcxx/utils/google-benchmark/test/benchmark_test.cc @@ -40,8 +40,8 @@ double CalculatePi(int depth) { return (pi - 1.0) * 4; } -std::set<int> ConstructRandomSet(int size) { - std::set<int> s; +std::set<int64_t> ConstructRandomSet(int64_t size) { + std::set<int64_t> s; for (int i = 0; i < size; ++i) s.insert(s.end(), i); return s; } @@ -64,7 +64,7 @@ BENCHMARK(BM_Factorial)->UseRealTime(); static void BM_CalculatePiRange(benchmark::State& state) { double pi = 0.0; - for (auto _ : state) pi = CalculatePi(state.range(0)); + for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0))); std::stringstream ss; ss << pi; state.SetLabel(ss.str()); @@ -74,7 +74,7 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024); static void BM_CalculatePi(benchmark::State& state) { static const int depth = 1024; for (auto _ : state) { - benchmark::DoNotOptimize(CalculatePi(depth)); + benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth))); } } BENCHMARK(BM_CalculatePi)->Threads(8); @@ -82,7 +82,7 @@ BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32); BENCHMARK(BM_CalculatePi)->ThreadPerCpu(); static void
BM_SetInsert(benchmark::State& state) { - std::set<int> data; + std::set<int64_t> data; for (auto _ : state) { state.PauseTiming(); data = ConstructRandomSet(state.range(0)); @@ -103,9 +103,9 @@ static void BM_Sequential(benchmark::State& state) { ValueType v = 42; for (auto _ : state) { Container c; - for (int i = state.range(0); --i;) c.push_back(v); + for (int64_t i = state.range(0); --i;) c.push_back(v); } - const size_t items_processed = state.iterations() * state.range(0); + const int64_t items_processed = state.iterations() * state.range(0); state.SetItemsProcessed(items_processed); state.SetBytesProcessed(items_processed * sizeof(v)); } @@ -118,8 +118,9 @@ BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512); #endif static void BM_StringCompare(benchmark::State& state) { - std::string s1(state.range(0), '-'); - std::string s2(state.range(0), '-'); + size_t len = static_cast<size_t>(state.range(0)); + std::string s1(len, '-'); + std::string s2(len, '-'); for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); } BENCHMARK(BM_StringCompare)->Range(1, 1 << 20); @@ -154,13 +155,13 @@ static void BM_LongTest(benchmark::State& state) { BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28); static void BM_ParallelMemset(benchmark::State& state) { - int size = state.range(0) / static_cast<int>(sizeof(int)); - int thread_size = size / state.threads; + int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int)); + int thread_size = static_cast<int>(size) / state.threads; int from = thread_size * state.thread_index; int to = from + thread_size; if (state.thread_index == 0) { - test_vector = new std::vector<int>(size); + test_vector = new std::vector<int>(static_cast<size_t>(size)); } for (auto _ : state) { @@ -178,8 +179,8 @@ static void BM_ParallelMemset(benchmark::State& state) { BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4); static void BM_ManualTiming(benchmark::State& state) { - size_t slept_for = 0; - int microseconds = state.range(0); + int64_t slept_for = 0; + int64_t microseconds = state.range(0); std::chrono::duration<double, std::micro> sleep_duration{ static_cast<double>(microseconds)}; diff --git a/libcxx/utils/google-benchmark/test/clobber_memory_assembly_test.cc b/libcxx/utils/google-benchmark/test/clobber_memory_assembly_test.cc new file mode 100644 index 0000000..f41911a --- /dev/null +++ b/libcxx/utils/google-benchmark/test/clobber_memory_assembly_test.cc @@ -0,0 +1,64 @@ +#include <benchmark/benchmark.h> + +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wreturn-type" +#endif + +extern "C" { + +extern int ExternInt; +extern int ExternInt2; +extern int ExternInt3; + +} + +// CHECK-LABEL: test_basic: +extern "C" void test_basic() { + int x; + benchmark::DoNotOptimize(&x); + x = 101; + benchmark::ClobberMemory(); + // CHECK: leaq [[DEST:[^,]+]], %rax + // CHECK: movl $101, [[DEST]] + // CHECK: ret +} + +// CHECK-LABEL: test_redundant_store: +extern "C" void test_redundant_store() { + ExternInt = 3; + benchmark::ClobberMemory(); + ExternInt = 51; + // CHECK-DAG: ExternInt + // CHECK-DAG: movl $3 + // CHECK: movl $51 +} + +// CHECK-LABEL: test_redundant_read: +extern "C" void test_redundant_read() { + int x; + benchmark::DoNotOptimize(&x); + x = ExternInt; + benchmark::ClobberMemory(); + x = ExternInt2; + // CHECK: leaq [[DEST:[^,]+]], %rax + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, [[DEST]] + // CHECK-NOT: ExternInt2 + // CHECK: ret +} + +// CHECK-LABEL: test_redundant_read2: +extern "C" void test_redundant_read2() { + int x; + benchmark::DoNotOptimize(&x); + x = ExternInt; + benchmark::ClobberMemory(); + x = ExternInt2; +
benchmark::ClobberMemory(); + // CHECK: leaq [[DEST:[^,]+]], %rax + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, [[DEST]] + // CHECK: ExternInt2(%rip) + // CHECK: movl %eax, [[DEST]] + // CHECK: ret +} diff --git a/libcxx/utils/google-benchmark/test/complexity_test.cc b/libcxx/utils/google-benchmark/test/complexity_test.cc index 89dfa58..5f91660 100644 --- a/libcxx/utils/google-benchmark/test/complexity_test.cc +++ b/libcxx/utils/google-benchmark/test/complexity_test.cc @@ -55,7 +55,7 @@ void BM_Complexity_O1(benchmark::State& state) { } BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(); -BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int) { +BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) { return 1.0; }); @@ -81,19 +81,19 @@ ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1); // --------------------------- Testing BigO O(N) --------------------------- // // ========================================================================= // -std::vector<int> ConstructRandomVector(int size) { +std::vector<int> ConstructRandomVector(int64_t size) { std::vector<int> v; - v.reserve(size); + v.reserve(static_cast<size_t>(size)); for (int i = 0; i < size; ++i) { - v.push_back(std::rand() % size); + v.push_back(static_cast<int>(std::rand() % size)); } return v; } void BM_Complexity_O_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range(0)); - const int item_not_in_vector = - state.range(0) * 2; // Test worst case scenario (item not in vector) + // Test worst case scenario (item not in vector) + const int64_t item_not_in_vector = state.range(0) * 2; for (auto _ : state) { benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); } @@ -106,7 +106,7 @@ BENCHMARK(BM_Complexity_O_N) BENCHMARK(BM_Complexity_O_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) - ->Complexity([](int n) -> double { return n; }); + ->Complexity([](int64_t n) -> double { return static_cast<double>(n); }); BENCHMARK(BM_Complexity_O_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) @@ -134,6 +134,7 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { } state.SetComplexityN(state.range(0)); } +static const double kLog2E = 1.44269504088896340736; BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) @@ -141,7 +142,7 @@ BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) - ->Complexity([](int n) { return n * log2(n); }); + ->Complexity([](int64_t n) { return kLog2E * n * log(static_cast<double>(n)); }); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) diff --git a/libcxx/utils/google-benchmark/test/donotoptimize_assembly_test.cc b/libcxx/utils/google-benchmark/test/donotoptimize_assembly_test.cc new file mode 100644 index 0000000..d4b0bab --- /dev/null +++ b/libcxx/utils/google-benchmark/test/donotoptimize_assembly_test.cc @@ -0,0 +1,163 @@ +#include <benchmark/benchmark.h> + +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wreturn-type" +#endif + +extern "C" { + +extern int ExternInt; +extern int ExternInt2; +extern int ExternInt3; + +inline int Add42(int x) { return x + 42; } + +struct NotTriviallyCopyable { + NotTriviallyCopyable(); + explicit NotTriviallyCopyable(int x) : value(x) {} + NotTriviallyCopyable(NotTriviallyCopyable const&); + int value; +}; + +struct Large { + int value; + int data[2]; +}; + +} // CHECK-LABEL: test_with_rvalue: extern "C" void
test_with_rvalue() { + benchmark::DoNotOptimize(Add42(0)); + // CHECK: movl $42, %eax + // CHECK: ret +} + +// CHECK-LABEL: test_with_large_rvalue: +extern "C" void test_with_large_rvalue() { + benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}}); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]] + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_non_trivial_rvalue: +extern "C" void test_with_non_trivial_rvalue() { + benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt)); + // CHECK: mov{{l|q}} ExternInt(%rip) + // CHECK: ret +} + +// CHECK-LABEL: test_with_lvalue: +extern "C" void test_with_lvalue() { + int x = 101; + benchmark::DoNotOptimize(x); + // CHECK-GNU: movl $101, %eax + // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_large_lvalue: +extern "C" void test_with_large_lvalue() { + Large L{ExternInt, {ExternInt, ExternInt}}; + benchmark::DoNotOptimize(L); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_non_trivial_lvalue: +extern "C" void test_with_non_trivial_lvalue() { + NotTriviallyCopyable NTC(ExternInt); + benchmark::DoNotOptimize(NTC); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_const_lvalue: +extern "C" void test_with_const_lvalue() { + const int x = 123; + benchmark::DoNotOptimize(x); + // CHECK: movl $123, %eax + // CHECK: ret +} + +// CHECK-LABEL: test_with_large_const_lvalue: +extern "C" void test_with_large_const_lvalue() { + const Large L{ExternInt, {ExternInt, ExternInt}}; + benchmark::DoNotOptimize(L); + // CHECK: ExternInt(%rip) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) + // CHECK: ret +} + +// CHECK-LABEL: test_with_non_trivial_const_lvalue: +extern "C" void test_with_non_trivial_const_lvalue() { + const NotTriviallyCopyable Obj(ExternInt); + benchmark::DoNotOptimize(Obj); + // CHECK: mov{{q|l}} ExternInt(%rip) + // CHECK: ret +} + +// CHECK-LABEL: test_div_by_two: +extern "C" int test_div_by_two(int input) { + int divisor = 2; + benchmark::DoNotOptimize(divisor); + return input / divisor; + // CHECK: movl $2, [[DEST:.*]] + // CHECK: idivl [[DEST]] + // CHECK: ret +} + +// CHECK-LABEL: test_inc_integer: +extern "C" int test_inc_integer() { + int x = 0; + for (int i=0; i < 5; ++i) + benchmark::DoNotOptimize(++x); + // CHECK: movl $1, [[DEST:.*]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK: {{(addl \$1,|incl)}} [[DEST]] + // CHECK-CLANG: movl [[DEST]], %eax + // CHECK: ret + return x; +} + +// CHECK-LABEL: test_pointer_rvalue +extern "C" void test_pointer_rvalue() { + // CHECK: movl $42, [[DEST:.*]] + // CHECK: leaq [[DEST]], %rax + // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret + int x = 42; + benchmark::DoNotOptimize(&x); +} + +// CHECK-LABEL: test_pointer_const_lvalue: +extern "C" void test_pointer_const_lvalue() { + // CHECK: movl $42, [[DEST:.*]] + // CHECK: leaq [[DEST]], %rax + // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) + // CHECK: ret + int x = 42; + int * const xp = &x; + 
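// A note on the mechanism (informal sketch): DoNotOptimize(xp) "escapes" the
// pointer value, and its memory clobber is what forces the movl $42 store
// that the CHECK lines at the top of this test expect to see.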
benchmark::DoNotOptimize(xp); +} + +// CHECK-LABEL: test_pointer_lvalue: +extern "C" void test_pointer_lvalue() { + // CHECK: movl $42, [[DEST:.*]] + // CHECK: leaq [[DEST]], %rax + // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]]) + // CHECK: ret + int x = 42; + int *xp = &x; + benchmark::DoNotOptimize(xp); +} diff --git a/libcxx/utils/google-benchmark/test/donotoptimize_test.cc b/libcxx/utils/google-benchmark/test/donotoptimize_test.cc index a705654..2ce92d1 100644 --- a/libcxx/utils/google-benchmark/test/donotoptimize_test.cc +++ b/libcxx/utils/google-benchmark/test/donotoptimize_test.cc @@ -28,13 +28,13 @@ private: int main(int, char*[]) { // this test verifies compilation of DoNotOptimize() for some types - char buffer8[8]; + char buffer8[8] = ""; benchmark::DoNotOptimize(buffer8); - char buffer20[20]; + char buffer20[20] = ""; benchmark::DoNotOptimize(buffer20); - char buffer1024[1024]; + char buffer1024[1024] = ""; benchmark::DoNotOptimize(buffer1024); benchmark::DoNotOptimize(&buffer1024[0]); diff --git a/libcxx/utils/google-benchmark/test/link_main_test.cc b/libcxx/utils/google-benchmark/test/link_main_test.cc new file mode 100644 index 0000000..241ad5c --- /dev/null +++ b/libcxx/utils/google-benchmark/test/link_main_test.cc @@ -0,0 +1,8 @@ +#include "benchmark/benchmark.h" + +void BM_empty(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(state.iterations()); + } +} +BENCHMARK(BM_empty); diff --git a/libcxx/utils/google-benchmark/test/map_test.cc b/libcxx/utils/google-benchmark/test/map_test.cc index 311d2d2..dbf7982 100644 --- a/libcxx/utils/google-benchmark/test/map_test.cc +++ b/libcxx/utils/google-benchmark/test/map_test.cc @@ -8,7 +8,7 @@ namespace { std::map<int, int> ConstructRandomMap(int size) { std::map<int, int> m; for (int i = 0; i < size; ++i) { - m.insert(std::make_pair(rand() % size, rand() % size)); + m.insert(std::make_pair(std::rand() % size, std::rand() % size)); } return m; } @@ -17,14 +17,14 @@ std::map<int, int> ConstructRandomMap(int size) { // Basic version.
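// It keeps map construction out of the measured time using the
// pause/resume pattern sketched here (matching the loop body below):
//   state.PauseTiming();          // stop the clock while rebuilding the map
//   m = ConstructRandomMap(size);
//   state.ResumeTiming();         // time only the lookups that follow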
static void BM_MapLookup(benchmark::State& state) { - const int size = state.range(0); + const int size = static_cast<int>(state.range(0)); std::map<int, int> m; for (auto _ : state) { state.PauseTiming(); m = ConstructRandomMap(size); state.ResumeTiming(); for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(rand() % size)); + benchmark::DoNotOptimize(m.find(std::rand() % size)); } } state.SetItemsProcessed(state.iterations() * size); @@ -35,7 +35,7 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); class MapFixture : public ::benchmark::Fixture { public: void SetUp(const ::benchmark::State& st) { - m = ConstructRandomMap(st.range(0)); + m = ConstructRandomMap(static_cast<int>(st.range(0))); } void TearDown(const ::benchmark::State&) { m.clear(); } @@ -44,10 +44,10 @@ class MapFixture : public ::benchmark::Fixture { }; BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { - const int size = state.range(0); + const int size = static_cast<int>(state.range(0)); for (auto _ : state) { for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(rand() % size)); + benchmark::DoNotOptimize(m.find(std::rand() % size)); } } state.SetItemsProcessed(state.iterations() * size); diff --git a/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc b/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc index 0a82382..c64acab 100644 --- a/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc +++ b/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc @@ -1,7 +1,9 @@ #include "benchmark/benchmark.h" #include <cassert> +#include <iostream> #include <set> +#include <vector> class MultipleRangesFixture : public ::benchmark::Fixture { public: @@ -27,25 +29,46 @@ class MultipleRangesFixture : public ::benchmark::Fixture { {7, 6, 3}}) {} void SetUp(const ::benchmark::State& state) { - std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)}; + std::vector<int64_t> ranges = {state.range(0), state.range(1), + state.range(2)}; assert(expectedValues.find(ranges) != expectedValues.end()); actualValues.insert(ranges); } + // NOTE: This is not TearDown as we want to check after _all_ runs are + // complete.
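// (A TearDown()-based sketch would run after every benchmark run and could
// only ever observe a partial set, e.g.:
//   void TearDown(const ::benchmark::State&) {
//     // actualValues is still being filled in at this point, so comparing
//     // it against expectedValues here would be premature.
//   }
// hence the comparison lives in the destructor below.)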
virtual ~MultipleRangesFixture() { assert(actualValues.size() == expectedValues.size()); + if (actualValues.size() != expectedValues.size()) { + std::cout << "EXPECTED\n"; + for (auto v : expectedValues) { + std::cout << "{"; + for (int64_t iv : v) { + std::cout << iv << ", "; + } + std::cout << "}\n"; + } + std::cout << "ACTUAL\n"; + for (auto v : actualValues) { + std::cout << "{"; + for (int64_t iv : v) { + std::cout << iv << ", "; + } + std::cout << "}\n"; + } + } } - std::set<std::vector<int>> expectedValues; - std::set<std::vector<int>> actualValues; + std::set<std::vector<int64_t>> expectedValues; + std::set<std::vector<int64_t>> actualValues; }; BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) { for (auto _ : state) { - int product = state.range(0) * state.range(1) * state.range(2); - for (int x = 0; x < product; x++) { + int64_t product = state.range(0) * state.range(1) * state.range(2); + for (int64_t x = 0; x < product; x++) { benchmark::DoNotOptimize(x); } } diff --git a/libcxx/utils/google-benchmark/test/output_test.h b/libcxx/utils/google-benchmark/test/output_test.h index 897a138..31a9199 100644 --- a/libcxx/utils/google-benchmark/test/output_test.h +++ b/libcxx/utils/google-benchmark/test/output_test.h @@ -2,13 +2,13 @@ #define TEST_OUTPUT_TEST_H #undef NDEBUG +#include <functional> #include <initializer_list> #include <memory> +#include <sstream> #include <string> #include <utility> #include <vector> -#include <sstream> -#include <functional> #include "../src/re.h" #include "benchmark/benchmark.h" @@ -73,26 +73,27 @@ void RunOutputTests(int argc, char* argv[]); // will be the subject of a call to checker_function // checker_function: should be of type ResultsCheckFn (see below) #define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \ - size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) + size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) struct Results; -typedef std::function< void(Results const&) > ResultsCheckFn; +typedef std::function<void(Results const&)> ResultsCheckFn; size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn); // Class holding the results of a benchmark. // It is passed in calls to checker functions. struct Results { - // the benchmark name std::string name; // the benchmark fields - std::map< std::string, std::string > values; + std::map<std::string, std::string> values; Results(const std::string& n) : name(n) {} int NumThreads() const; + double NumIterations() const; + typedef enum { kCpuTime, kRealTime } BenchmarkTime; // get cpu_time or real_time in seconds @@ -102,18 +103,18 @@ struct Results { // it is better to use fuzzy float checks for this, as the float // ASCII formatting is lossy. double DurationRealTime() const { - return GetAs< double >("iterations") * GetTime(kRealTime); + return NumIterations() * GetTime(kRealTime); } // get the cpu_time duration of the benchmark in seconds double DurationCPUTime() const { - return GetAs< double >("iterations") * GetTime(kCpuTime); + return NumIterations() * GetTime(kCpuTime); } // get the string for a result by name, or nullptr if the name // is not found const std::string* Get(const char* entry_name) const { auto it = values.find(entry_name); - if(it == values.end()) return nullptr; + if (it == values.end()) return nullptr; return &it->second; } @@ -126,15 +127,15 @@ struct Results { // as a double, and only then converted to the asked type.
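// A usage sketch, assuming a Results object r and a counter named "foo":
//   int n = r.GetCounterAs<int>("foo");  // parsed as double, then truncated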
template <class T> T GetCounterAs(const char* entry_name) const { - double dval = GetAs< double >(entry_name); - T tval = static_cast< T >(dval); + double dval = GetAs<double>(entry_name); + T tval = static_cast<T>(dval); return tval; } }; template <class T> T Results::GetAs(const char* entry_name) const { - auto *sv = Get(entry_name); + auto* sv = Get(entry_name); CHECK(sv != nullptr && !sv->empty()); std::stringstream ss; ss << *sv; @@ -148,6 +149,8 @@ T Results::GetAs(const char* entry_name) const { // Macros to help in result checking. Do not use them with arguments causing // side-effects. +// clang-format off + #define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \ CONCAT(CHECK_, relationship) \ (entry.getfn< var_type >(var_name), (value)) << "\n" \ @@ -188,6 +191,8 @@ T Results::GetAs(const char* entry_name) const { #define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \ _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) +// clang-format on + // ========================================================================= // // --------------------------- Misc Utilities ------------------------------ // // ========================================================================= // diff --git a/libcxx/utils/google-benchmark/test/output_test_helper.cc b/libcxx/utils/google-benchmark/test/output_test_helper.cc index 24746f6..394c4f5 100644 --- a/libcxx/utils/google-benchmark/test/output_test_helper.cc +++ b/libcxx/utils/google-benchmark/test/output_test_helper.cc @@ -1,13 +1,13 @@ +#include #include #include #include #include -#include +#include "../src/benchmark_api_internal.h" #include "../src/check.h" // NOTE: check.h is for internal use only! #include "../src/re.h" // NOTE: re.h is for internal use only #include "output_test.h" -#include "../src/benchmark_api_internal.h" // ========================================================================= // // ------------------------------ Internals -------------------------------- // @@ -33,6 +33,7 @@ TestCaseList& GetTestCaseList(TestCaseID ID) { SubMap& GetSubstitutions() { // Don't use 'dec_re' from header because it may not yet be initialized. + // clang-format off static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; static SubMap map = { {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, @@ -40,8 +41,8 @@ SubMap& GetSubstitutions() { {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, {"%int", "[ ]*[0-9]+"}, {" %s ", "[ ]+"}, - {"%time", "[ ]*[0-9]{1,5} ns"}, - {"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"}, + {"%time", "[ ]*[0-9]{1,6} ns"}, + {"%console_report", "[ ]*[0-9]{1,6} ns [ ]*[0-9]{1,6} ns [ ]*[0-9]+"}, {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"}, {"%csv_header", "name,iterations,real_time,cpu_time,time_unit,bytes_per_second," @@ -57,6 +58,7 @@ SubMap& GetSubstitutions() { "," + safe_dec_re + ",,,"}, {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"}, {"%csv_label_report_end", ",,"}}; + // clang-format on return map; } @@ -147,9 +149,9 @@ class TestReporter : public benchmark::BenchmarkReporter { } private: - std::vector<benchmark::BenchmarkReporter *> reporters_; + std::vector<benchmark::BenchmarkReporter*> reporters_; }; -} +} // namespace } // end namespace internal @@ -163,28 +165,25 @@ namespace internal { // It works by parsing the CSV output to read the results.
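// A sketch of the flow, using the entry points defined further down:
// AddChecker() registers a pattern/callback pair, CheckResults() parses the
// CSV stream, and each parsed Results whose name matches is handed to the
// callback, e.g.:
//   AddChecker("BM_Foo.*", [](Results const& r) { CHECK(r.NumThreads() == 1); });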
class ResultsChecker { public: - - struct PatternAndFn : public TestCase { // reusing TestCase for its regexes + struct PatternAndFn : public TestCase { // reusing TestCase for its regexes PatternAndFn(const std::string& rx, ResultsCheckFn fn_) - : TestCase(rx), fn(fn_) {} + : TestCase(rx), fn(fn_) {} ResultsCheckFn fn; }; - std::vector< PatternAndFn > check_patterns; - std::vector< Results > results; - std::vector< std::string > field_names; + std::vector<PatternAndFn> check_patterns; + std::vector<Results> results; + std::vector<std::string> field_names; void Add(const std::string& entry_pattern, ResultsCheckFn fn); void CheckResults(std::stringstream& output); private: - void SetHeader_(const std::string& csv_header); void SetValues_(const std::string& entry_csv_line); - std::vector< std::string > SplitCsv_(const std::string& line); - + std::vector<std::string> SplitCsv_(const std::string& line); }; // store the static ResultsChecker in a function to prevent initialization @@ -207,7 +206,7 @@ void ResultsChecker::CheckResults(std::stringstream& output) { // clear before calling tellg() output.clear(); // seek to zero only when needed - if(output.tellg() > start) output.seekg(start); + if (output.tellg() > start) output.seekg(start); // and just in case output.clear(); } @@ -218,18 +217,18 @@ void ResultsChecker::CheckResults(std::stringstream& output) { CHECK(output.good()); std::getline(output, line); if (on_first) { - SetHeader_(line); // this is important + SetHeader_(line); // this is important on_first = false; continue; } SetValues_(line); } // finally we can call the subscribed check functions - for(const auto& p : check_patterns) { + for (const auto& p : check_patterns) { VLOG(2) << "--------------------------------\n"; VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; - for(const auto& r : results) { - if(!p.regex->Match(r.name)) { + for (const auto& r : results) { + if (!p.regex->Match(r.name)) { VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; continue; } else { @@ -249,51 +248,50 @@ void ResultsChecker::SetHeader_(const std::string& csv_header) { // set the values for a benchmark void ResultsChecker::SetValues_(const std::string& entry_csv_line) { - if(entry_csv_line.empty()) return; // some lines are empty + if (entry_csv_line.empty()) return; // some lines are empty CHECK(!field_names.empty()); auto vals = SplitCsv_(entry_csv_line); CHECK_EQ(vals.size(), field_names.size()); - results.emplace_back(vals[0]); // vals[0] is the benchmark name - auto &entry = results.back(); + results.emplace_back(vals[0]); // vals[0] is the benchmark name + auto& entry = results.back(); for (size_t i = 1, e = vals.size(); i < e; ++i) { entry.values[field_names[i]] = vals[i]; } } // a quick'n'dirty csv splitter (eliminating quotes) -std::vector< std::string > ResultsChecker::SplitCsv_(const std::string& line) { - std::vector< std::string > out; - if(line.empty()) return out; - if(!field_names.empty()) out.reserve(field_names.size()); +std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) { + std::vector<std::string> out; + if (line.empty()) return out; + if (!field_names.empty()) out.reserve(field_names.size()); size_t prev = 0, pos = line.find_first_of(','), curr = pos; - while(pos != line.npos) { + while (pos != line.npos) { CHECK(curr > 0); - if(line[prev] == '"') ++prev; - if(line[curr-1] == '"') --curr; - out.push_back(line.substr(prev, curr-prev)); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); prev = pos + 1; pos =
line.find_first_of(',', pos + 1); curr = pos; } curr = line.size(); - if(line[prev] == '"') ++prev; - if(line[curr-1] == '"') --curr; - out.push_back(line.substr(prev, curr-prev)); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); return out; } } // end namespace internal -size_t AddChecker(const char* bm_name, ResultsCheckFn fn) -{ - auto &rc = internal::GetResultsChecker(); +size_t AddChecker(const char* bm_name, ResultsCheckFn fn) { + auto& rc = internal::GetResultsChecker(); rc.Add(bm_name, fn); return rc.results.size(); } int Results::NumThreads() const { auto pos = name.find("/threads:"); - if(pos == name.npos) return 1; + if (pos == name.npos) return 1; auto end = name.find('/', pos + 9); std::stringstream ss; ss << name.substr(pos + 9, end); @@ -303,19 +301,23 @@ int Results::NumThreads() const { return num; } +double Results::NumIterations() const { + return GetAs<double>("iterations"); +} + double Results::GetTime(BenchmarkTime which) const { CHECK(which == kCpuTime || which == kRealTime); - const char *which_str = which == kCpuTime ? "cpu_time" : "real_time"; - double val = GetAs< double >(which_str); + const char* which_str = which == kCpuTime ? "cpu_time" : "real_time"; + double val = GetAs<double>(which_str); auto unit = Get("time_unit"); CHECK(unit); - if(*unit == "ns") { + if (*unit == "ns") { return val * 1.e-9; - } else if(*unit == "us") { + } else if (*unit == "us") { return val * 1.e-6; - } else if(*unit == "ms") { + } else if (*unit == "ms") { return val * 1.e-3; - } else if(*unit == "s") { + } else if (*unit == "s") { return val; } else { CHECK(1 == 0) << "unknown time unit: " << *unit; @@ -333,7 +335,7 @@ TestCase::TestCase(std::string re, int rule) substituted_regex(internal::PerformSubstitutions(regex_str)), regex(std::make_shared<benchmark::Regex>()) { std::string err_str; - regex->Init(substituted_regex,& err_str); + regex->Init(substituted_regex, &err_str); CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex << "\"" << "\n originally \"" << regex_str << "\"" @@ -367,7 +369,7 @@ int SetSubstitutions( void RunOutputTests(int argc, char* argv[]) { using internal::GetTestCaseList; benchmark::Initialize(&argc, argv); - auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/true); + auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true); benchmark::ConsoleReporter CR(options); benchmark::JSONReporter JR; benchmark::CSVReporter CSVR; @@ -416,7 +418,7 @@ void RunOutputTests(int argc, char* argv[]) { // now that we know the output is as expected, we can dispatch // the checks to subscribees.
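// (TestCases[2] is used below because ResultsChecker consumes CSV output;
// the CHECK that follows verifies this slot really is the CSVReporter
// before its stream is handed over.)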
- auto &csv = TestCases[2]; + auto& csv = TestCases[2]; // would use == but gcc spits a warning CHECK(std::strcmp(csv.name, "CSVReporter") == 0); internal::GetResultsChecker().CheckResults(csv.out_stream); diff --git a/libcxx/utils/google-benchmark/test/register_benchmark_test.cc b/libcxx/utils/google-benchmark/test/register_benchmark_test.cc index 8ab2c29..18de6d6 100644 --- a/libcxx/utils/google-benchmark/test/register_benchmark_test.cc +++ b/libcxx/utils/google-benchmark/test/register_benchmark_test.cc @@ -29,6 +29,7 @@ struct TestCase { typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { + // clang-format off CHECK(name == run.benchmark_name) << "expected " << name << " got " << run.benchmark_name; if (label) { @@ -37,6 +38,7 @@ struct TestCase { } else { CHECK(run.report_label == ""); } + // clang-format on } }; diff --git a/libcxx/utils/google-benchmark/test/reporter_output_test.cc b/libcxx/utils/google-benchmark/test/reporter_output_test.cc index 1620b31..1662fcb 100644 --- a/libcxx/utils/google-benchmark/test/reporter_output_test.cc +++ b/libcxx/utils/google-benchmark/test/reporter_output_test.cc @@ -9,23 +9,25 @@ // ---------------------- Testing Prologue Output -------------------------- // // ========================================================================= // -ADD_CASES(TC_ConsoleOut, - {{"^[-]+$", MR_Next}, - {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, - {"^[-]+$", MR_Next}}); +ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, + {"^[-]+$", MR_Next}}); static int AddContextCases() { AddCases(TC_ConsoleErr, { {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default}, + {"Running .*/reporter_output_test(\\.exe)?$", MR_Next}, {"Run on \\(%int X %float MHz CPU s\\)", MR_Next}, }); - AddCases(TC_JSONOut, {{"^\\{", MR_Default}, - {"\"context\":", MR_Next}, - {"\"date\": \"", MR_Next}, - {"\"num_cpus\": %int,$", MR_Next}, - {"\"mhz_per_cpu\": %float,$", MR_Next}, - {"\"cpu_scaling_enabled\": ", MR_Next}, - {"\"caches\": \\[$", MR_Next}}); + AddCases(TC_JSONOut, + {{"^\\{", MR_Default}, + {"\"context\":", MR_Next}, + {"\"date\": \"", MR_Next}, + {"\"executable\": \".*/reporter_output_test(\\.exe)?\",", MR_Next}, + {"\"num_cpus\": %int,$", MR_Next}, + {"\"mhz_per_cpu\": %float,$", MR_Next}, + {"\"cpu_scaling_enabled\": ", MR_Next}, + {"\"caches\": \\[$", MR_Next}}); auto const& Caches = benchmark::CPUInfo::Get().caches; if (!Caches.empty()) { AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}}); @@ -346,9 +348,12 @@ void BM_UserStats(benchmark::State& state) { for (auto _ : state) { } } +// clang-format off BENCHMARK(BM_UserStats) - ->Repetitions(3) - ->ComputeStatistics("", UserStatistics); + ->Repetitions(3) + ->ComputeStatistics("", UserStatistics); +// clang-format on + // check that user-provided stats is calculated, and is after the default-ones // empty string as name is intentional, it would sort before anything else ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"}, diff --git a/libcxx/utils/google-benchmark/test/skip_with_error_test.cc b/libcxx/utils/google-benchmark/test/skip_with_error_test.cc index 0c2f348..39785fb 100644 --- a/libcxx/utils/google-benchmark/test/skip_with_error_test.cc +++ b/libcxx/utils/google-benchmark/test/skip_with_error_test.cc @@ -33,8 +33,8 @@ struct TestCase { typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { - CHECK(name == run.benchmark_name) << "expected " << name << " got " - << 
run.benchmark_name; + CHECK(name == run.benchmark_name) + << "expected " << name << " got " << run.benchmark_name; CHECK(error_occurred == run.error_occurred); CHECK(error_message == run.error_message); if (error_occurred) { @@ -70,6 +70,15 @@ void BM_error_before_running(benchmark::State& state) { BENCHMARK(BM_error_before_running); ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); +void BM_error_before_running_batch(benchmark::State& state) { + state.SkipWithError("error message"); + while (state.KeepRunningBatch(17)) { + assert(false); + } +} +BENCHMARK(BM_error_before_running_batch); +ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}}); + void BM_error_before_running_range_for(benchmark::State& state) { state.SkipWithError("error message"); for (auto _ : state) { @@ -114,7 +123,7 @@ void BM_error_during_running_ranged_for(benchmark::State& state) { // Test the unfortunate but documented behavior that the ranged-for loop // doesn't automatically terminate when SkipWithError is set. assert(++It != End); - break; // Required behavior + break; // Required behavior } } } @@ -123,8 +132,6 @@ ADD_CASES("BM_error_during_running_ranged_for", {{"/1/iterations:5", true, "error message"}, {"/2/iterations:5", false, ""}}); - - void BM_error_after_running(benchmark::State& state) { for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); diff --git a/libcxx/utils/google-benchmark/test/state_assembly_test.cc b/libcxx/utils/google-benchmark/test/state_assembly_test.cc new file mode 100644 index 0000000..abe9a4d --- /dev/null +++ b/libcxx/utils/google-benchmark/test/state_assembly_test.cc @@ -0,0 +1,68 @@ +#include <benchmark/benchmark.h> + +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wreturn-type" +#endif + +// clang-format off +extern "C" { + extern int ExternInt; + benchmark::State& GetState(); + void Fn(); +} +// clang-format on + +using benchmark::State; + +// CHECK-LABEL: test_for_auto_loop: +extern "C" int test_for_auto_loop() { + State& S = GetState(); + int x = 42; + // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv + // CHECK-NEXT: testq %rbx, %rbx + // CHECK-NEXT: je [[LOOP_END:.*]] + + for (auto _ : S) { + // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]: + // CHECK-GNU-NEXT: subq $1, %rbx + // CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax + // CHECK-NEXT: jne .L[[LOOP_HEAD]] + benchmark::DoNotOptimize(x); + } + // CHECK: [[LOOP_END]]: + // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv + + // CHECK: movl $101, %eax + // CHECK: ret + return 101; +} + +// CHECK-LABEL: test_while_loop: +extern "C" int test_while_loop() { + State& S = GetState(); + int x = 42; + + // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]] + // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]: + while (S.KeepRunning()) { + // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]] + // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]] + // CHECK: movq %[[IREG]], [[DEST:.*]] + benchmark::DoNotOptimize(x); + } + // CHECK-DAG: movq [[DEST]], %[[IREG]] + // CHECK-DAG: testq %[[IREG]], %[[IREG]] + // CHECK-DAG: jne .L[[LOOP_BODY]] + // CHECK-DAG: .L[[LOOP_HEADER]]: + + // CHECK: cmpb $0 + // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]] + // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv + + // CHECK: .L[[LOOP_END]]: + // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv + + // CHECK: movl $101, %eax + // CHECK: ret + return 101; +} diff --git a/libcxx/utils/google-benchmark/test/statistics_gtest.cc b/libcxx/utils/google-benchmark/test/statistics_gtest.cc
new file mode 100644 index 0000000..99e3149 --- /dev/null +++ b/libcxx/utils/google-benchmark/test/statistics_gtest.cc @@ -0,0 +1,28 @@ +//===---------------------------------------------------------------------===// +// statistics_test - Unit tests for src/statistics.cc +//===---------------------------------------------------------------------===// + +#include "../src/statistics.h" +#include "gtest/gtest.h" + +namespace { +TEST(StatisticsTest, Mean) { + EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0); +} + +TEST(StatisticsTest, Median) { + EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0); +} + +TEST(StatisticsTest, StdDev) { + EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0); + EXPECT_FLOAT_EQ(benchmark::StatisticsStdDev({1.5, 2.4, 3.3, 4.2, 5.1}), + 1.42302495); +} + +} // end namespace diff --git a/libcxx/utils/google-benchmark/test/statistics_test.cc b/libcxx/utils/google-benchmark/test/statistics_test.cc deleted file mode 100644 index b4d6abb..0000000 --- a/libcxx/utils/google-benchmark/test/statistics_test.cc +++ /dev/null @@ -1,61 +0,0 @@ -//===---------------------------------------------------------------------===// -// statistics_test - Unit tests for src/statistics.cc -//===---------------------------------------------------------------------===// - -#include "../src/statistics.h" -#include "gtest/gtest.h" - -namespace { -TEST(StatisticsTest, Mean) { - std::vector<double> Inputs; - { - Inputs = {42, 42, 42, 42}; - double Res = benchmark::StatisticsMean(Inputs); - EXPECT_DOUBLE_EQ(Res, 42.0); - } - { - Inputs = {1, 2, 3, 4}; - double Res = benchmark::StatisticsMean(Inputs); - EXPECT_DOUBLE_EQ(Res, 2.5); - } - { - Inputs = {1, 2, 5, 10, 10, 14}; - double Res = benchmark::StatisticsMean(Inputs); - EXPECT_DOUBLE_EQ(Res, 7.0); - } -} - -TEST(StatisticsTest, Median) { - std::vector<double> Inputs; - { - Inputs = {42, 42, 42, 42}; - double Res = benchmark::StatisticsMedian(Inputs); - EXPECT_DOUBLE_EQ(Res, 42.0); - } - { - Inputs = {1, 2, 3, 4}; - double Res = benchmark::StatisticsMedian(Inputs); - EXPECT_DOUBLE_EQ(Res, 2.5); - } - { - Inputs = {1, 2, 5, 10, 10}; - double Res = benchmark::StatisticsMedian(Inputs); - EXPECT_DOUBLE_EQ(Res, 5.0); - } -} - -TEST(StatisticsTest, StdDev) { - std::vector<double> Inputs; - { - Inputs = {101, 101, 101, 101}; - double Res = benchmark::StatisticsStdDev(Inputs); - EXPECT_DOUBLE_EQ(Res, 0.0); - } - { - Inputs = {1, 2, 3}; - double Res = benchmark::StatisticsStdDev(Inputs); - EXPECT_DOUBLE_EQ(Res, 1.0); - } -} - -} // end namespace diff --git a/libcxx/utils/google-benchmark/test/string_util_gtest.cc b/libcxx/utils/google-benchmark/test/string_util_gtest.cc new file mode 100644 index 0000000..4c81734 --- /dev/null +++ b/libcxx/utils/google-benchmark/test/string_util_gtest.cc @@ -0,0 +1,146 @@ +//===---------------------------------------------------------------------===// +// string_util_test - Unit tests for src/string_util.cc +//===---------------------------------------------------------------------===// + +#include "../src/string_util.h" +#include "gtest/gtest.h" + +namespace { +TEST(StringUtilTest, stoul) { + { + size_t pos = 0; + EXPECT_EQ(0, benchmark::stoul("0",
&pos)); + EXPECT_EQ(1, pos); + } + { + size_t pos = 0; + EXPECT_EQ(7, benchmark::stoul("7", &pos)); + EXPECT_EQ(1, pos); + } + { + size_t pos = 0; + EXPECT_EQ(135, benchmark::stoul("135", &pos)); + EXPECT_EQ(3, pos); + } +#if ULONG_MAX == 0xFFFFFFFFul + { + size_t pos = 0; + EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos)); + EXPECT_EQ(10, pos); + } +#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul + { + size_t pos = 0; + EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos)); + EXPECT_EQ(20, pos); + } +#endif + { + size_t pos = 0; + EXPECT_EQ(10, benchmark::stoul("1010", &pos, 2)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(520, benchmark::stoul("1010", &pos, 8)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(1010, benchmark::stoul("1010", &pos, 10)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(4112, benchmark::stoul("1010", &pos, 16)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(0xBEEF, benchmark::stoul("BEEF", &pos, 16)); + EXPECT_EQ(4, pos); + } + { + ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); + } +} + +TEST(StringUtilTest, stoi) { + { + size_t pos = 0; + EXPECT_EQ(0, benchmark::stoi("0", &pos)); + EXPECT_EQ(1, pos); + } + { + size_t pos = 0; + EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); + EXPECT_EQ(3, pos); + } + { + size_t pos = 0; + EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); + EXPECT_EQ(4, pos); + } + { + ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); + } +} + +TEST(StringUtilTest, stod) { + { + size_t pos = 0; + EXPECT_EQ(0.0, benchmark::stod("0", &pos)); + EXPECT_EQ(1, pos); + } + { + size_t pos = 0; + EXPECT_EQ(-84.0, benchmark::stod("-84", &pos)); + EXPECT_EQ(3, pos); + } + { + size_t pos = 0; + EXPECT_EQ(1234.0, benchmark::stod("1234", &pos)); + EXPECT_EQ(4, pos); + } + { + size_t pos = 0; + EXPECT_EQ(1.5, benchmark::stod("1.5", &pos)); + EXPECT_EQ(3, pos); + } + { + size_t pos = 0; + /* Note: exactly representable as double */ + EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos)); + EXPECT_EQ(8, pos); + } + { + ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); + } +} + +} // end namespace diff --git a/libcxx/utils/google-benchmark/test/templated_fixture_test.cc b/libcxx/utils/google-benchmark/test/templated_fixture_test.cc index ec5b4c0..fe9865c 100644 --- a/libcxx/utils/google-benchmark/test/templated_fixture_test.cc +++ b/libcxx/utils/google-benchmark/test/templated_fixture_test.cc @@ -4,15 +4,15 @@ #include #include -template +template class MyFixture : public ::benchmark::Fixture { -public: + public: MyFixture() : data(0) {} T data; }; -BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) { +BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) { for (auto _ : st) { data += 1; } diff --git a/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc b/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc index 9b8a613..4f126b6 100644 --- 
a/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc +++ b/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc @@ -7,9 +7,11 @@ // @todo: this checks the full output at once; the rule for // CounterSet1 was failing because it was not matching "^[-]+$". // @todo: check that the counters are vertically aligned. -ADD_CASES(TC_ConsoleOut, { -// keeping these lines long improves readability, so: -// clang-format off +ADD_CASES( + TC_ConsoleOut, + { + // keeping these lines long improves readability, so: + // clang-format off {"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next}, {"^[-]+$", MR_Next}, @@ -44,8 +46,8 @@ ADD_CASES(TC_ConsoleOut, { {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"}, -// clang-format on -}); + // clang-format on + }); ADD_CASES(TC_CSVOut, {{"%csv_header," "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}}); @@ -58,12 +60,12 @@ void BM_Counters_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", { 1, bm::Counter::kAvgThreads}}, - {"Bar", { 2, bm::Counter::kAvgThreads}}, - {"Baz", { 4, bm::Counter::kAvgThreads}}, - {"Bat", { 8, bm::Counter::kAvgThreads}}, - {"Frob", {16, bm::Counter::kAvgThreads}}, - {"Lob", {32, bm::Counter::kAvgThreads}}, + {"Foo", {1, bm::Counter::kAvgThreads}}, + {"Bar", {2, bm::Counter::kAvgThreads}}, + {"Baz", {4, bm::Counter::kAvgThreads}}, + {"Bat", {8, bm::Counter::kAvgThreads}}, + {"Frob", {16, bm::Counter::kAvgThreads}}, + {"Lob", {32, bm::Counter::kAvgThreads}}, }); } BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); @@ -102,12 +104,12 @@ void BM_CounterRates_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", { 1, bm::Counter::kAvgThreadsRate}}, - {"Bar", { 2, bm::Counter::kAvgThreadsRate}}, - {"Baz", { 4, bm::Counter::kAvgThreadsRate}}, - {"Bat", { 8, bm::Counter::kAvgThreadsRate}}, - {"Frob", {16, bm::Counter::kAvgThreadsRate}}, - {"Lob", {32, bm::Counter::kAvgThreadsRate}}, + {"Foo", {1, bm::Counter::kAvgThreadsRate}}, + {"Bar", {2, bm::Counter::kAvgThreadsRate}}, + {"Baz", {4, bm::Counter::kAvgThreadsRate}}, + {"Bat", {8, bm::Counter::kAvgThreadsRate}}, + {"Frob", {16, bm::Counter::kAvgThreadsRate}}, + {"Lob", {32, bm::Counter::kAvgThreadsRate}}, }); } BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); @@ -129,12 +131,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report," // to CHECK_BENCHMARK_RESULTS() void CheckTabularRate(Results const& e) { double t = e.DurationCPUTime(); - CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. 
/ t, 0.001); } CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", &CheckTabularRate); @@ -149,9 +151,9 @@ void BM_CounterSet0_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", {10, bm::Counter::kAvgThreads}}, - {"Bar", {20, bm::Counter::kAvgThreads}}, - {"Baz", {40, bm::Counter::kAvgThreads}}, + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bar", {20, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, }); } BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); @@ -181,9 +183,9 @@ void BM_CounterSet1_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", {15, bm::Counter::kAvgThreads}}, - {"Bar", {25, bm::Counter::kAvgThreads}}, - {"Baz", {45, bm::Counter::kAvgThreads}}, + {"Foo", {15, bm::Counter::kAvgThreads}}, + {"Bar", {25, bm::Counter::kAvgThreads}}, + {"Baz", {45, bm::Counter::kAvgThreads}}, }); } BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); @@ -217,9 +219,9 @@ void BM_CounterSet2_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", {10, bm::Counter::kAvgThreads}}, - {"Bat", {30, bm::Counter::kAvgThreads}}, - {"Baz", {40, bm::Counter::kAvgThreads}}, + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bat", {30, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, }); } BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); diff --git a/libcxx/utils/google-benchmark/test/user_counters_test.cc b/libcxx/utils/google-benchmark/test/user_counters_test.cc index 06aafb1..7f7ccb9 100644 --- a/libcxx/utils/google-benchmark/test/user_counters_test.cc +++ b/libcxx/utils/google-benchmark/test/user_counters_test.cc @@ -8,12 +8,16 @@ // ---------------------- Testing Prologue Output -------------------------- // // ========================================================================= // +// clang-format off + ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next}, {"^[-]+$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}}); +// clang-format on + // ========================================================================= // // ------------------------- Simple Counters Output ------------------------ // // ========================================================================= // @@ -25,7 +29,8 @@ void BM_Counters_Simple(benchmark::State& state) { state.counters["bar"] = 2 * (double)state.iterations(); } BENCHMARK(BM_Counters_Simple); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, @@ -38,10 +43,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckSimple(Results const& e) { - double its = e.GetAs< double >("iterations"); + double its = e.NumIterations(); CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); // check that the value of bar is within 0.1% of the expected value - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2.*its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
* its, 0.001); } CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple); @@ -49,7 +54,9 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple); // --------------------- Counters+Items+Bytes/s Output --------------------- // // ========================================================================= // -namespace { int num_calls1 = 0; } +namespace { +int num_calls1 = 0; +} void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { for (auto _ : state) { } @@ -77,12 +84,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\"," // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckBytesAndItemsPSec(Results const& e) { - double t = e.DurationCPUTime(); // this (and not real time) is the time used + double t = e.DurationCPUTime(); // this (and not real time) is the time used CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1); // check that the values are within 0.1% of the expected values - CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364./t, 0.001); - CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150./t, 0.001); + CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001); + CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001); } CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", &CheckBytesAndItemsPSec); @@ -99,7 +106,9 @@ void BM_Counters_Rate(benchmark::State& state) { state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate}; } BENCHMARK(BM_Counters_Rate); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES( + TC_ConsoleOut, + {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, @@ -112,10 +121,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckRate(Results const& e) { - double t = e.DurationCPUTime(); // this (and not real time) is the time used + double t = e.DurationCPUTime(); // this (and not real time) is the time used // check that the values are within 0.1% of the expected values - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ t, 0.001); } CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate); @@ -130,7 +139,8 @@ void BM_Counters_Threads(benchmark::State& state) { state.counters["bar"] = 2; } BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, @@ -139,7 +149,9 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckThreads(Results const& e) { @@ -160,7 +172,8 @@ void BM_Counters_AvgThreads(benchmark::State& state) { state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads}; } BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int " + "%console_report bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, @@ -169,7 +182,9 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckAvgThreads(Results const& e) { @@ -191,8 +206,43 @@ void BM_Counters_AvgThreadsRate(benchmark::State& state) { state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate}; } BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/" + "threads:%int\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgThreadsRate(Results const& e) { + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ e.DurationCPUTime(), 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", + &CheckAvgThreadsRate); + +// ========================================================================= // +// ------------------- IterationInvariant Counters Output ------------------ // +// ========================================================================= // + +void BM_Counters_IterationInvariant(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant}; +} +BENCHMARK(BM_Counters_IterationInvariant); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -200,15 +250,128 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$ {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/threads:%int\",%csv_report,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() -void CheckAvgThreadsRate(Results const& e) { - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./e.DurationCPUTime(), 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./e.DurationCPUTime(), 0.001); +void CheckIterationInvariant(Results const& e) { + double its = e.NumIterations(); + // check that the values are within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
* its, 0.001); } -CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", - &CheckAvgThreadsRate); +CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", + &CheckIterationInvariant); + +// ========================================================================= // +// ----------------- IterationInvariantRate Counters Output ---------------- // +// ========================================================================= // + +void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = + bm::Counter{1, bm::Counter::kIsIterationInvariantRate}; + state.counters["bar"] = + bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant}; +} +BENCHMARK(BM_Counters_kIsIterationInvariantRate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report," + "%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckIsIterationInvariantRate(Results const& e) { + double its = e.NumIterations(); + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", + &CheckIsIterationInvariantRate); + +// ========================================================================= // +// ------------------- AvgIterations Counters Output ------------------ // +// ========================================================================= // + +void BM_Counters_AvgIterations(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations}; +} +BENCHMARK(BM_Counters_AvgIterations); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgIterations(Results const& e) { + double its = e.NumIterations(); + // check that the values are within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ its, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations); + +// ========================================================================= // +// ----------------- AvgIterationsRate Counters Output ---------------- // +// ========================================================================= // + +void BM_Counters_kAvgIterationsRate(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate}; + state.counters["bar"] = + bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations}; +} +BENCHMARK(BM_Counters_kAvgIterationsRate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report," + "%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgIterationsRate(Results const& e) { + double its = e.NumIterations(); + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", + &CheckAvgIterationsRate); // ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // diff --git a/libcxx/utils/google-benchmark/tools/compare.py b/libcxx/utils/google-benchmark/tools/compare.py index c4a47e8..d27e24b 100755 --- a/libcxx/utils/google-benchmark/tools/compare.py +++ b/libcxx/utils/google-benchmark/tools/compare.py @@ -35,6 +35,23 @@ def check_inputs(in1, in2, flags): def create_parser(): parser = ArgumentParser( description='versatile benchmark output compare tool') + + utest = parser.add_argument_group() + utest.add_argument( + '--no-utest', + dest='utest', + default=True, + action="store_false", + help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS)) + alpha_default = 0.05 + utest.add_argument( + "--alpha", + dest='utest_alpha', + default=alpha_default, + type=float, + help=("significance level alpha. 
if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % + alpha_default) + subparsers = parser.add_subparsers( help='This tool has multiple modes of operation:', dest='mode') @@ -138,6 +155,9 @@ def main(): # Parse the command line flags parser = create_parser() args, unknown_args = parser.parse_known_args() + if args.mode is None: + parser.print_help() + exit(1) assert not unknown_args benchmark_options = args.benchmark_options @@ -175,6 +195,7 @@ def main(): else: # should never happen print("Unrecognized mode of operation: '%s'" % args.mode) + parser.print_help() exit(1) check_inputs(test_baseline, test_contender, benchmark_options) @@ -201,7 +222,8 @@ def main(): json2_orig, filter_contender, replacement) # Diff and output - output_lines = gbench.report.generate_difference_report(json1, json2) + output_lines = gbench.report.generate_difference_report( + json1, json2, args.utest, args.utest_alpha) print(description) for ln in output_lines: print(ln) @@ -218,12 +240,43 @@ class TestParser(unittest.TestCase): os.path.realpath(__file__)), 'gbench', 'Inputs') - self.testInput0 = os.path.join(testInputs, 'test_baseline_run1.json') - self.testInput1 = os.path.join(testInputs, 'test_baseline_run2.json') + self.testInput0 = os.path.join(testInputs, 'test1_run1.json') + self.testInput1 = os.path.join(testInputs, 'test1_run2.json') def test_benchmarks_basic(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1]) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_without_utest(self): + parsed = self.parser.parse_args( + ['--no-utest', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.05) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_with_utest_alpha(self): + parsed = self.parser.parse_args( + ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.314) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_without_utest_with_utest_alpha(self): + parsed = self.parser.parse_args( + ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.314) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) @@ -232,6 +285,7 @@ class TestParser(unittest.TestCase): def test_benchmarks_with_remainder(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1, 'd']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, 
self.testInput1) @@ -240,6 +294,7 @@ class TestParser(unittest.TestCase): def test_benchmarks_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) @@ -248,6 +303,7 @@ class TestParser(unittest.TestCase): def test_filters_basic(self): parsed = self.parser.parse_args( ['filters', self.testInput0, 'c', 'd']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'filters') self.assertEqual(parsed.test[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -257,6 +313,7 @@ class TestParser(unittest.TestCase): def test_filters_with_remainder(self): parsed = self.parser.parse_args( ['filters', self.testInput0, 'c', 'd', 'e']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'filters') self.assertEqual(parsed.test[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -266,6 +323,7 @@ class TestParser(unittest.TestCase): def test_filters_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( ['filters', self.testInput0, 'c', 'd', '--', 'f']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'filters') self.assertEqual(parsed.test[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -275,6 +333,7 @@ class TestParser(unittest.TestCase): def test_benchmarksfiltered_basic(self): parsed = self.parser.parse_args( ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarksfiltered') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -285,6 +344,7 @@ class TestParser(unittest.TestCase): def test_benchmarksfiltered_with_remainder(self): parsed = self.parser.parse_args( ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarksfiltered') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -295,6 +355,7 @@ class TestParser(unittest.TestCase): def test_benchmarksfiltered_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarksfiltered') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') diff --git a/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json b/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json new file mode 100644 index 0000000..ca793f3 --- /dev/null +++ b/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json @@ -0,0 +1,39 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_One", + "iterations": 1000, + "real_time": 10, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 9, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 8, + "cpu_time": 80, + "time_unit": "ns" + }, + { + "name": 
"short", + "iterations": 1000, + "real_time": 8, + "cpu_time": 80, + "time_unit": "ns" + } + ] +} diff --git a/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json b/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json new file mode 100644 index 0000000..e5cf50c --- /dev/null +++ b/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json @@ -0,0 +1,39 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_One", + "iterations": 1000, + "real_time": 9, + "cpu_time": 110, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 10, + "cpu_time": 89, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 7, + "cpu_time": 70, + "time_unit": "ns" + }, + { + "name": "short", + "iterations": 1000, + "real_time": 8, + "cpu_time": 80, + "time_unit": "ns" + } + ] +} diff --git a/libcxx/utils/google-benchmark/tools/gbench/report.py b/libcxx/utils/google-benchmark/tools/gbench/report.py index 8d68fe9..4d03a54 100644 --- a/libcxx/utils/google-benchmark/tools/gbench/report.py +++ b/libcxx/utils/google-benchmark/tools/gbench/report.py @@ -4,6 +4,9 @@ import os import re import copy +from scipy.stats import mannwhitneyu + + class BenchmarkColor(object): def __init__(self, name, code): self.name = name @@ -16,11 +19,13 @@ class BenchmarkColor(object): def __format__(self, format): return self.code + # Benchmark Colors Enumeration BC_NONE = BenchmarkColor('NONE', '') BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') BC_CYAN = BenchmarkColor('CYAN', '\033[96m') BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') +BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m') BC_HEADER = BenchmarkColor('HEADER', '\033[92m') BC_WARNING = BenchmarkColor('WARNING', '\033[93m') BC_WHITE = BenchmarkColor('WHITE', '\033[97m') @@ -29,6 +34,10 @@ BC_ENDC = BenchmarkColor('ENDC', '\033[0m') BC_BOLD = BenchmarkColor('BOLD', '\033[1m') BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') +UTEST_MIN_REPETITIONS = 2 +UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better. + + def color_format(use_color, fmt_str, *args, **kwargs): """ Return the result of 'fmt_str.format(*args, **kwargs)' after transforming @@ -78,30 +87,90 @@ def filter_benchmark(json_orig, family, replacement=""): for be in json_orig['benchmarks']: if not regex.search(be['name']): continue - filteredbench = copy.deepcopy(be) # Do NOT modify the old name! + filteredbench = copy.deepcopy(be) # Do NOT modify the old name! filteredbench['name'] = regex.sub(replacement, filteredbench['name']) filtered['benchmarks'].append(filteredbench) return filtered -def generate_difference_report(json1, json2, use_color=True): +def generate_difference_report( + json1, + json2, + utest=False, + utest_alpha=0.05, + use_color=True): """ Calculate and report the difference between each test of two benchmarks runs specified as 'json1' and 'json2'. 
""" + assert utest is True or utest is False first_col_width = find_longest_name(json1['benchmarks']) + def find_test(name): for b in json2['benchmarks']: if b['name'] == name: return b return None - first_col_width = max(first_col_width, len('Benchmark')) + + utest_col_name = "_pvalue" + first_col_width = max( + first_col_width, + len('Benchmark')) + first_col_width += len(utest_col_name) first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( 'Benchmark', 12 + first_col_width) output_strs = [first_line, '-' * len(first_line)] - gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn) + last_name = None + timings_time = [[], []] + timings_cpu = [[], []] + + gen = (bn for bn in json1['benchmarks'] + if 'real_time' in bn and 'cpu_time' in bn) for bn in gen: + fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" + special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" + + if last_name is None: + last_name = bn['name'] + if last_name != bn['name']: + if ((len(timings_time[0]) >= UTEST_MIN_REPETITIONS) and + (len(timings_time[1]) >= UTEST_MIN_REPETITIONS) and + (len(timings_cpu[0]) >= UTEST_MIN_REPETITIONS) and + (len(timings_cpu[1]) >= UTEST_MIN_REPETITIONS)): + if utest: + def get_utest_color(pval): + if pval >= utest_alpha: + return BC_FAIL + else: + return BC_OKGREEN + time_pvalue = mannwhitneyu( + timings_time[0], timings_time[1], alternative='two-sided').pvalue + cpu_pvalue = mannwhitneyu( + timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue + dsc = "U Test, Repetitions: {}".format(len(timings_cpu[0])) + dsc_color = BC_OKGREEN + if len(timings_cpu[0]) < UTEST_OPTIMAL_REPETITIONS: + dsc_color = BC_WARNING + dsc += ". WARNING: Results unreliable! 
{}+ repetitions recommended.".format( + UTEST_OPTIMAL_REPETITIONS) + output_strs += [color_format(use_color, + special_str, + BC_HEADER, + "{}{}".format(last_name, + utest_col_name), + first_col_width, + get_utest_color(time_pvalue), + time_pvalue, + get_utest_color(cpu_pvalue), + cpu_pvalue, + dsc_color, + dsc, + endc=BC_ENDC)] + last_name = bn['name'] + timings_time = [[], []] + timings_cpu = [[], []] + other_bench = find_test(bn['name']) if not other_bench: continue @@ -116,26 +185,44 @@ def generate_difference_report(json1, json2, use_color=True): return BC_WHITE else: return BC_CYAN - fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" - tres = calculate_change(bn['real_time'], other_bench['real_time']) - cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) - output_strs += [color_format(use_color, fmt_str, - BC_HEADER, bn['name'], first_col_width, - get_color(tres), tres, get_color(cpures), cpures, - bn['real_time'], other_bench['real_time'], - bn['cpu_time'], other_bench['cpu_time'], - endc=BC_ENDC)] + + timings_time[0].append(bn['real_time']) + timings_time[1].append(other_bench['real_time']) + timings_cpu[0].append(bn['cpu_time']) + timings_cpu[1].append(other_bench['cpu_time']) + + tres = calculate_change(timings_time[0][-1], timings_time[1][-1]) + cpures = calculate_change(timings_cpu[0][-1], timings_cpu[1][-1]) + output_strs += [color_format(use_color, + fmt_str, + BC_HEADER, + bn['name'], + first_col_width, + get_color(tres), + tres, + get_color(cpures), + cpures, + timings_time[0][-1], + timings_time[1][-1], + timings_cpu[0][-1], + timings_cpu[1][-1], + endc=BC_ENDC)] return output_strs ############################################################################### # Unit tests + import unittest + class TestReportDifference(unittest.TestCase): def load_results(self): import json - testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') testOutput1 = os.path.join(testInputs, 'test1_run1.json') testOutput2 = os.path.join(testInputs, 'test1_run2.json') with open(testOutput1, 'r') as f: @@ -153,15 +240,20 @@ class TestReportDifference(unittest.TestCase): ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], - ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'], - ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'], - ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'], + ['BM_100xSlower', '+99.0000', '+99.0000', + '100', '10000', '100', '10000'], + ['BM_100xFaster', '-0.9900', '-0.9900', + '10000', '100', '10000', '100'], + ['BM_10PercentCPUToTime', '+0.1000', + '-0.1000', '100', '110', '100', '90'], ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], ] json1, json2 = self.load_results() - output_lines_with_header = generate_difference_report(json1, json2, use_color=False) + output_lines_with_header = generate_difference_report( + json1, json2, use_color=False) output_lines = output_lines_with_header[2:] + print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): @@ -173,7 +265,10 @@ class 
TestReportDifference(unittest.TestCase): class TestReportDifferenceBetweenFamilies(unittest.TestCase): def load_result(self): import json - testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') testOutput = os.path.join(testInputs, 'test2_run.json') with open(testOutput, 'r') as f: json = json.load(f) @@ -189,9 +284,10 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): json = self.load_result() json1 = filter_benchmark(json, "BM_Z.ro", ".") json2 = filter_benchmark(json, "BM_O.e", ".") - output_lines_with_header = generate_difference_report(json1, json2, use_color=False) + output_lines_with_header = generate_difference_report( + json1, json2, use_color=False) output_lines = output_lines_with_header[2:] - print "\n" + print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): @@ -200,6 +296,54 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): self.assertEqual(parts, expect_lines[i]) +class TestReportDifferenceWithUTest(unittest.TestCase): + def load_results(self): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test3_run0.json') + testOutput2 = os.path.join(testInputs, 'test3_run1.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + def test_utest(self): + expect_lines = [] + expect_lines = [ + ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], + ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], + ['BM_Two', '+0.2500', '+0.1125', '8', '10', '80', '89'], + ['BM_Two_pvalue', + '0.2207', + '0.6831', + 'U', + 'Test,', + 'Repetitions:', + '2.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['short', '+0.0000', '+0.0000', '8', '8', '80', '80'], + ] + json1, json2 = self.load_results() + output_lines_with_header = generate_difference_report( + json1, json2, True, 0.05, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(parts, expect_lines[i]) + + if __name__ == '__main__': unittest.main() diff --git a/libcxx/utils/google-benchmark/tools/strip_asm.py b/libcxx/utils/google-benchmark/tools/strip_asm.py new file mode 100755 index 0000000..9030550 --- /dev/null +++ b/libcxx/utils/google-benchmark/tools/strip_asm.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python + +""" +strip_asm.py - Cleanup ASM output for the specified file +""" + +from argparse import ArgumentParser +import sys +import os +import re + +def find_used_labels(asm): + found = set() + label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") + for l in asm.splitlines(): + m = label_re.match(l) + if m: + found.add('.L%s' % m.group(1)) + return found + + +def normalize_labels(asm): + decls = set() + label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") + for l in asm.splitlines(): + m = label_decl.match(l) + if m: + decls.add(m.group(0)) + if len(decls) == 0: + return asm + needs_dot = next(iter(decls))[0] != '.' 
+ if not needs_dot: + return asm + for ld in decls: + asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm) + return asm + + +def transform_labels(asm): + asm = normalize_labels(asm) + used_decls = find_used_labels(asm) + new_asm = '' + label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") + for l in asm.splitlines(): + m = label_decl.match(l) + if not m or m.group(0) in used_decls: + new_asm += l + new_asm += '\n' + return new_asm + + +def is_identifier(tk): + if len(tk) == 0: + return False + first = tk[0] + if not first.isalpha() and first != '_': + return False + for i in range(1, len(tk)): + c = tk[i] + if not c.isalnum() and c != '_': + return False + return True + +def process_identifiers(l): + """ + process_identifiers - process all identifiers and modify them to have + consistent names across all platforms; specifically across ELF and MachO. + For example, MachO inserts an additional underscore at the beginning of + names. This function removes that. + """ + parts = re.split(r'([a-zA-Z0-9_]+)', l) + new_line = '' + for tk in parts: + if is_identifier(tk): + if tk.startswith('__Z'): + tk = tk[1:] + elif tk.startswith('_') and len(tk) > 1 and \ + tk[1].isalpha() and tk[1] != 'Z': + tk = tk[1:] + new_line += tk + return new_line + + +def process_asm(asm): + """ + Strip the ASM of unwanted directives and lines + """ + new_contents = '' + asm = transform_labels(asm) + + # TODO: Add more things we want to remove + discard_regexes = [ + re.compile("\s+\..*$"), # directive + re.compile("\s*#(NO_APP|APP)$"), #inline ASM + re.compile("\s*#.*$"), # comment line + re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive + re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), + ] + keep_regexes = [ + + ] + fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") + for l in asm.splitlines(): + # Remove Mach-O attribute + l = l.replace('@GOTPCREL', '') + add_line = True + for reg in discard_regexes: + if reg.match(l) is not None: + add_line = False + break + for reg in keep_regexes: + if reg.match(l) is not None: + add_line = True + break + if add_line: + if fn_label_def.match(l) and len(new_contents) != 0: + new_contents += '\n' + l = process_identifiers(l) + new_contents += l + new_contents += '\n' + return new_contents + +def main(): + parser = ArgumentParser( + description='generate a stripped assembly file') + parser.add_argument( + 'input', metavar='input', type=str, nargs=1, + help='An input assembly file') + parser.add_argument( + 'out', metavar='output', type=str, nargs=1, + help='The output file') + args, unknown_args = parser.parse_known_args() + input = args.input[0] + output = args.out[0] + if not os.path.isfile(input): + print(("ERROR: input file '%s' does not exist") % input) + sys.exit(1) + contents = None + with open(input, 'r') as f: + contents = f.read() + new_contents = process_asm(contents) + with open(output, 'w') as f: + f.write(new_contents) + + +if __name__ == '__main__': + main() + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 +# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; +# kate: indent-mode python; remove-trailing-spaces modified; -- 2.7.4
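
Note on the U-test machinery this patch wires into tools/compare.py and tools/gbench/report.py: when at least UTEST_MIN_REPETITIONS repetitions are present on each side, the report runs a two-sided Mann-Whitney U test per benchmark family and colors the p-value against the --alpha threshold. The standalone sketch below reproduces that calculation outside the tooling. It is illustrative only, not part of the patch: it assumes scipy is installed, the helper name `significant` is invented here, and the timing data are the BM_Two cpu_time values from the test3_run0.json / test3_run1.json inputs added above.

    from scipy.stats import mannwhitneyu

    UTEST_MIN_REPETITIONS = 2      # same gate as tools/gbench/report.py
    UTEST_OPTIMAL_REPETITIONS = 9  # below this, results are flagged unreliable

    def significant(baseline, contender, alpha=0.05):
        # Two-sided null hypothesis: a value drawn from one sample is equally
        # likely to be less than or greater than a value drawn from the other.
        assert min(len(baseline), len(contender)) >= UTEST_MIN_REPETITIONS
        if len(baseline) < UTEST_OPTIMAL_REPETITIONS:
            print("WARNING: Results unreliable! %d+ repetitions recommended."
                  % UTEST_OPTIMAL_REPETITIONS)
        pvalue = mannwhitneyu(baseline, contender,
                              alternative='two-sided').pvalue
        return pvalue, pvalue < alpha

    # BM_Two cpu_time: run0 = [90, 80], run1 = [89, 70]. The unit test above
    # (TestReportDifferenceWithUTest) expects the p-value 0.6831 for this
    # pair, i.e. no significant difference at the default alpha of 0.05.
    print(significant([90, 80], [89, 70]))

A rank-based test is presumably preferred here over a t-test because it makes no distributional assumption about timing samples; the price is that it needs many repetitions to gain power, which is what the UTEST_OPTIMAL_REPETITIONS warning guards against.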