➕ using Google Benchmark #921
This commit is contained in:
parent a8f711a2f1
commit b406e3704b

53 changed files with 8039 additions and 0 deletions

43  benchmarks/thirdparty/benchmark/AUTHORS  vendored  Normal file

@@ -0,0 +1,43 @@
# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
#   Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.

Albert Pretorius <pretoalb@gmail.com>
Arne Beer <arne@twobeer.de>
Carto
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Dirac Research
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Google Inc.
International Business Machines Corporation
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steinar H. Gunderson <sgunderson@bigfoot.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>

213  benchmarks/thirdparty/benchmark/CMakeLists.txt  vendored  Normal file

@@ -0,0 +1,213 @@
cmake_minimum_required (VERSION 2.8.12)

project (benchmark)

foreach(p
    CMP0054 # CMake 3.1
    CMP0056 # export EXE_LINKER_FLAGS to try_run
    CMP0057 # Support no if() IN_LIST operator
    )
  if(POLICY ${p})
    cmake_policy(SET ${p} NEW)
  endif()
endforeach()

option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)

# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
# may require downloading the source code.
option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF)

# This option can be used to disable building and running unit tests which depend on gtest
# in cases where it is not possible to build or find a valid version of gtest.
option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)

# Make sure we can import out CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")

# Read the git tags to determine the project version
include(GetGitVersion)
get_git_version(GIT_VERSION)

# Tell the user what versions we are using
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION})
message("-- Version: ${VERSION}")

# The version of the libraries
set(GENERIC_LIB_VERSION ${VERSION})
string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION)

# Import our CMake modules
include(CheckCXXCompilerFlag)
include(AddCXXCompilerFlag)
include(CXXFeatureCheck)

if (BENCHMARK_BUILD_32_BITS)
  add_required_cxx_compiler_flag(-m32)
endif()

if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
  # Turn compiler warnings up to 11
  string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
  add_definitions(-D_CRT_SECURE_NO_WARNINGS)

  if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
    add_cxx_compiler_flag(-EHs-)
    add_cxx_compiler_flag(-EHa-)
  endif()
  # Link time optimisation
  if (BENCHMARK_ENABLE_LTO)
    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
    set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
    set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG")
    set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")

    set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL")
    string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}")
    set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
    string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}")
    set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
    string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}")
    set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")

    set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL")
    set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG")
    set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG")
    set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG")
  endif()
else()
  # Try and enable C++11. Don't use C++14 because it doesn't work in some
  # configurations.
  add_cxx_compiler_flag(-std=c++11)
  if (NOT HAVE_CXX_FLAG_STD_CXX11)
    add_cxx_compiler_flag(-std=c++0x)
  endif()

  # Turn compiler warnings up to 11
  add_cxx_compiler_flag(-Wall)

  add_cxx_compiler_flag(-Wextra)
  add_cxx_compiler_flag(-Wshadow)
  add_cxx_compiler_flag(-Werror RELEASE)
  add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
  add_cxx_compiler_flag(-Werror MINSIZEREL)
  add_cxx_compiler_flag(-pedantic)
  add_cxx_compiler_flag(-pedantic-errors)
  add_cxx_compiler_flag(-Wshorten-64-to-32)
  add_cxx_compiler_flag(-Wfloat-equal)
  add_cxx_compiler_flag(-fstrict-aliasing)
  if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
    add_cxx_compiler_flag(-fno-exceptions)
  endif()

  if (HAVE_CXX_FLAG_FSTRICT_ALIASING)
    if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing
      add_cxx_compiler_flag(-Wstrict-aliasing)
    endif()
  endif()
  # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
  # (because of deprecated overload)
  add_cxx_compiler_flag(-wd654)
  add_cxx_compiler_flag(-Wthread-safety)
  if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
    cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
  endif()

  # On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a
  # predefined macro, which turns on all of the wonderful libc extensions.
  # However g++ doesn't do this in Cygwin so we have to define it ourselfs
  # since we depend on GNU/POSIX/BSD extensions.
  if (CYGWIN)
    add_definitions(-D_GNU_SOURCE=1)
  endif()

  # Link time optimisation
  if (BENCHMARK_ENABLE_LTO)
    add_cxx_compiler_flag(-flto)
    if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
      find_program(GCC_AR gcc-ar)
      if (GCC_AR)
        set(CMAKE_AR ${GCC_AR})
      endif()
      find_program(GCC_RANLIB gcc-ranlib)
      if (GCC_RANLIB)
        set(CMAKE_RANLIB ${GCC_RANLIB})
      endif()
    elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
      include(llvm-toolchain)
    endif()
  endif()

  # Coverage build type
  set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}"
    CACHE STRING "Flags used by the C++ compiler during coverage builds."
    FORCE)
  set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}"
    CACHE STRING "Flags used for linking binaries during coverage builds."
    FORCE)
  set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}"
    CACHE STRING "Flags used by the shared libraries linker during coverage builds."
    FORCE)
  mark_as_advanced(
    BENCHMARK_CXX_FLAGS_COVERAGE
    BENCHMARK_EXE_LINKER_FLAGS_COVERAGE
    BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE)
  set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
    "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.")
  add_cxx_compiler_flag(--coverage COVERAGE)
endif()

if (BENCHMARK_USE_LIBCXX)
  if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    add_cxx_compiler_flag(-stdlib=libc++)
  elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR
          "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
    add_cxx_compiler_flag(-nostdinc++)
    message("libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
    # Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
    # configuration checks such as 'find_package(Threads)'
    list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
    # -lc++ cannot be added directly to CMAKE_<TYPE>_LINKER_FLAGS because
    # linker flags appear before all linker inputs and -lc++ must appear after.
    list(APPEND BENCHMARK_CXX_LIBRARIES c++)
  else()
    message(FATAL "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
  endif()
endif(BENCHMARK_USE_LIBCXX)

# C++ feature checks
# Determine the correct regular expression engine to use
cxx_feature_check(STD_REGEX)
cxx_feature_check(GNU_POSIX_REGEX)
cxx_feature_check(POSIX_REGEX)
if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
  message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
endif()
if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
        AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
  message(WARNING "Using std::regex with exceptions disabled is not fully supported")
endif()
cxx_feature_check(STEADY_CLOCK)
# Ensure we have pthreads
find_package(Threads REQUIRED)

# Set up directories
include_directories(${PROJECT_SOURCE_DIR}/include)

# Build the targets
add_subdirectory(src)

if (BENCHMARK_ENABLE_TESTING)
  enable_testing()
  if (BENCHMARK_ENABLE_GTEST_TESTS)
    include(HandleGTest)
  endif()
  add_subdirectory(test)
endif()

62  benchmarks/thirdparty/benchmark/CONTRIBUTORS  vendored  Normal file

@@ -0,0 +1,62 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people.  For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
#     Name <email address>
#
# Please keep the list sorted.

Albert Pretorius <pretoalb@gmail.com>
Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Kaito Udagawa <umireon@gmail.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Tobias Ulvgård <tobias.ulvgard@dirac.se>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>

202  benchmarks/thirdparty/benchmark/LICENSE  vendored  Normal file

@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

935  benchmarks/thirdparty/benchmark/README.md  vendored  Normal file

@@ -0,0 +1,935 @@
# benchmark
[Build Status](https://travis-ci.org/google/benchmark)
[Build status](https://ci.appveyor.com/project/google/benchmark/branch/master)
[Coverage Status](https://coveralls.io/r/google/benchmark)
[Slack](https://slackin-iqtfqnpzxd.now.sh/)

A library to support the benchmarking of functions, similar to unit tests.

Discussion group: https://groups.google.com/d/forum/benchmark-discuss

IRC channel: https://freenode.net #googlebenchmark

[Known issues and common problems](#known-issues)

[Additional Tooling Documentation](docs/tools.md)


## Building

The basic steps for configuring and building the library look like this:

```bash
$ git clone https://github.com/google/benchmark.git
# Benchmark requires GTest as a dependency. Add the source tree as a subdirectory.
$ git clone https://github.com/google/googletest.git benchmark/googletest
$ mkdir build && cd build
$ cmake -G <generator> [options] ../benchmark
# Assuming a makefile generator was used
$ make
```

Note that Google Benchmark requires GTest to build and run the tests. This
dependency can be provided in one of three ways:

* Check out the GTest sources into `benchmark/googletest`.
* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
  configuration, the library will automatically download and build any required
  dependencies.
* Otherwise, if nothing is done, CMake will use `find_package(GTest REQUIRED)`
  to resolve the required GTest dependency.

If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
to `CMAKE_ARGS`.
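
For example, either option can be passed at configure time. The invocation below is only a sketch; substitute your own generator and build directory layout:

```bash
# Let CMake download and build any missing dependencies in-tree
$ cmake -G <generator> -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ../benchmark
# Or configure without the gtest-based unit tests
$ cmake -G <generator> -DBENCHMARK_ENABLE_GTEST_TESTS=OFF ../benchmark
```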


## Installation Guide

For Ubuntu and Debian based systems.

First, make sure you have git and cmake installed (if not, please install them):

```
sudo apt-get install git
sudo apt-get install cmake
```

Now, let's clone the repository and build it:

```
git clone https://github.com/google/benchmark.git
cd benchmark
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=RELEASE
make
```

We now need to install the library globally:

```
sudo make install
```

Now you have google/benchmark installed on your machine.
Note: don't forget to link against the pthread library while building.

## Stable and Experimental Library Versions

The main branch contains the latest stable version of the benchmarking library;
its API can be considered largely stable, with source-breaking changes
being made only upon the release of a new major version.

Newer, experimental features are implemented and tested on the
[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.


## Example usage
### Basic usage
Define a function that executes the code to be measured.

```c++
#include <benchmark/benchmark.h>

static void BM_StringCreation(benchmark::State& state) {
  for (auto _ : state)
    std::string empty_string;
}
// Register the function as a benchmark
BENCHMARK(BM_StringCreation);

// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
  std::string x = "hello";
  for (auto _ : state)
    std::string copy(x);
}
BENCHMARK(BM_StringCopy);

BENCHMARK_MAIN();
```

Don't forget to tell your linker to link against the benchmark library, e.g. through the `-lbenchmark` compiler flag.

The benchmark library will report the timing for the code within the `for(...)` loop.
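
For instance, a standalone benchmark built against an installed copy of the library might be compiled roughly as follows. This is only a sketch: the source file name and the install paths are placeholders, not part of the library's documentation.

```bash
# Link against the benchmark library and pthread (see the note above)
$ g++ mybenchmark.cc -std=c++11 -isystem /usr/local/include \
      -L/usr/local/lib -lbenchmark -lpthread -o mybenchmark
```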
 | 
			
		||||
 | 
			
		||||
### Passing arguments
 | 
			
		||||
Sometimes a family of benchmarks can be implemented with just one routine that
 | 
			
		||||
takes an extra argument to specify which one of the family of benchmarks to
 | 
			
		||||
run. For example, the following code defines a family of benchmarks for
 | 
			
		||||
measuring the speed of `memcpy()` calls of different lengths:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_memcpy(benchmark::State& state) {
 | 
			
		||||
  char* src = new char[state.range(0)];
 | 
			
		||||
  char* dst = new char[state.range(0)];
 | 
			
		||||
  memset(src, 'x', state.range(0));
 | 
			
		||||
  for (auto _ : state)
 | 
			
		||||
    memcpy(dst, src, state.range(0));
 | 
			
		||||
  state.SetBytesProcessed(int64_t(state.iterations()) *
 | 
			
		||||
                          int64_t(state.range(0)));
 | 
			
		||||
  delete[] src;
 | 
			
		||||
  delete[] dst;
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The preceding code is quite repetitive, and can be replaced with the following
 | 
			
		||||
short-hand. The following invocation will pick a few appropriate arguments in
 | 
			
		||||
the specified range and will generate a benchmark for each such argument.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
BENCHMARK(BM_memcpy)->Range(8, 8<<10);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
By default the arguments in the range are generated in multiples of eight and
 | 
			
		||||
the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
 | 
			
		||||
range multiplier is changed to multiples of two.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
 | 
			
		||||
```
 | 
			
		||||
Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
 | 
			
		||||
 | 
			
		||||
You might have a benchmark that depends on two or more inputs. For example, the
 | 
			
		||||
following code defines a family of benchmarks for measuring the speed of set
 | 
			
		||||
insertion.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_SetInsert(benchmark::State& state) {
 | 
			
		||||
  std::set<int> data;
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    state.PauseTiming();
 | 
			
		||||
    data = ConstructRandomSet(state.range(0));
 | 
			
		||||
    state.ResumeTiming();
 | 
			
		||||
    for (int j = 0; j < state.range(1); ++j)
 | 
			
		||||
      data.insert(RandomNumber());
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK(BM_SetInsert)
 | 
			
		||||
    ->Args({1<<10, 128})
 | 
			
		||||
    ->Args({2<<10, 128})
 | 
			
		||||
    ->Args({4<<10, 128})
 | 
			
		||||
    ->Args({8<<10, 128})
 | 
			
		||||
    ->Args({1<<10, 512})
 | 
			
		||||
    ->Args({2<<10, 512})
 | 
			
		||||
    ->Args({4<<10, 512})
 | 
			
		||||
    ->Args({8<<10, 512});
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The preceding code is quite repetitive, and can be replaced with the following
 | 
			
		||||
short-hand. The following macro will pick a few appropriate arguments in the
 | 
			
		||||
product of the two specified ranges and will generate a benchmark for each such
 | 
			
		||||
pair.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
For more complex patterns of inputs, passing a custom function to `Apply` allows
 | 
			
		||||
programmatic specification of an arbitrary set of arguments on which to run the
 | 
			
		||||
benchmark. The following example enumerates a dense range on one parameter,
 | 
			
		||||
and a sparse range on the second.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void CustomArguments(benchmark::internal::Benchmark* b) {
 | 
			
		||||
  for (int i = 0; i <= 10; ++i)
 | 
			
		||||
    for (int j = 32; j <= 1024*1024; j *= 8)
 | 
			
		||||
      b->Args({i, j});
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Calculate asymptotic complexity (Big O)
 | 
			
		||||
Asymptotic complexity might be calculated for a family of benchmarks. The
 | 
			
		||||
following code will calculate the coefficient for the high-order term in the
 | 
			
		||||
running time and the normalized root-mean square error of string comparison.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_StringCompare(benchmark::State& state) {
 | 
			
		||||
  std::string s1(state.range(0), '-');
 | 
			
		||||
  std::string s2(state.range(0), '-');
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    benchmark::DoNotOptimize(s1.compare(s2));
 | 
			
		||||
  }
 | 
			
		||||
  state.SetComplexityN(state.range(0));
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK(BM_StringCompare)
 | 
			
		||||
    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
As shown in the following invocation, asymptotic complexity might also be
 | 
			
		||||
calculated automatically.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
BENCHMARK(BM_StringCompare)
 | 
			
		||||
    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The following code will specify asymptotic complexity with a lambda function,
 | 
			
		||||
that might be used to customize high-order term calculation.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
 | 
			
		||||
    ->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Templated benchmarks
 | 
			
		||||
Templated benchmarks work the same way: This example produces and consumes
 | 
			
		||||
messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
 | 
			
		||||
absence of multiprogramming.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
template <class Q> int BM_Sequential(benchmark::State& state) {
 | 
			
		||||
  Q q;
 | 
			
		||||
  typename Q::value_type v;
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    for (int i = state.range(0); i--; )
 | 
			
		||||
      q.push(v);
 | 
			
		||||
    for (int e = state.range(0); e--; )
 | 
			
		||||
      q.Wait(&v);
 | 
			
		||||
  }
 | 
			
		||||
  // actually messages, not bytes:
 | 
			
		||||
  state.SetBytesProcessed(
 | 
			
		||||
      static_cast<int64_t>(state.iterations())*state.range(0));
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Three macros are provided for adding benchmark templates.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
#ifdef BENCHMARK_HAS_CXX11
 | 
			
		||||
#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
 | 
			
		||||
#else // C++ < C++11
 | 
			
		||||
#define BENCHMARK_TEMPLATE(func, arg1)
 | 
			
		||||
#endif
 | 
			
		||||
#define BENCHMARK_TEMPLATE1(func, arg1)
 | 
			
		||||
#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### A Faster KeepRunning loop
 | 
			
		||||
 | 
			
		||||
In C++11 mode, a ranged-based for loop should be used in preference to
 | 
			
		||||
the `KeepRunning` loop for running the benchmarks. For example:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_Fast(benchmark::State &state) {
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    FastOperation();
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK(BM_Fast);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The reason the ranged-for loop is faster than using `KeepRunning`, is
 | 
			
		||||
because `KeepRunning` requires a memory load and store of the iteration count
 | 
			
		||||
ever iteration, whereas the ranged-for variant is able to keep the iteration count
 | 
			
		||||
in a register.
 | 
			
		||||
 | 
			
		||||
For example, an empty inner loop of using the ranged-based for method looks like:
 | 
			
		||||
 | 
			
		||||
```asm
 | 
			
		||||
# Loop Init
 | 
			
		||||
  mov rbx, qword ptr [r14 + 104]
 | 
			
		||||
  call benchmark::State::StartKeepRunning()
 | 
			
		||||
  test rbx, rbx
 | 
			
		||||
  je .LoopEnd
 | 
			
		||||
.LoopHeader: # =>This Inner Loop Header: Depth=1
 | 
			
		||||
  add rbx, -1
 | 
			
		||||
  jne .LoopHeader
 | 
			
		||||
.LoopEnd:
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Compared to an empty `KeepRunning` loop, which looks like:
 | 
			
		||||
 | 
			
		||||
```asm
 | 
			
		||||
.LoopHeader: # in Loop: Header=BB0_3 Depth=1
 | 
			
		||||
  cmp byte ptr [rbx], 1
 | 
			
		||||
  jne .LoopInit
 | 
			
		||||
.LoopBody: # =>This Inner Loop Header: Depth=1
 | 
			
		||||
  mov rax, qword ptr [rbx + 8]
 | 
			
		||||
  lea rcx, [rax + 1]
 | 
			
		||||
  mov qword ptr [rbx + 8], rcx
 | 
			
		||||
  cmp rax, qword ptr [rbx + 104]
 | 
			
		||||
  jb .LoopHeader
 | 
			
		||||
  jmp .LoopEnd
 | 
			
		||||
.LoopInit:
 | 
			
		||||
  mov rdi, rbx
 | 
			
		||||
  call benchmark::State::StartKeepRunning()
 | 
			
		||||
  jmp .LoopBody
 | 
			
		||||
.LoopEnd:
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Unless C++03 compatibility is required, the ranged-for variant of writing
 | 
			
		||||
the benchmark loop should be preferred.  
 | 
			
		||||
 | 
			
		||||
## Passing arbitrary arguments to a benchmark
 | 
			
		||||
In C++11 it is possible to define a benchmark that takes an arbitrary number
 | 
			
		||||
of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
 | 
			
		||||
macro creates a benchmark that invokes `func`  with the `benchmark::State` as
 | 
			
		||||
the first argument followed by the specified `args...`.
 | 
			
		||||
The `test_case_name` is appended to the name of the benchmark and
 | 
			
		||||
should describe the values passed.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
template <class ...ExtraArgs>
 | 
			
		||||
void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
 | 
			
		||||
  [...]
 | 
			
		||||
}
 | 
			
		||||
// Registers a benchmark named "BM_takes_args/int_string_test" that passes
 | 
			
		||||
// the specified values to `extra_args`.
 | 
			
		||||
BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
 | 
			
		||||
```
 | 
			
		||||
Note that elements of `...args` may refer to global variables. Users should
 | 
			
		||||
avoid modifying global state inside of a benchmark.
 | 
			
		||||
 | 
			
		||||
## Using RegisterBenchmark(name, fn, args...)
 | 
			
		||||
 | 
			
		||||
The `RegisterBenchmark(name, func, args...)` function provides an alternative
 | 
			
		||||
way to create and register benchmarks.
 | 
			
		||||
`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
 | 
			
		||||
pointer to a new benchmark with the specified `name` that invokes
 | 
			
		||||
`func(st, args...)` where `st` is a `benchmark::State` object.
 | 
			
		||||
 | 
			
		||||
Unlike the `BENCHMARK` registration macros, which can only be used at the global
 | 
			
		||||
scope, the `RegisterBenchmark` can be called anywhere. This allows for
 | 
			
		||||
benchmark tests to be registered programmatically.
 | 
			
		||||
 | 
			
		||||
Additionally `RegisterBenchmark` allows any callable object to be registered
 | 
			
		||||
as a benchmark. Including capturing lambdas and function objects.
 | 
			
		||||
 | 
			
		||||
For Example:
 | 
			
		||||
```c++
 | 
			
		||||
auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
 | 
			
		||||
 | 
			
		||||
int main(int argc, char** argv) {
 | 
			
		||||
  for (auto& test_input : { /* ... */ })
 | 
			
		||||
      benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
 | 
			
		||||
  benchmark::Initialize(&argc, argv);
 | 
			
		||||
  benchmark::RunSpecifiedBenchmarks();
 | 
			
		||||
}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Multithreaded benchmarks
 | 
			
		||||
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
 | 
			
		||||
it is guaranteed that none of the threads will start until all have reached
 | 
			
		||||
the start of the benchmark loop, and all will have finished before any thread
 | 
			
		||||
exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
 | 
			
		||||
API) As such, any global setup or teardown can be wrapped in a check against the thread
 | 
			
		||||
index:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_MultiThreaded(benchmark::State& state) {
 | 
			
		||||
  if (state.thread_index == 0) {
 | 
			
		||||
    // Setup code here.
 | 
			
		||||
  }
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    // Run the test as normal.
 | 
			
		||||
  }
 | 
			
		||||
  if (state.thread_index == 0) {
 | 
			
		||||
    // Teardown code here.
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK(BM_MultiThreaded)->Threads(2);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
If the benchmarked code itself uses threads and you want to compare it to
 | 
			
		||||
single-threaded code, you may want to use real-time ("wallclock") measurements
 | 
			
		||||
for latency comparisons:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Without `UseRealTime`, CPU time is used by default.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Manual timing
 | 
			
		||||
For benchmarking something for which neither CPU time nor real-time are
 | 
			
		||||
correct or accurate enough, completely manual timing is supported using
 | 
			
		||||
the `UseManualTime` function.
 | 
			
		||||
 | 
			
		||||
When `UseManualTime` is used, the benchmarked code must call
 | 
			
		||||
`SetIterationTime` once per iteration of the benchmark loop to
 | 
			
		||||
report the manually measured time.
 | 
			
		||||
 | 
			
		||||
An example use case for this is benchmarking GPU execution (e.g. OpenCL
 | 
			
		||||
or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
 | 
			
		||||
be accurately measured using CPU time or real-time. Instead, they can be
 | 
			
		||||
measured accurately using a dedicated API, and these measurement results
 | 
			
		||||
can be reported back with `SetIterationTime`.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_ManualTiming(benchmark::State& state) {
 | 
			
		||||
  int microseconds = state.range(0);
 | 
			
		||||
  std::chrono::duration<double, std::micro> sleep_duration {
 | 
			
		||||
    static_cast<double>(microseconds)
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    auto start = std::chrono::high_resolution_clock::now();
 | 
			
		||||
    // Simulate some useful workload with a sleep
 | 
			
		||||
    std::this_thread::sleep_for(sleep_duration);
 | 
			
		||||
    auto end   = std::chrono::high_resolution_clock::now();
 | 
			
		||||
 | 
			
		||||
    auto elapsed_seconds =
 | 
			
		||||
      std::chrono::duration_cast<std::chrono::duration<double>>(
 | 
			
		||||
        end - start);
 | 
			
		||||
 | 
			
		||||
    state.SetIterationTime(elapsed_seconds.count());
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Preventing optimisation
 | 
			
		||||
To prevent a value or expression from being optimized away by the compiler
 | 
			
		||||
the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
 | 
			
		||||
functions can be used.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_test(benchmark::State& state) {
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
      int x = 0;
 | 
			
		||||
      for (int i=0; i < 64; ++i) {
 | 
			
		||||
        benchmark::DoNotOptimize(x += i);
 | 
			
		||||
      }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
`DoNotOptimize(<expr>)` forces the  *result* of `<expr>` to be stored in either
 | 
			
		||||
memory or a register. For GNU based compilers it acts as read/write barrier
 | 
			
		||||
for global memory. More specifically it forces the compiler to flush pending
 | 
			
		||||
writes to memory and reload any other values as necessary.
 | 
			
		||||
 | 
			
		||||
Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
 | 
			
		||||
in any way. `<expr>` may even be removed entirely when the result is already
 | 
			
		||||
known. For example:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
  /* Example 1: `<expr>` is removed entirely. */
 | 
			
		||||
  int foo(int x) { return x + 42; }
 | 
			
		||||
  while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
 | 
			
		||||
 | 
			
		||||
  /*  Example 2: Result of '<expr>' is only reused */
 | 
			
		||||
  int bar(int) __attribute__((const));
 | 
			
		||||
  while (...) DoNotOptimize(bar(0)); // Optimized to:
 | 
			
		||||
  // int __result__ = bar(0);
 | 
			
		||||
  // while (...) DoNotOptimize(__result__);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The second tool for preventing optimizations is `ClobberMemory()`. In essence
 | 
			
		||||
`ClobberMemory()` forces the compiler to perform all pending writes to global
 | 
			
		||||
memory. Memory managed by block scope objects must be "escaped" using
 | 
			
		||||
`DoNotOptimize(...)` before it can be clobbered. In the below example
 | 
			
		||||
`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
 | 
			
		||||
away.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_vector_push_back(benchmark::State& state) {
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    std::vector<int> v;
 | 
			
		||||
    v.reserve(1);
 | 
			
		||||
    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
 | 
			
		||||
    v.push_back(42);
 | 
			
		||||
    benchmark::ClobberMemory(); // Force 42 to be written to memory.
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
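Putting the two facilities together, here is a minimal sketch (the benchmark
name, buffer contents and size range are invented for this illustration) that
uses `DoNotOptimize(...)` to escape the destination buffer and
`ClobberMemory()` to force the copied bytes to actually be written:

```c++
#include <benchmark/benchmark.h>

#include <cstring>
#include <vector>

// Illustrative only: copy state.range(0) bytes per iteration.
static void BM_memcpy_sketch(benchmark::State& state) {
  std::vector<char> src(static_cast<size_t>(state.range(0)), 'x');
  std::vector<char> dst(src.size());
  for (auto _ : state) {
    benchmark::DoNotOptimize(dst.data());  // allow dst's storage to be clobbered
    std::memcpy(dst.data(), src.data(), src.size());
    benchmark::ClobberMemory();            // force the writes to memory
  }
}
BENCHMARK(BM_memcpy_sketch)->Range(8, 8 << 10);
```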
 | 
			
		||||
 | 
			
		||||
### Set time unit manually
 | 
			
		||||
If a benchmark runs for a few milliseconds it may be hard to visually compare
the measured times, since the output data is given in nanoseconds by default.
To change this, you can set the time unit explicitly:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Controlling number of iterations
 | 
			
		||||
In all cases, the number of iterations for which the benchmark is run is
governed by the amount of time the benchmark takes. Concretely, the benchmark
runs at least one iteration and at most 1e9, stopping as soon as the CPU time
exceeds the minimum time, or the wall-clock time exceeds 5x the minimum time.
The minimum time is set globally with the `--benchmark_min_time` flag, or per
benchmark by calling `MinTime` on the registered benchmark object.
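For example, a minimal sketch (the benchmark name and body are placeholders,
not part of the library) that raises the minimum measurement time of a single
benchmark to two seconds:

```c++
static void BM_SlowToConverge(benchmark::State& state) {
  int x = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(x += 1);
  }
}
// Keep iterating until at least 2 seconds of measurement time have accumulated.
BENCHMARK(BM_SlowToConverge)->MinTime(2.0);
```
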
## Reporting the mean, median and standard deviation by repeated benchmarks
 | 
			
		||||
By default each benchmark is run once and that single result is reported.
However, benchmarks are often noisy and a single result may not be
representative of the overall behavior. For this reason it is possible to
repeatedly rerun the benchmark.

The number of runs of each benchmark is specified globally by the
`--benchmark_repetitions` flag or on a per benchmark basis by calling
`Repetitions` on the registered benchmark object. When a benchmark is run more
than once, the mean, median and standard deviation of the runs will be
reported.

Additionally, the `--benchmark_report_aggregates_only={true|false}` flag or
`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
are reported. By default the result of each repeated run is reported. When this
option is `true`, only the mean, median and standard deviation of the runs are
reported. Calling `ReportAggregatesOnly(bool)` on a registered benchmark object
overrides the value of the flag for that benchmark.
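As a concrete illustration (reusing the `BM_test` name from the examples
above), repetitions and aggregate-only reporting can be combined on a single
registered benchmark:

```c++
// Run BM_test ten times and report only the mean, median and standard
// deviation of those runs.
BENCHMARK(BM_test)->Repetitions(10)->ReportAggregatesOnly(true);
```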
 | 
			
		||||
 | 
			
		||||
## User-defined statistics for repeated benchmarks
 | 
			
		||||
While having the mean, median and standard deviation is nice, this may not be
enough for everyone. For example, you may want to know the largest observation,
e.g. because you have some real-time constraints. This is easy. The following
code specifies a custom statistic, defined by a lambda function, to be
calculated over the repetitions.
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
void BM_spin_empty(benchmark::State& state) {
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    for (int x = 0; x < state.range(0); ++x) {
 | 
			
		||||
      benchmark::DoNotOptimize(x);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
BENCHMARK(BM_spin_empty)
 | 
			
		||||
  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
 | 
			
		||||
    return *(std::max_element(std::begin(v), std::end(v)));
 | 
			
		||||
  })
 | 
			
		||||
  ->Arg(512);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Fixtures
 | 
			
		||||
Fixture tests are created by
 | 
			
		||||
first defining a type that derives from `::benchmark::Fixture` and then
 | 
			
		||||
creating/registering the tests using the following macros:
 | 
			
		||||
 | 
			
		||||
* `BENCHMARK_F(ClassName, Method)`
 | 
			
		||||
* `BENCHMARK_DEFINE_F(ClassName, Method)`
 | 
			
		||||
* `BENCHMARK_REGISTER_F(ClassName, Method)`
 | 
			
		||||
 | 
			
		||||
For example:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
class MyFixture : public benchmark::Fixture {};
 | 
			
		||||
 | 
			
		||||
BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
 | 
			
		||||
   for (auto _ : st) {
 | 
			
		||||
     ...
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
 | 
			
		||||
   for (auto _ : st) {
 | 
			
		||||
     ...
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
/* BarTest is NOT registered */
 | 
			
		||||
BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
 | 
			
		||||
/* BarTest is now registered */
 | 
			
		||||
```
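Fixtures are typically used to set up state that should not be measured inside
the benchmark loop. A minimal sketch of that pattern, assuming the
`SetUp`/`TearDown` virtuals provided by `benchmark::Fixture` (the fixture name,
member and argument below are invented for this illustration):

```c++
#include <benchmark/benchmark.h>

#include <vector>

class VectorFixture : public benchmark::Fixture {
 public:
  void SetUp(const ::benchmark::State& state) override {
    // Prepare the data outside the timed loop.
    data.assign(static_cast<size_t>(state.range(0)), 42);
  }
  void TearDown(const ::benchmark::State&) override { data.clear(); }

  std::vector<int> data;
};

BENCHMARK_DEFINE_F(VectorFixture, Touch)(benchmark::State& st) {
  for (auto _ : st) {
    benchmark::DoNotOptimize(data.data());
  }
}
BENCHMARK_REGISTER_F(VectorFixture, Touch)->Arg(1 << 10);
```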
 | 
			
		||||
 | 
			
		||||
### Templated fixtures
 | 
			
		||||
You can also create templated fixtures by using the following macros:
 | 
			
		||||
 | 
			
		||||
* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
 | 
			
		||||
* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
 | 
			
		||||
 | 
			
		||||
For example:
 | 
			
		||||
```c++
 | 
			
		||||
template<typename T>
 | 
			
		||||
class MyFixture : public benchmark::Fixture {};
 | 
			
		||||
 | 
			
		||||
BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
 | 
			
		||||
   for (auto _ : st) {
 | 
			
		||||
     ...
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
 | 
			
		||||
   for (auto _ : st) {
 | 
			
		||||
     ...
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## User-defined counters
 | 
			
		||||
 | 
			
		||||
You can add your own counters with user-defined names. The example below
 | 
			
		||||
will add columns "Foo", "Bar" and "Baz" in its output:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void UserCountersExample1(benchmark::State& state) {
 | 
			
		||||
  double numFoos = 0, numBars = 0, numBazs = 0;
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    // ... count Foo,Bar,Baz events
 | 
			
		||||
  }
 | 
			
		||||
  state.counters["Foo"] = numFoos;
 | 
			
		||||
  state.counters["Bar"] = numBars;
 | 
			
		||||
  state.counters["Baz"] = numBazs;
 | 
			
		||||
}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The `state.counters` object is a `std::map` with `std::string` keys
 | 
			
		||||
and `Counter` values. The latter is a `double`-like class, via an implicit
 | 
			
		||||
conversion to `double&`. Thus you can use all of the standard arithmetic
 | 
			
		||||
assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
 | 
			
		||||
 | 
			
		||||
In multithreaded benchmarks, each counter is set on the calling thread only.
 | 
			
		||||
When the benchmark finishes, the counters from each thread will be summed;
 | 
			
		||||
the resulting sum is the value which will be shown for the benchmark.
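For instance, in a sketch like the following (the benchmark name is invented
for this illustration), each of the four benchmark threads sets its own
`"Items"` value and the reported counter is the sum across threads:

```c++
// Each thread counts the items it processed itself; the framework sums the
// per-thread values when the benchmark finishes.
static void BM_CountItems(benchmark::State& state) {
  double items = 0;
  for (auto _ : state) {
    items += 1;  // pretend one item is processed per iteration
  }
  state.counters["Items"] = items;
}
BENCHMARK(BM_CountItems)->Threads(4);
```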
 | 
			
		||||
 | 
			
		||||
The `Counter` constructor accepts two parameters: the value as a `double`
 | 
			
		||||
and a bit flag which allows you to show counters as rates and/or as
 | 
			
		||||
per-thread averages:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
  // sets a simple counter
 | 
			
		||||
  state.counters["Foo"] = numFoos;
 | 
			
		||||
 | 
			
		||||
  // Set the counter as a rate. It will be presented divided
 | 
			
		||||
  // by the duration of the benchmark.
 | 
			
		||||
  state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
 | 
			
		||||
 | 
			
		||||
  // Set the counter as a thread-average quantity. It will
 | 
			
		||||
  // be presented divided by the number of threads.
 | 
			
		||||
  state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
 | 
			
		||||
 | 
			
		||||
  // There's also a combined flag:
 | 
			
		||||
  state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate);
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
When you're compiling in C++11 mode or later you can use `insert()` with
 | 
			
		||||
`std::initializer_list`:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
  // With C++11, this can be done:
 | 
			
		||||
  state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
 | 
			
		||||
  // ... instead of:
 | 
			
		||||
  state.counters["Foo"] = numFoos;
 | 
			
		||||
  state.counters["Bar"] = numBars;
 | 
			
		||||
  state.counters["Baz"] = numBazs;
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Counter reporting
 | 
			
		||||
 | 
			
		||||
When using the console reporter, by default, user counters are printed at
 | 
			
		||||
the end after the table, the same way as ``bytes_processed`` and
 | 
			
		||||
``items_processed``. This is best for cases in which there are few counters,
 | 
			
		||||
or where there are only a couple of lines per benchmark. Here's an example of
 | 
			
		||||
the default output:
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
------------------------------------------------------------------------------
 | 
			
		||||
Benchmark                        Time           CPU Iterations UserCounters...
 | 
			
		||||
------------------------------------------------------------------------------
 | 
			
		||||
BM_UserCounter/threads:8      2248 ns      10277 ns      68808 Bar=16 Bat=40 Baz=24 Foo=8
 | 
			
		||||
BM_UserCounter/threads:1      9797 ns       9788 ns      71523 Bar=2 Bat=5 Baz=3 Foo=1024m
 | 
			
		||||
BM_UserCounter/threads:2      4924 ns       9842 ns      71036 Bar=4 Bat=10 Baz=6 Foo=2
 | 
			
		||||
BM_UserCounter/threads:4      2589 ns      10284 ns      68012 Bar=8 Bat=20 Baz=12 Foo=4
 | 
			
		||||
BM_UserCounter/threads:8      2212 ns      10287 ns      68040 Bar=16 Bat=40 Baz=24 Foo=8
 | 
			
		||||
BM_UserCounter/threads:16     1782 ns      10278 ns      68144 Bar=32 Bat=80 Baz=48 Foo=16
 | 
			
		||||
BM_UserCounter/threads:32     1291 ns      10296 ns      68256 Bar=64 Bat=160 Baz=96 Foo=32
 | 
			
		||||
BM_UserCounter/threads:4      2615 ns      10307 ns      68040 Bar=8 Bat=20 Baz=12 Foo=4
 | 
			
		||||
BM_Factorial                    26 ns         26 ns   26608979 40320
 | 
			
		||||
BM_Factorial/real_time          26 ns         26 ns   26587936 40320
 | 
			
		||||
BM_CalculatePiRange/1           16 ns         16 ns   45704255 0
 | 
			
		||||
BM_CalculatePiRange/8           73 ns         73 ns    9520927 3.28374
 | 
			
		||||
BM_CalculatePiRange/64         609 ns        609 ns    1140647 3.15746
 | 
			
		||||
BM_CalculatePiRange/512       4900 ns       4901 ns     142696 3.14355
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
If this doesn't suit you, you can print each counter as a table column by
 | 
			
		||||
passing the flag `--benchmark_counters_tabular=true` to the benchmark
 | 
			
		||||
application. This is best for cases in which there are a lot of counters, or
 | 
			
		||||
a lot of lines per individual benchmark. Note that this will trigger a
 | 
			
		||||
reprinting of the table header any time the counter set changes between
 | 
			
		||||
individual benchmarks. Here's an example of corresponding output when
 | 
			
		||||
`--benchmark_counters_tabular=true` is passed:
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
---------------------------------------------------------------------------------------
 | 
			
		||||
Benchmark                        Time           CPU Iterations    Bar   Bat   Baz   Foo
 | 
			
		||||
---------------------------------------------------------------------------------------
 | 
			
		||||
BM_UserCounter/threads:8      2198 ns       9953 ns      70688     16    40    24     8
 | 
			
		||||
BM_UserCounter/threads:1      9504 ns       9504 ns      73787      2     5     3     1
 | 
			
		||||
BM_UserCounter/threads:2      4775 ns       9550 ns      72606      4    10     6     2
 | 
			
		||||
BM_UserCounter/threads:4      2508 ns       9951 ns      70332      8    20    12     4
 | 
			
		||||
BM_UserCounter/threads:8      2055 ns       9933 ns      70344     16    40    24     8
 | 
			
		||||
BM_UserCounter/threads:16     1610 ns       9946 ns      70720     32    80    48    16
 | 
			
		||||
BM_UserCounter/threads:32     1192 ns       9948 ns      70496     64   160    96    32
 | 
			
		||||
BM_UserCounter/threads:4      2506 ns       9949 ns      70332      8    20    12     4
 | 
			
		||||
--------------------------------------------------------------
 | 
			
		||||
Benchmark                        Time           CPU Iterations
 | 
			
		||||
--------------------------------------------------------------
 | 
			
		||||
BM_Factorial                    26 ns         26 ns   26392245 40320
 | 
			
		||||
BM_Factorial/real_time          26 ns         26 ns   26494107 40320
 | 
			
		||||
BM_CalculatePiRange/1           15 ns         15 ns   45571597 0
 | 
			
		||||
BM_CalculatePiRange/8           74 ns         74 ns    9450212 3.28374
 | 
			
		||||
BM_CalculatePiRange/64         595 ns        595 ns    1173901 3.15746
 | 
			
		||||
BM_CalculatePiRange/512       4752 ns       4752 ns     147380 3.14355
 | 
			
		||||
BM_CalculatePiRange/4k       37970 ns      37972 ns      18453 3.14184
 | 
			
		||||
BM_CalculatePiRange/32k     303733 ns     303744 ns       2305 3.14162
 | 
			
		||||
BM_CalculatePiRange/256k   2434095 ns    2434186 ns        288 3.1416
 | 
			
		||||
BM_CalculatePiRange/1024k  9721140 ns    9721413 ns         71 3.14159
 | 
			
		||||
BM_CalculatePi/threads:8      2255 ns       9943 ns      70936
 | 
			
		||||
```
 | 
			
		||||
Note above the additional header printed when the benchmark changes from
 | 
			
		||||
``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
 | 
			
		||||
not have the same counter set as ``BM_UserCounter``.
 | 
			
		||||
 | 
			
		||||
## Exiting Benchmarks in Error
 | 
			
		||||
 | 
			
		||||
When errors caused by external influences, such as file I/O and network
communication, occur within a benchmark, the
`State::SkipWithError(const char* msg)` function can be used to skip that run
of the benchmark and report the error. Note that only future iterations of
`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop,
users must explicitly exit the loop, otherwise all remaining iterations will be
performed. Users may explicitly `return` to exit the benchmark immediately.
 | 
			
		||||
 | 
			
		||||
The `SkipWithError(...)` function may be used at any point within the benchmark,
 | 
			
		||||
including before and after the benchmark loop.
 | 
			
		||||
 | 
			
		||||
For example:
 | 
			
		||||
 | 
			
		||||
```c++
 | 
			
		||||
static void BM_test(benchmark::State& state) {
 | 
			
		||||
  auto resource = GetResource();
 | 
			
		||||
  if (!resource.good()) {
 | 
			
		||||
      state.SkipWithError("Resource is not good!");
 | 
			
		||||
      // KeepRunning() loop will not be entered.
 | 
			
		||||
  }
 | 
			
		||||
  while (state.KeepRunning()) {
 | 
			
		||||
      auto data = resource.read_data();
 | 
			
		||||
      if (!resource.good()) {
 | 
			
		||||
        state.SkipWithError("Failed to read data!");
 | 
			
		||||
        break; // Needed to skip the rest of the iteration.
 | 
			
		||||
     }
 | 
			
		||||
     do_stuff(data);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void BM_test_ranged_for(benchmark::State& state) {
 | 
			
		||||
  state.SkipWithError("test will not be entered");
 | 
			
		||||
  for (auto _ : state) {
 | 
			
		||||
    state.SkipWithError("Failed!");
 | 
			
		||||
    break; // REQUIRED to prevent all further iterations.
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Running a subset of the benchmarks
 | 
			
		||||
 | 
			
		||||
The `--benchmark_filter=<regex>` option can be used to only run the benchmarks
 | 
			
		||||
which match the specified `<regex>`. For example:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
 | 
			
		||||
Run on (1 X 2300 MHz CPU )
 | 
			
		||||
2016-06-25 19:34:24
 | 
			
		||||
Benchmark              Time           CPU Iterations
 | 
			
		||||
----------------------------------------------------
 | 
			
		||||
BM_memcpy/32          11 ns         11 ns   79545455
 | 
			
		||||
BM_memcpy/32k       2181 ns       2185 ns     324074
 | 
			
		||||
BM_memcpy/32          12 ns         12 ns   54687500
 | 
			
		||||
BM_memcpy/32k       1834 ns       1837 ns     357143
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Output Formats
 | 
			
		||||
The library supports multiple output formats. Use the
 | 
			
		||||
`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
 | 
			
		||||
is the default format.
 | 
			
		||||
 | 
			
		||||
The Console format is intended to be a human readable format. By default
 | 
			
		||||
the format generates color output. Context is output on stderr and the
 | 
			
		||||
tabular data on stdout. Example tabular output looks like:
 | 
			
		||||
```
 | 
			
		||||
Benchmark                               Time(ns)    CPU(ns) Iterations
 | 
			
		||||
----------------------------------------------------------------------
 | 
			
		||||
BM_SetInsert/1024/1                        28928      29349      23853  133.097kB/s   33.2742k items/s
 | 
			
		||||
BM_SetInsert/1024/8                        32065      32913      21375  949.487kB/s   237.372k items/s
 | 
			
		||||
BM_SetInsert/1024/10                       33157      33648      21431  1.13369MB/s   290.225k items/s
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The JSON format outputs human-readable JSON split into two top-level attributes.
The `context` attribute contains information about the run in general, including
information about the CPU and the date.
The `benchmarks` attribute contains a list of every benchmark run. Example JSON
output looks like:
 | 
			
		||||
```json
 | 
			
		||||
{
 | 
			
		||||
  "context": {
 | 
			
		||||
    "date": "2015/03/17-18:40:25",
 | 
			
		||||
    "num_cpus": 40,
 | 
			
		||||
    "mhz_per_cpu": 2801,
 | 
			
		||||
    "cpu_scaling_enabled": false,
 | 
			
		||||
    "build_type": "debug"
 | 
			
		||||
  },
 | 
			
		||||
  "benchmarks": [
 | 
			
		||||
    {
 | 
			
		||||
      "name": "BM_SetInsert/1024/1",
 | 
			
		||||
      "iterations": 94877,
 | 
			
		||||
      "real_time": 29275,
 | 
			
		||||
      "cpu_time": 29836,
 | 
			
		||||
      "bytes_per_second": 134066,
 | 
			
		||||
      "items_per_second": 33516
 | 
			
		||||
    },
 | 
			
		||||
    {
 | 
			
		||||
      "name": "BM_SetInsert/1024/8",
 | 
			
		||||
      "iterations": 21609,
 | 
			
		||||
      "real_time": 32317,
 | 
			
		||||
      "cpu_time": 32429,
 | 
			
		||||
      "bytes_per_second": 986770,
 | 
			
		||||
      "items_per_second": 246693
 | 
			
		||||
    },
 | 
			
		||||
    {
 | 
			
		||||
      "name": "BM_SetInsert/1024/10",
 | 
			
		||||
      "iterations": 21393,
 | 
			
		||||
      "real_time": 32724,
 | 
			
		||||
      "cpu_time": 33355,
 | 
			
		||||
      "bytes_per_second": 1199226,
 | 
			
		||||
      "items_per_second": 299807
 | 
			
		||||
    }
 | 
			
		||||
  ]
 | 
			
		||||
}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The CSV format outputs comma-separated values. The `context` is output on stderr
 | 
			
		||||
and the CSV itself on stdout. Example CSV output looks like:
 | 
			
		||||
```
 | 
			
		||||
name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
 | 
			
		||||
"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
 | 
			
		||||
"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
 | 
			
		||||
"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Output Files
 | 
			
		||||
The library supports writing the output of the benchmark to a file specified
 | 
			
		||||
by `--benchmark_out=<filename>`. The format of the output can be specified
 | 
			
		||||
using `--benchmark_out_format={json|console|csv}`. Specifying
 | 
			
		||||
`--benchmark_out` does not suppress the console output.
 | 
			
		||||
 | 
			
		||||
## Debug vs Release
 | 
			
		||||
By default, benchmark builds as a debug library. You will see a warning in the output when this is the case. To build it as a release library instead, use:
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
cmake -DCMAKE_BUILD_TYPE=Release
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
To enable link-time optimisation, use
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake cache variables, if autodetection fails.
 | 
			
		||||
If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
 | 
			
		||||
 | 
			
		||||
## Linking against the library
 | 
			
		||||
When using gcc, it is necessary to link against pthread to avoid runtime exceptions.
 | 
			
		||||
This is due to how gcc implements std::thread.
 | 
			
		||||
See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
 | 
			
		||||
 | 
			
		||||
## Compiler Support
 | 
			
		||||
 | 
			
		||||
Google Benchmark uses C++11 when building the library. As such we require
 | 
			
		||||
a modern C++ toolchain, both compiler and standard library.
 | 
			
		||||
 | 
			
		||||
The following minimum versions are strongly recommended to build the library:
 | 
			
		||||
 | 
			
		||||
* GCC 4.8
 | 
			
		||||
* Clang 3.4
 | 
			
		||||
* Visual Studio 2013
 | 
			
		||||
* Intel 2015 Update 1
 | 
			
		||||
 | 
			
		||||
Anything older *may* work.
 | 
			
		||||
 | 
			
		||||
Note: Using the library and its headers in C++03 is supported. C++11 is only
 | 
			
		||||
required to build the library.
 | 
			
		||||
 | 
			
		||||
## Disable CPU frequency scaling
 | 
			
		||||
If you see the following warning:
 | 
			
		||||
```
 | 
			
		||||
***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
 | 
			
		||||
```
 | 
			
		||||
you might want to disable the CPU frequency scaling while running the benchmark:
 | 
			
		||||
```bash
 | 
			
		||||
sudo cpupower frequency-set --governor performance
 | 
			
		||||
./mybench
 | 
			
		||||
sudo cpupower frequency-set --governor powersave
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
# Known Issues
 | 
			
		||||
 | 
			
		||||
### Windows
 | 
			
		||||
 | 
			
		||||
* Users must manually link `shlwapi.lib`. Failure to do so may result
 | 
			
		||||
in unresolved symbols.
 | 
			
		||||
 | 
			
		||||
							
								
								
									
64 benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake (vendored, Normal file)
@@ -0,0 +1,64 @@
# - Adds a compiler flag if it is supported by the compiler
 | 
			
		||||
#
 | 
			
		||||
# This function checks that the supplied compiler flag is supported and then
 | 
			
		||||
# adds it to the corresponding compiler flags
 | 
			
		||||
#
 | 
			
		||||
#  add_cxx_compiler_flag(<FLAG> [<VARIANT>])
 | 
			
		||||
#
 | 
			
		||||
# - Example
 | 
			
		||||
#
 | 
			
		||||
# include(AddCXXCompilerFlag)
 | 
			
		||||
# add_cxx_compiler_flag(-Wall)
 | 
			
		||||
# add_cxx_compiler_flag(-no-strict-aliasing RELEASE)
 | 
			
		||||
# Requires CMake 2.6+
 | 
			
		||||
 | 
			
		||||
if(__add_cxx_compiler_flag)
 | 
			
		||||
  return()
 | 
			
		||||
endif()
 | 
			
		||||
set(__add_cxx_compiler_flag INCLUDED)
 | 
			
		||||
 | 
			
		||||
include(CheckCXXCompilerFlag)
 | 
			
		||||
 | 
			
		||||
function(mangle_compiler_flag FLAG OUTPUT)
 | 
			
		||||
  string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG)
 | 
			
		||||
  string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG})
 | 
			
		||||
  string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
 | 
			
		||||
  string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
 | 
			
		||||
  set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE)
 | 
			
		||||
endfunction(mangle_compiler_flag)
 | 
			
		||||
 | 
			
		||||
function(add_cxx_compiler_flag FLAG)
 | 
			
		||||
  mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
 | 
			
		||||
  set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
 | 
			
		||||
  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
 | 
			
		||||
  check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
 | 
			
		||||
  set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
 | 
			
		||||
  if(${MANGLED_FLAG})
 | 
			
		||||
    set(VARIANT ${ARGV1})
 | 
			
		||||
    if(ARGV1)
 | 
			
		||||
      string(TOUPPER "_${VARIANT}" VARIANT)
 | 
			
		||||
    endif()
 | 
			
		||||
    set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
 | 
			
		||||
  endif()
 | 
			
		||||
endfunction()
 | 
			
		||||
 | 
			
		||||
function(add_required_cxx_compiler_flag FLAG)
 | 
			
		||||
  mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
 | 
			
		||||
  set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
 | 
			
		||||
  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
 | 
			
		||||
  check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
 | 
			
		||||
  set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
 | 
			
		||||
  if(${MANGLED_FLAG})
 | 
			
		||||
    set(VARIANT ${ARGV1})
 | 
			
		||||
    if(ARGV1)
 | 
			
		||||
      string(TOUPPER "_${VARIANT}" VARIANT)
 | 
			
		||||
    endif()
 | 
			
		||||
    set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
 | 
			
		||||
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
 | 
			
		||||
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
 | 
			
		||||
    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
 | 
			
		||||
    set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE)
 | 
			
		||||
  else()
 | 
			
		||||
    message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
 | 
			
		||||
  endif()
 | 
			
		||||
endfunction()
 | 
			
		||||
							
								
								
									
62 benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake (vendored, Normal file)
@@ -0,0 +1,62 @@
# - Compile and run code to check for C++ features
 | 
			
		||||
#
 | 
			
		||||
# This functions compiles a source file under the `cmake` folder
 | 
			
		||||
# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake
 | 
			
		||||
# environment
 | 
			
		||||
#
 | 
			
		||||
#  cxx_feature_check(<FLAG> [<VARIANT>])
 | 
			
		||||
#
 | 
			
		||||
# - Example
 | 
			
		||||
#
 | 
			
		||||
# include(CXXFeatureCheck)
 | 
			
		||||
# cxx_feature_check(STD_REGEX)
 | 
			
		||||
# Requires CMake 2.8.12+
 | 
			
		||||
 | 
			
		||||
if(__cxx_feature_check)
 | 
			
		||||
  return()
 | 
			
		||||
endif()
 | 
			
		||||
set(__cxx_feature_check INCLUDED)
 | 
			
		||||
 | 
			
		||||
function(cxx_feature_check FILE)
 | 
			
		||||
  string(TOLOWER ${FILE} FILE)
 | 
			
		||||
  string(TOUPPER ${FILE} VAR)
 | 
			
		||||
  string(TOUPPER "HAVE_${VAR}" FEATURE)
 | 
			
		||||
  if (DEFINED HAVE_${VAR})
 | 
			
		||||
    set(HAVE_${VAR} 1 PARENT_SCOPE)
 | 
			
		||||
    add_definitions(-DHAVE_${VAR})
 | 
			
		||||
    return()
 | 
			
		||||
  endif()
 | 
			
		||||
 | 
			
		||||
  message("-- Performing Test ${FEATURE}")
 | 
			
		||||
  if(CMAKE_CROSSCOMPILING)
 | 
			
		||||
    try_compile(COMPILE_${FEATURE}
 | 
			
		||||
            ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
 | 
			
		||||
            CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
 | 
			
		||||
            LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
 | 
			
		||||
    if(COMPILE_${FEATURE})
 | 
			
		||||
      message(WARNING
 | 
			
		||||
            "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
 | 
			
		||||
      set(RUN_${FEATURE} 0)
 | 
			
		||||
    else()
 | 
			
		||||
      set(RUN_${FEATURE} 1)
 | 
			
		||||
    endif()
 | 
			
		||||
  else()
 | 
			
		||||
    message("-- Performing Test ${FEATURE}")
 | 
			
		||||
    try_run(RUN_${FEATURE} COMPILE_${FEATURE}
 | 
			
		||||
            ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
 | 
			
		||||
            CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
 | 
			
		||||
            LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
 | 
			
		||||
  endif()
 | 
			
		||||
 | 
			
		||||
  if(RUN_${FEATURE} EQUAL 0)
 | 
			
		||||
    message("-- Performing Test ${FEATURE} -- success")
 | 
			
		||||
    set(HAVE_${VAR} 1 PARENT_SCOPE)
 | 
			
		||||
    add_definitions(-DHAVE_${VAR})
 | 
			
		||||
  else()
 | 
			
		||||
    if(NOT COMPILE_${FEATURE})
 | 
			
		||||
      message("-- Performing Test ${FEATURE} -- failed to compile")
 | 
			
		||||
    else()
 | 
			
		||||
      message("-- Performing Test ${FEATURE} -- compiled but failed to run")
 | 
			
		||||
    endif()
 | 
			
		||||
  endif()
 | 
			
		||||
endfunction()
 | 
			
		||||
							
								
								
									
1 benchmarks/thirdparty/benchmark/cmake/Config.cmake.in (vendored, Normal file)
@@ -0,0 +1 @@
include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")
 | 
			
		||||
							
								
								
									
51 benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake (vendored, Normal file)
@@ -0,0 +1,51 @@
# - Returns a version string from Git tags
 | 
			
		||||
#
 | 
			
		||||
# This function inspects the annotated git tags for the project and returns a string
 | 
			
		||||
# into a CMake variable
 | 
			
		||||
#
 | 
			
		||||
#  get_git_version(<var>)
 | 
			
		||||
#
 | 
			
		||||
# - Example
 | 
			
		||||
#
 | 
			
		||||
# include(GetGitVersion)
 | 
			
		||||
# get_git_version(GIT_VERSION)
 | 
			
		||||
#
 | 
			
		||||
# Requires CMake 2.8.11+
 | 
			
		||||
find_package(Git)
 | 
			
		||||
 | 
			
		||||
if(__get_git_version)
 | 
			
		||||
  return()
 | 
			
		||||
endif()
 | 
			
		||||
set(__get_git_version INCLUDED)
 | 
			
		||||
 | 
			
		||||
function(get_git_version var)
 | 
			
		||||
  if(GIT_EXECUTABLE)
 | 
			
		||||
      execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
 | 
			
		||||
          RESULT_VARIABLE status
 | 
			
		||||
          OUTPUT_VARIABLE GIT_VERSION
 | 
			
		||||
          ERROR_QUIET)
 | 
			
		||||
      if(${status})
 | 
			
		||||
          set(GIT_VERSION "v0.0.0")
 | 
			
		||||
      else()
 | 
			
		||||
          string(STRIP ${GIT_VERSION} GIT_VERSION)
 | 
			
		||||
          string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION})
 | 
			
		||||
      endif()
 | 
			
		||||
 | 
			
		||||
      # Work out if the repository is dirty
 | 
			
		||||
      execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
 | 
			
		||||
          OUTPUT_QUIET
 | 
			
		||||
          ERROR_QUIET)
 | 
			
		||||
      execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
 | 
			
		||||
          OUTPUT_VARIABLE GIT_DIFF_INDEX
 | 
			
		||||
          ERROR_QUIET)
 | 
			
		||||
      string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
 | 
			
		||||
      if (${GIT_DIRTY})
 | 
			
		||||
          set(GIT_VERSION "${GIT_VERSION}-dirty")
 | 
			
		||||
      endif()
 | 
			
		||||
  else()
 | 
			
		||||
      set(GIT_VERSION "v0.0.0")
 | 
			
		||||
  endif()
 | 
			
		||||
 | 
			
		||||
  message("-- git Version: ${GIT_VERSION}")
 | 
			
		||||
  set(${var} ${GIT_VERSION} PARENT_SCOPE)
 | 
			
		||||
endfunction()
 | 
			
		||||
							
								
								
									
79 benchmarks/thirdparty/benchmark/cmake/HandleGTest.cmake (vendored, Normal file)
@@ -0,0 +1,79 @@
 | 
			
		||||
macro(split_list listname)
 | 
			
		||||
  string(REPLACE ";" " " ${listname} "${${listname}}")
 | 
			
		||||
endmacro()
 | 
			
		||||
 | 
			
		||||
macro(build_external_gtest)
 | 
			
		||||
  include(ExternalProject)
 | 
			
		||||
  set(GTEST_FLAGS "")
 | 
			
		||||
  if (BENCHMARK_USE_LIBCXX)
 | 
			
		||||
    if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
 | 
			
		||||
      list(APPEND GTEST_FLAGS -stdlib=libc++)
 | 
			
		||||
    else()
 | 
			
		||||
      message(WARNING "Unsupported compiler (${CMAKE_CXX_COMPILER}) when using libc++")
 | 
			
		||||
    endif()
 | 
			
		||||
  endif()
 | 
			
		||||
  if (BENCHMARK_BUILD_32_BITS)
 | 
			
		||||
    list(APPEND GTEST_FLAGS -m32)
 | 
			
		||||
  endif()
 | 
			
		||||
  if (NOT "${CMAKE_CXX_FLAGS}" STREQUAL "")
 | 
			
		||||
    list(APPEND GTEST_FLAGS ${CMAKE_CXX_FLAGS})
 | 
			
		||||
  endif()
 | 
			
		||||
  string(TOUPPER "${CMAKE_BUILD_TYPE}" GTEST_BUILD_TYPE)
 | 
			
		||||
  if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE")
 | 
			
		||||
    set(GTEST_BUILD_TYPE "DEBUG")
 | 
			
		||||
  endif()
 | 
			
		||||
  split_list(GTEST_FLAGS)
 | 
			
		||||
  ExternalProject_Add(googletest
 | 
			
		||||
      EXCLUDE_FROM_ALL ON
 | 
			
		||||
      GIT_REPOSITORY https://github.com/google/googletest.git
 | 
			
		||||
      GIT_TAG master
 | 
			
		||||
      PREFIX "${CMAKE_BINARY_DIR}/googletest"
 | 
			
		||||
      INSTALL_DIR "${CMAKE_BINARY_DIR}/googletest"
 | 
			
		||||
      CMAKE_CACHE_ARGS
 | 
			
		||||
        -DCMAKE_BUILD_TYPE:STRING=${GTEST_BUILD_TYPE}
 | 
			
		||||
        -DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER}
 | 
			
		||||
        -DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER}
 | 
			
		||||
        -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
 | 
			
		||||
        -DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS}
 | 
			
		||||
        -Dgtest_force_shared_crt:BOOL=ON
 | 
			
		||||
      )
 | 
			
		||||
 | 
			
		||||
  ExternalProject_Get_Property(googletest install_dir)
 | 
			
		||||
 | 
			
		||||
  add_library(gtest UNKNOWN IMPORTED)
 | 
			
		||||
  add_library(gtest_main UNKNOWN IMPORTED)
 | 
			
		||||
 | 
			
		||||
  set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}")
 | 
			
		||||
  set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")
 | 
			
		||||
 | 
			
		||||
  if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG")
 | 
			
		||||
    set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}")
 | 
			
		||||
  endif()
 | 
			
		||||
  file(MAKE_DIRECTORY ${install_dir}/include)
 | 
			
		||||
  set_target_properties(gtest PROPERTIES
 | 
			
		||||
    IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}gtest${LIB_SUFFIX}
 | 
			
		||||
    INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
 | 
			
		||||
  )
 | 
			
		||||
  set_target_properties(gtest_main PROPERTIES
 | 
			
		||||
    IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}gtest_main${LIB_SUFFIX}
 | 
			
		||||
    INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
 | 
			
		||||
  )
 | 
			
		||||
  add_dependencies(gtest googletest)
 | 
			
		||||
  add_dependencies(gtest_main googletest)
 | 
			
		||||
  set(GTEST_BOTH_LIBRARIES gtest gtest_main)
 | 
			
		||||
  #set(GTEST_INCLUDE_DIRS ${install_dir}/include)
 | 
			
		||||
endmacro(build_external_gtest)
 | 
			
		||||
 | 
			
		||||
if (BENCHMARK_ENABLE_GTEST_TESTS)
 | 
			
		||||
  if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest)
 | 
			
		||||
    set(INSTALL_GTEST OFF CACHE INTERNAL "")
 | 
			
		||||
    set(INSTALL_GMOCK OFF CACHE INTERNAL "")
 | 
			
		||||
    add_subdirectory(${CMAKE_SOURCE_DIR}/googletest)
 | 
			
		||||
    set(GTEST_BOTH_LIBRARIES gtest gtest_main)
 | 
			
		||||
  elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES)
 | 
			
		||||
    build_external_gtest()
 | 
			
		||||
  else()
 | 
			
		||||
    find_package(GTest REQUIRED)
 | 
			
		||||
  endif()
 | 
			
		||||
endif()
 | 
			
		||||
							
								
								
									
16 benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMAr.cmake (vendored, Normal file)
@@ -0,0 +1,16 @@
include(FeatureSummary)
 | 
			
		||||
 | 
			
		||||
find_program(LLVMAR_EXECUTABLE
 | 
			
		||||
  NAMES llvm-ar
 | 
			
		||||
  DOC "The llvm-ar executable"
 | 
			
		||||
  )
 | 
			
		||||
 | 
			
		||||
include(FindPackageHandleStandardArgs)
 | 
			
		||||
find_package_handle_standard_args(LLVMAr
 | 
			
		||||
  DEFAULT_MSG
 | 
			
		||||
  LLVMAR_EXECUTABLE)
 | 
			
		||||
 | 
			
		||||
SET_PACKAGE_PROPERTIES(LLVMAr PROPERTIES
 | 
			
		||||
  URL https://llvm.org/docs/CommandGuide/llvm-ar.html
 | 
			
		||||
  DESCRIPTION "create, modify, and extract from archives"
 | 
			
		||||
)
 | 
			
		||||
							
								
								
									
16 benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMNm.cmake (vendored, Normal file)
@@ -0,0 +1,16 @@
include(FeatureSummary)
 | 
			
		||||
 | 
			
		||||
find_program(LLVMNM_EXECUTABLE
 | 
			
		||||
  NAMES llvm-nm
 | 
			
		||||
  DOC "The llvm-nm executable"
 | 
			
		||||
  )
 | 
			
		||||
 | 
			
		||||
include(FindPackageHandleStandardArgs)
 | 
			
		||||
find_package_handle_standard_args(LLVMNm
 | 
			
		||||
  DEFAULT_MSG
 | 
			
		||||
  LLVMNM_EXECUTABLE)
 | 
			
		||||
 | 
			
		||||
SET_PACKAGE_PROPERTIES(LLVMNm PROPERTIES
 | 
			
		||||
  URL https://llvm.org/docs/CommandGuide/llvm-nm.html
 | 
			
		||||
  DESCRIPTION "list LLVM bitcode and object file’s symbol table"
 | 
			
		||||
)
 | 
			
		||||
							
								
								
									
15 benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMRanLib.cmake (vendored, Normal file)
@@ -0,0 +1,15 @@
include(FeatureSummary)
 | 
			
		||||
 | 
			
		||||
find_program(LLVMRANLIB_EXECUTABLE
 | 
			
		||||
  NAMES llvm-ranlib
 | 
			
		||||
  DOC "The llvm-ranlib executable"
 | 
			
		||||
  )
 | 
			
		||||
 | 
			
		||||
include(FindPackageHandleStandardArgs)
 | 
			
		||||
find_package_handle_standard_args(LLVMRanLib
 | 
			
		||||
  DEFAULT_MSG
 | 
			
		||||
  LLVMRANLIB_EXECUTABLE)
 | 
			
		||||
 | 
			
		||||
SET_PACKAGE_PROPERTIES(LLVMRanLib PROPERTIES
 | 
			
		||||
  DESCRIPTION "generate index for LLVM archive"
 | 
			
		||||
)
 | 
			
		||||
							
								
								
									
11 benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in (vendored, Normal file)
@@ -0,0 +1,11 @@
prefix=@CMAKE_INSTALL_PREFIX@
 | 
			
		||||
exec_prefix=${prefix}
 | 
			
		||||
libdir=${prefix}/lib
 | 
			
		||||
includedir=${prefix}/include
 | 
			
		||||
 | 
			
		||||
Name: @PROJECT_NAME@
 | 
			
		||||
Description: Google microbenchmark framework
 | 
			
		||||
Version: @VERSION@
 | 
			
		||||
 | 
			
		||||
Libs: -L${libdir} -lbenchmark
 | 
			
		||||
Cflags: -I${includedir}
 | 
			
		||||
							
								
								
									
12 benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp (vendored, Normal file)
@@ -0,0 +1,12 @@
#include <gnuregex.h>
 | 
			
		||||
#include <string>
 | 
			
		||||
int main() {
 | 
			
		||||
  std::string str = "test0159";
 | 
			
		||||
  regex_t re;
 | 
			
		||||
  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
 | 
			
		||||
  if (ec != 0) {
 | 
			
		||||
    return ec;
 | 
			
		||||
  }
 | 
			
		||||
  return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
							
								
								
									
8 benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake (vendored, Normal file)
@@ -0,0 +1,8 @@
find_package(LLVMAr REQUIRED)
set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE)

find_package(LLVMNm REQUIRED)
set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE)

find_package(LLVMRanLib REQUIRED)
set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE)
 | 
			
		||||
							
								
								
									
14 benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp (vendored, Normal file)
@@ -0,0 +1,14 @@
#include <regex.h>
 | 
			
		||||
#include <string>
 | 
			
		||||
int main() {
 | 
			
		||||
  std::string str = "test0159";
 | 
			
		||||
  regex_t re;
 | 
			
		||||
  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
 | 
			
		||||
  if (ec != 0) {
 | 
			
		||||
    return ec;
 | 
			
		||||
  }
 | 
			
		||||
  int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
 | 
			
		||||
  regfree(&re);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
							
								
								
									
10 benchmarks/thirdparty/benchmark/cmake/std_regex.cpp (vendored, Normal file)
@@ -0,0 +1,10 @@
#include <regex>
#include <string>
int main() {
  const std::string str = "test0159";
  std::regex re;
  re = std::regex("^[a-z]+[0-9]+$",
       std::regex_constants::extended | std::regex_constants::nosubs);
  return std::regex_search(str, re) ? 0 : -1;
}
 | 
			
		||||
 | 
			
		||||
							
								
								
									
7 benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp (vendored, Normal file)
@@ -0,0 +1,7 @@
#include <chrono>

int main() {
    typedef std::chrono::steady_clock Clock;
    Clock::time_point tp = Clock::now();
    ((void)tp);
}
 | 
			
		||||
							
								
								
									
4 benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp (vendored, Normal file)
@@ -0,0 +1,4 @@
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"

int main() {}
 | 
			
		||||
							
								
								
									
1389 benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h (vendored, Normal file)
(File diff suppressed because it is too large)

87 benchmarks/thirdparty/benchmark/src/CMakeLists.txt (vendored, Normal file)
@@ -0,0 +1,87 @@
# Allow the source files to find headers in src/
 | 
			
		||||
include_directories(${PROJECT_SOURCE_DIR}/src)
 | 
			
		||||
 | 
			
		||||
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
 | 
			
		||||
  list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
 | 
			
		||||
  list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
 | 
			
		||||
endif()
 | 
			
		||||
 | 
			
		||||
file(GLOB
 | 
			
		||||
  SOURCE_FILES
 | 
			
		||||
    *.cc
 | 
			
		||||
    ${PROJECT_SOURCE_DIR}/include/benchmark/*.h
 | 
			
		||||
    ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
 | 
			
		||||
 | 
			
		||||
add_library(benchmark ${SOURCE_FILES})
 | 
			
		||||
set_target_properties(benchmark PROPERTIES
 | 
			
		||||
  OUTPUT_NAME "benchmark"
 | 
			
		||||
  VERSION ${GENERIC_LIB_VERSION}
 | 
			
		||||
  SOVERSION ${GENERIC_LIB_SOVERSION}
 | 
			
		||||
)
 | 
			
		||||
target_include_directories(benchmark PUBLIC
 | 
			
		||||
    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
 | 
			
		||||
    )
 | 
			
		||||
 | 
			
		||||
# Link threads.
 | 
			
		||||
target_link_libraries(benchmark  ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
 | 
			
		||||
find_library(LIBRT rt)
 | 
			
		||||
if(LIBRT)
 | 
			
		||||
  target_link_libraries(benchmark ${LIBRT})
 | 
			
		||||
endif()
 | 
			
		||||
 | 
			
		||||
# We need extra libraries on Windows
 | 
			
		||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
 | 
			
		||||
  target_link_libraries(benchmark Shlwapi)
 | 
			
		||||
endif()
 | 
			
		||||
 | 
			
		||||
set(include_install_dir "include")
 | 
			
		||||
set(lib_install_dir "lib/")
 | 
			
		||||
set(bin_install_dir "bin/")
 | 
			
		||||
set(config_install_dir "lib/cmake/${PROJECT_NAME}")
 | 
			
		||||
set(pkgconfig_install_dir "lib/pkgconfig")
 | 
			
		||||
 | 
			
		||||
set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
 | 
			
		||||
 | 
			
		||||
set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
 | 
			
		||||
set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
 | 
			
		||||
set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc")
 | 
			
		||||
set(targets_export_name "${PROJECT_NAME}Targets")
 | 
			
		||||
 | 
			
		||||
set(namespace "${PROJECT_NAME}::")
 | 
			
		||||
 | 
			
		||||
include(CMakePackageConfigHelpers)
 | 
			
		||||
write_basic_package_version_file(
 | 
			
		||||
    "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
 | 
			
		||||
configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY)
 | 
			
		||||
 | 
			
		||||
if (BENCHMARK_ENABLE_INSTALL)
 | 
			
		||||
  # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
 | 
			
		||||
  install(
 | 
			
		||||
    TARGETS benchmark
 | 
			
		||||
    EXPORT ${targets_export_name}
 | 
			
		||||
    ARCHIVE DESTINATION ${lib_install_dir}
 | 
			
		||||
    LIBRARY DESTINATION ${lib_install_dir}
 | 
			
		||||
    RUNTIME DESTINATION ${bin_install_dir}
 | 
			
		||||
    INCLUDES DESTINATION ${include_install_dir})
 | 
			
		||||
 | 
			
		||||
  install(
 | 
			
		||||
    DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
 | 
			
		||||
    DESTINATION ${include_install_dir}
 | 
			
		||||
    FILES_MATCHING PATTERN "*.*h")
 | 
			
		||||
 | 
			
		||||
  install(
 | 
			
		||||
      FILES "${project_config}" "${version_config}"
 | 
			
		||||
      DESTINATION "${config_install_dir}")
 | 
			
		||||
 | 
			
		||||
  install(
 | 
			
		||||
      FILES "${pkg_config}"
 | 
			
		||||
      DESTINATION "${pkgconfig_install_dir}")
 | 
			
		||||
 | 
			
		||||
  install(
 | 
			
		||||
      EXPORT "${targets_export_name}"
 | 
			
		||||
      NAMESPACE "${namespace}"
 | 
			
		||||
      DESTINATION "${config_install_dir}")
 | 
			
		||||
endif()
 | 
			
		||||
							
								
								
									
33 benchmarks/thirdparty/benchmark/src/arraysize.h (vendored, Normal file)
@@ -0,0 +1,33 @@
#ifndef BENCHMARK_ARRAYSIZE_H_
#define BENCHMARK_ARRAYSIZE_H_

#include "internal_macros.h"

namespace benchmark {
namespace internal {
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example.  If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
//

// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];

// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif

#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))

}  // end namespace internal
}  // end namespace benchmark

#endif  // BENCHMARK_ARRAYSIZE_H_
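
A brief usage sketch from the editor (not part of the vendored header): because the macro expands to a sizeof over the helper's return type, the result is usable wherever a compile-time constant is required, and passing a pointer fails to compile.

    #include <cstdio>
    #include "arraysize.h"  // path as seen from inside src/

    static const int kPrimes[] = {2, 3, 5, 7, 11};
    static char seen[arraysize(kPrimes)];  // sized at compile time: 5 bytes

    int main() {
      std::printf("%zu %zu\n", arraysize(kPrimes), sizeof(seen));  // prints "5 5"
      // const int* p = kPrimes;
      // arraysize(p);  // error: no ArraySizeHelper overload accepts a pointer
      return 0;
    }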
							
								
								
									
714 benchmarks/thirdparty/benchmark/src/benchmark.cc vendored Normal file
@@ -0,0 +1,714 @@
// Copyright 2015 Google Inc. All rights reserved.
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
#include "benchmark/benchmark.h"
 | 
			
		||||
#include "benchmark_api_internal.h"
 | 
			
		||||
#include "internal_macros.h"
 | 
			
		||||
 | 
			
		||||
#ifndef BENCHMARK_OS_WINDOWS
 | 
			
		||||
#include <sys/resource.h>
 | 
			
		||||
#include <sys/time.h>
 | 
			
		||||
#include <unistd.h>
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
#include <atomic>
 | 
			
		||||
#include <condition_variable>
 | 
			
		||||
#include <cstdio>
 | 
			
		||||
#include <cstdlib>
 | 
			
		||||
#include <cstring>
 | 
			
		||||
#include <fstream>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <memory>
 | 
			
		||||
#include <thread>
 | 
			
		||||
 | 
			
		||||
#include "check.h"
 | 
			
		||||
#include "colorprint.h"
 | 
			
		||||
#include "commandlineflags.h"
 | 
			
		||||
#include "complexity.h"
 | 
			
		||||
#include "counter.h"
 | 
			
		||||
#include "internal_macros.h"
 | 
			
		||||
#include "log.h"
 | 
			
		||||
#include "mutex.h"
 | 
			
		||||
#include "re.h"
 | 
			
		||||
#include "statistics.h"
 | 
			
		||||
#include "string_util.h"
 | 
			
		||||
#include "timers.h"
 | 
			
		||||
 | 
			
		||||
DEFINE_bool(benchmark_list_tests, false,
 | 
			
		||||
            "Print a list of benchmarks. This option overrides all other "
 | 
			
		||||
            "options.");
 | 
			
		||||
 | 
			
		||||
DEFINE_string(benchmark_filter, ".",
 | 
			
		||||
              "A regular expression that specifies the set of benchmarks "
 | 
			
		||||
              "to execute.  If this flag is empty, no benchmarks are run.  "
 | 
			
		||||
              "If this flag is the string \"all\", all benchmarks linked "
 | 
			
		||||
              "into the process are run.");
 | 
			
		||||
 | 
			
		||||
DEFINE_double(benchmark_min_time, 0.5,
 | 
			
		||||
              "Minimum number of seconds we should run benchmark before "
 | 
			
		||||
              "results are considered significant.  For cpu-time based "
 | 
			
		||||
              "tests, this is the lower bound on the total cpu time "
 | 
			
		||||
              "used by all threads that make up the test.  For real-time "
 | 
			
		||||
              "based tests, this is the lower bound on the elapsed time "
 | 
			
		||||
              "of the benchmark execution, regardless of number of "
 | 
			
		||||
              "threads.");
 | 
			
		||||
 | 
			
		||||
DEFINE_int32(benchmark_repetitions, 1,
 | 
			
		||||
             "The number of runs of each benchmark. If greater than 1, the "
 | 
			
		||||
             "mean and standard deviation of the runs will be reported.");
 | 
			
		||||
 | 
			
		||||
DEFINE_bool(benchmark_report_aggregates_only, false,
 | 
			
		||||
            "Report the result of each benchmark repetitions. When 'true' is "
 | 
			
		||||
            "specified only the mean, standard deviation, and other statistics "
 | 
			
		||||
            "are reported for repeated benchmarks.");
 | 
			
		||||
 | 
			
		||||
DEFINE_string(benchmark_format, "console",
 | 
			
		||||
              "The format to use for console output. Valid values are "
 | 
			
		||||
              "'console', 'json', or 'csv'.");
 | 
			
		||||
 | 
			
		||||
DEFINE_string(benchmark_out_format, "json",
 | 
			
		||||
              "The format to use for file output. Valid values are "
 | 
			
		||||
              "'console', 'json', or 'csv'.");
 | 
			
		||||
 | 
			
		||||
DEFINE_string(benchmark_out, "", "The file to write additional output to");
 | 
			
		||||
 | 
			
		||||
DEFINE_string(benchmark_color, "auto",
 | 
			
		||||
              "Whether to use colors in the output.  Valid values: "
 | 
			
		||||
              "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
 | 
			
		||||
              "colors if the output is being sent to a terminal and the TERM "
 | 
			
		||||
              "environment variable is set to a terminal type that supports "
 | 
			
		||||
              "colors.");
 | 
			
		||||
 | 
			
		||||
DEFINE_bool(benchmark_counters_tabular, false,
 | 
			
		||||
            "Whether to use tabular format when printing user counters to "
 | 
			
		||||
            "the console.  Valid values: 'true'/'yes'/1, 'false'/'no'/0."
 | 
			
		||||
            "Defaults to false.");
 | 
			
		||||
 | 
			
		||||
DEFINE_int32(v, 0, "The level of verbose logging to output");
 | 
			
		||||
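// Editor's note, not part of the vendored file: the flags defined above are
// consumed by ParseCommandLineFlags() further down in this file. A typical
// invocation of a benchmark binary (the name ./mybench is illustrative only):
//
//   ./mybench --benchmark_filter=BM_StringCopy \
//             --benchmark_min_time=2.0 --benchmark_repetitions=3 \
//             --benchmark_out=results.json --benchmark_out_format=json
//
// This runs every benchmark whose name matches the regex for at least two
// seconds, repeats each one three times, prints to the console, and also
// writes a JSON report to results.json.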
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
 | 
			
		||||
namespace {
 | 
			
		||||
static const size_t kMaxIterations = 1000000000;
 | 
			
		||||
}  // end namespace
 | 
			
		||||
 | 
			
		||||
namespace internal {
 | 
			
		||||
 | 
			
		||||
void UseCharPointer(char const volatile*) {}
 | 
			
		||||
 | 
			
		||||
class ThreadManager {
 | 
			
		||||
 public:
 | 
			
		||||
  ThreadManager(int num_threads)
 | 
			
		||||
      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
 | 
			
		||||
 | 
			
		||||
  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
 | 
			
		||||
    return benchmark_mutex_;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
 | 
			
		||||
    return start_stop_barrier_.wait();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
 | 
			
		||||
    start_stop_barrier_.removeThread();
 | 
			
		||||
    if (--alive_threads_ == 0) {
 | 
			
		||||
      MutexLock lock(end_cond_mutex_);
 | 
			
		||||
      end_condition_.notify_all();
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
 | 
			
		||||
    MutexLock lock(end_cond_mutex_);
 | 
			
		||||
    end_condition_.wait(lock.native_handle(),
 | 
			
		||||
                        [this]() { return alive_threads_ == 0; });
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 public:
 | 
			
		||||
  struct Result {
 | 
			
		||||
    double real_time_used = 0;
 | 
			
		||||
    double cpu_time_used = 0;
 | 
			
		||||
    double manual_time_used = 0;
 | 
			
		||||
    int64_t bytes_processed = 0;
 | 
			
		||||
    int64_t items_processed = 0;
 | 
			
		||||
    int complexity_n = 0;
 | 
			
		||||
    std::string report_label_;
 | 
			
		||||
    std::string error_message_;
 | 
			
		||||
    bool has_error_ = false;
 | 
			
		||||
    UserCounters counters;
 | 
			
		||||
  };
 | 
			
		||||
  GUARDED_BY(GetBenchmarkMutex()) Result results;
 | 
			
		||||
 | 
			
		||||
 private:
 | 
			
		||||
  mutable Mutex benchmark_mutex_;
 | 
			
		||||
  std::atomic<int> alive_threads_;
 | 
			
		||||
  Barrier start_stop_barrier_;
 | 
			
		||||
  Mutex end_cond_mutex_;
 | 
			
		||||
  Condition end_condition_;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
// Timer management class
 | 
			
		||||
class ThreadTimer {
 | 
			
		||||
 public:
 | 
			
		||||
  ThreadTimer() = default;
 | 
			
		||||
 | 
			
		||||
  // Called by each thread
 | 
			
		||||
  void StartTimer() {
 | 
			
		||||
    running_ = true;
 | 
			
		||||
    start_real_time_ = ChronoClockNow();
 | 
			
		||||
    start_cpu_time_ = ThreadCPUUsage();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Called by each thread
 | 
			
		||||
  void StopTimer() {
 | 
			
		||||
    CHECK(running_);
 | 
			
		||||
    running_ = false;
 | 
			
		||||
    real_time_used_ += ChronoClockNow() - start_real_time_;
 | 
			
		||||
    // Floating point error can result in the subtraction producing a negative
 | 
			
		||||
    // time. Guard against that.
 | 
			
		||||
    cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Called by each thread
 | 
			
		||||
  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
 | 
			
		||||
 | 
			
		||||
  bool running() const { return running_; }
 | 
			
		||||
 | 
			
		||||
  // REQUIRES: timer is not running
 | 
			
		||||
  double real_time_used() {
 | 
			
		||||
    CHECK(!running_);
 | 
			
		||||
    return real_time_used_;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // REQUIRES: timer is not running
 | 
			
		||||
  double cpu_time_used() {
 | 
			
		||||
    CHECK(!running_);
 | 
			
		||||
    return cpu_time_used_;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // REQUIRES: timer is not running
 | 
			
		||||
  double manual_time_used() {
 | 
			
		||||
    CHECK(!running_);
 | 
			
		||||
    return manual_time_used_;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 private:
 | 
			
		||||
  bool running_ = false;        // Is the timer running
 | 
			
		||||
  double start_real_time_ = 0;  // If running_
 | 
			
		||||
  double start_cpu_time_ = 0;   // If running_
 | 
			
		||||
 | 
			
		||||
  // Accumulated time so far (does not contain current slice if running_)
 | 
			
		||||
  double real_time_used_ = 0;
 | 
			
		||||
  double cpu_time_used_ = 0;
 | 
			
		||||
  // Manually set iteration time. User sets this with SetIterationTime(seconds).
 | 
			
		||||
  double manual_time_used_ = 0;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
namespace {
 | 
			
		||||
 | 
			
		||||
BenchmarkReporter::Run CreateRunReport(
 | 
			
		||||
    const benchmark::internal::Benchmark::Instance& b,
 | 
			
		||||
    const internal::ThreadManager::Result& results, size_t iters,
 | 
			
		||||
    double seconds) {
 | 
			
		||||
  // Create report about this benchmark run.
 | 
			
		||||
  BenchmarkReporter::Run report;
 | 
			
		||||
 | 
			
		||||
  report.benchmark_name = b.name;
 | 
			
		||||
  report.error_occurred = results.has_error_;
 | 
			
		||||
  report.error_message = results.error_message_;
 | 
			
		||||
  report.report_label = results.report_label_;
 | 
			
		||||
  // Report the total iterations across all threads.
 | 
			
		||||
  report.iterations = static_cast<int64_t>(iters) * b.threads;
 | 
			
		||||
  report.time_unit = b.time_unit;
 | 
			
		||||
 | 
			
		||||
  if (!report.error_occurred) {
 | 
			
		||||
    double bytes_per_second = 0;
 | 
			
		||||
    if (results.bytes_processed > 0 && seconds > 0.0) {
 | 
			
		||||
      bytes_per_second = (results.bytes_processed / seconds);
 | 
			
		||||
    }
 | 
			
		||||
    double items_per_second = 0;
 | 
			
		||||
    if (results.items_processed > 0 && seconds > 0.0) {
 | 
			
		||||
      items_per_second = (results.items_processed / seconds);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (b.use_manual_time) {
 | 
			
		||||
      report.real_accumulated_time = results.manual_time_used;
 | 
			
		||||
    } else {
 | 
			
		||||
      report.real_accumulated_time = results.real_time_used;
 | 
			
		||||
    }
 | 
			
		||||
    report.cpu_accumulated_time = results.cpu_time_used;
 | 
			
		||||
    report.bytes_per_second = bytes_per_second;
 | 
			
		||||
    report.items_per_second = items_per_second;
 | 
			
		||||
    report.complexity_n = results.complexity_n;
 | 
			
		||||
    report.complexity = b.complexity;
 | 
			
		||||
    report.complexity_lambda = b.complexity_lambda;
 | 
			
		||||
    report.statistics = b.statistics;
 | 
			
		||||
    report.counters = results.counters;
 | 
			
		||||
    internal::Finish(&report.counters, seconds, b.threads);
 | 
			
		||||
  }
 | 
			
		||||
  return report;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Execute one thread of benchmark b for the specified number of iterations.
 | 
			
		||||
// Adds the stats collected for the thread into *total.
 | 
			
		||||
void RunInThread(const benchmark::internal::Benchmark::Instance* b,
 | 
			
		||||
                 size_t iters, int thread_id,
 | 
			
		||||
                 internal::ThreadManager* manager) {
 | 
			
		||||
  internal::ThreadTimer timer;
 | 
			
		||||
  State st(iters, b->arg, thread_id, b->threads, &timer, manager);
 | 
			
		||||
  b->benchmark->Run(st);
 | 
			
		||||
  CHECK(st.iterations() == st.max_iterations)
 | 
			
		||||
      << "Benchmark returned before State::KeepRunning() returned false!";
 | 
			
		||||
  {
 | 
			
		||||
    MutexLock l(manager->GetBenchmarkMutex());
 | 
			
		||||
    internal::ThreadManager::Result& results = manager->results;
 | 
			
		||||
    results.cpu_time_used += timer.cpu_time_used();
 | 
			
		||||
    results.real_time_used += timer.real_time_used();
 | 
			
		||||
    results.manual_time_used += timer.manual_time_used();
 | 
			
		||||
    results.bytes_processed += st.bytes_processed();
 | 
			
		||||
    results.items_processed += st.items_processed();
 | 
			
		||||
    results.complexity_n += st.complexity_length_n();
 | 
			
		||||
    internal::Increment(&results.counters, st.counters);
 | 
			
		||||
  }
 | 
			
		||||
  manager->NotifyThreadComplete();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::vector<BenchmarkReporter::Run> RunBenchmark(
 | 
			
		||||
    const benchmark::internal::Benchmark::Instance& b,
 | 
			
		||||
    std::vector<BenchmarkReporter::Run>* complexity_reports) {
 | 
			
		||||
  std::vector<BenchmarkReporter::Run> reports;  // return value
 | 
			
		||||
 | 
			
		||||
  const bool has_explicit_iteration_count = b.iterations != 0;
 | 
			
		||||
  size_t iters = has_explicit_iteration_count ? b.iterations : 1;
 | 
			
		||||
  std::unique_ptr<internal::ThreadManager> manager;
 | 
			
		||||
  std::vector<std::thread> pool(b.threads - 1);
 | 
			
		||||
  const int repeats =
 | 
			
		||||
      b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
 | 
			
		||||
  const bool report_aggregates_only =
 | 
			
		||||
      repeats != 1 &&
 | 
			
		||||
      (b.report_mode == internal::RM_Unspecified
 | 
			
		||||
           ? FLAGS_benchmark_report_aggregates_only
 | 
			
		||||
           : b.report_mode == internal::RM_ReportAggregatesOnly);
 | 
			
		||||
  for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
 | 
			
		||||
    for (;;) {
 | 
			
		||||
      // Try benchmark
 | 
			
		||||
      VLOG(2) << "Running " << b.name << " for " << iters << "\n";
 | 
			
		||||
 | 
			
		||||
      manager.reset(new internal::ThreadManager(b.threads));
 | 
			
		||||
      for (std::size_t ti = 0; ti < pool.size(); ++ti) {
 | 
			
		||||
        pool[ti] = std::thread(&RunInThread, &b, iters,
 | 
			
		||||
                               static_cast<int>(ti + 1), manager.get());
 | 
			
		||||
      }
 | 
			
		||||
      RunInThread(&b, iters, 0, manager.get());
 | 
			
		||||
      manager->WaitForAllThreads();
 | 
			
		||||
      for (std::thread& thread : pool) thread.join();
 | 
			
		||||
      internal::ThreadManager::Result results;
 | 
			
		||||
      {
 | 
			
		||||
        MutexLock l(manager->GetBenchmarkMutex());
 | 
			
		||||
        results = manager->results;
 | 
			
		||||
      }
 | 
			
		||||
      manager.reset();
 | 
			
		||||
      // Adjust real/manual time stats since they were reported per thread.
 | 
			
		||||
      results.real_time_used /= b.threads;
 | 
			
		||||
      results.manual_time_used /= b.threads;
 | 
			
		||||
 | 
			
		||||
      VLOG(2) << "Ran in " << results.cpu_time_used << "/"
 | 
			
		||||
              << results.real_time_used << "\n";
 | 
			
		||||
 | 
			
		||||
      // Base decisions off of real time if requested by this benchmark.
 | 
			
		||||
      double seconds = results.cpu_time_used;
 | 
			
		||||
      if (b.use_manual_time) {
 | 
			
		||||
        seconds = results.manual_time_used;
 | 
			
		||||
      } else if (b.use_real_time) {
 | 
			
		||||
        seconds = results.real_time_used;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      const double min_time =
 | 
			
		||||
          !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
 | 
			
		||||
 | 
			
		||||
      // Determine if this run should be reported; either it has run for a
      // sufficient amount of time or an error was reported.
      const bool should_report = repetition_num > 0
        || has_explicit_iteration_count // An exact iteration count was requested
        || results.has_error_
        || iters >= kMaxIterations
        || seconds >= min_time // the elapsed time is large enough
        // CPU time is specified but the elapsed real time greatly exceeds the
        // minimum time. Note that user-provided timers are exempt from this
        // sanity check.
        || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
 | 
			
		||||
 | 
			
		||||
      if (should_report) {
 | 
			
		||||
        BenchmarkReporter::Run report =
 | 
			
		||||
            CreateRunReport(b, results, iters, seconds);
 | 
			
		||||
        if (!report.error_occurred && b.complexity != oNone)
 | 
			
		||||
          complexity_reports->push_back(report);
 | 
			
		||||
        reports.push_back(report);
 | 
			
		||||
        break;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      // Determine how much the iteration count should be increased by.
      // Note: Avoid division by zero with max(seconds, 1ns).
      double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
      // If our last run was at least 10% of FLAGS_benchmark_min_time then we
      // use the multiplier directly. Otherwise we use at most a 10x
      // expansion.
      // NOTE: When the last run was at least 10% of the min time the max
      // expansion should be 14x.
      bool is_significant = (seconds / min_time) > 0.1;
      multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
      if (multiplier <= 1.0) multiplier = 2.0;
      double next_iters = std::max(multiplier * iters, iters + 1.0);
      if (next_iters > kMaxIterations) {
        next_iters = kMaxIterations;
      }
      VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
      iters = static_cast<int>(next_iters + 0.5);
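      // Editor's worked example, not part of the vendored file: suppose
      // min_time = 0.5s and the last run of iters = 1000 took seconds = 0.02s.
      // Then multiplier = 0.5 * 1.4 / 0.02 = 35, but seconds / min_time = 0.04
      // is below the 10% significance threshold, so the growth is capped at
      // 10x and the next attempt uses iters = 10000. Had the run taken 0.1s
      // (20% of min_time), the uncapped multiplier 0.7 / 0.1 = 7 would be used
      // directly, giving iters = 7000.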
    }
 | 
			
		||||
  }
 | 
			
		||||
  // Calculate additional statistics
 | 
			
		||||
  auto stat_reports = ComputeStats(reports);
 | 
			
		||||
  if ((b.complexity != oNone) && b.last_benchmark_instance) {
 | 
			
		||||
    auto additional_run_stats = ComputeBigO(*complexity_reports);
 | 
			
		||||
    stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
 | 
			
		||||
                        additional_run_stats.end());
 | 
			
		||||
    complexity_reports->clear();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  if (report_aggregates_only) reports.clear();
 | 
			
		||||
  reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
 | 
			
		||||
  return reports;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // namespace
 | 
			
		||||
}  // namespace internal
 | 
			
		||||
 | 
			
		||||
State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
 | 
			
		||||
             int n_threads, internal::ThreadTimer* timer,
 | 
			
		||||
             internal::ThreadManager* manager)
 | 
			
		||||
    : started_(false),
 | 
			
		||||
      finished_(false),
 | 
			
		||||
      total_iterations_(max_iters + 1),
 | 
			
		||||
      range_(ranges),
 | 
			
		||||
      bytes_processed_(0),
 | 
			
		||||
      items_processed_(0),
 | 
			
		||||
      complexity_n_(0),
 | 
			
		||||
      error_occurred_(false),
 | 
			
		||||
      counters(),
 | 
			
		||||
      thread_index(thread_i),
 | 
			
		||||
      threads(n_threads),
 | 
			
		||||
      max_iterations(max_iters),
 | 
			
		||||
      timer_(timer),
 | 
			
		||||
      manager_(manager) {
 | 
			
		||||
  CHECK(max_iterations != 0) << "At least one iteration must be run";
 | 
			
		||||
  CHECK(total_iterations_ != 0) << "max iterations wrapped around";
 | 
			
		||||
  CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void State::PauseTiming() {
 | 
			
		||||
  // Add in time accumulated so far
 | 
			
		||||
  CHECK(started_ && !finished_ && !error_occurred_);
 | 
			
		||||
  timer_->StopTimer();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void State::ResumeTiming() {
 | 
			
		||||
  CHECK(started_ && !finished_ && !error_occurred_);
 | 
			
		||||
  timer_->StartTimer();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void State::SkipWithError(const char* msg) {
 | 
			
		||||
  CHECK(msg);
 | 
			
		||||
  error_occurred_ = true;
 | 
			
		||||
  {
 | 
			
		||||
    MutexLock l(manager_->GetBenchmarkMutex());
 | 
			
		||||
    if (manager_->results.has_error_ == false) {
 | 
			
		||||
      manager_->results.error_message_ = msg;
 | 
			
		||||
      manager_->results.has_error_ = true;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  total_iterations_ = 1;
 | 
			
		||||
  if (timer_->running()) timer_->StopTimer();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void State::SetIterationTime(double seconds) {
 | 
			
		||||
  timer_->SetIterationTime(seconds);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void State::SetLabel(const char* label) {
 | 
			
		||||
  MutexLock l(manager_->GetBenchmarkMutex());
 | 
			
		||||
  manager_->results.report_label_ = label;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void State::StartKeepRunning() {
 | 
			
		||||
  CHECK(!started_ && !finished_);
 | 
			
		||||
  started_ = true;
 | 
			
		||||
  manager_->StartStopBarrier();
 | 
			
		||||
  if (!error_occurred_) ResumeTiming();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void State::FinishKeepRunning() {
 | 
			
		||||
  CHECK(started_ && (!finished_ || error_occurred_));
 | 
			
		||||
  if (!error_occurred_) {
 | 
			
		||||
    PauseTiming();
 | 
			
		||||
  }
 | 
			
		||||
  // Total iterations has now wrapped around zero. Fix this.
 | 
			
		||||
  total_iterations_ = 1;
 | 
			
		||||
  finished_ = true;
 | 
			
		||||
  manager_->StartStopBarrier();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
namespace internal {
 | 
			
		||||
namespace {
 | 
			
		||||
 | 
			
		||||
void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
 | 
			
		||||
                           BenchmarkReporter* console_reporter,
 | 
			
		||||
                           BenchmarkReporter* file_reporter) {
 | 
			
		||||
  // Note the file_reporter can be null.
 | 
			
		||||
  CHECK(console_reporter != nullptr);
 | 
			
		||||
 | 
			
		||||
  // Determine the width of the name field using a minimum width of 10.
 | 
			
		||||
  bool has_repetitions = FLAGS_benchmark_repetitions > 1;
 | 
			
		||||
  size_t name_field_width = 10;
 | 
			
		||||
  size_t stat_field_width = 0;
 | 
			
		||||
  for (const Benchmark::Instance& benchmark : benchmarks) {
 | 
			
		||||
    name_field_width =
 | 
			
		||||
        std::max<size_t>(name_field_width, benchmark.name.size());
 | 
			
		||||
    has_repetitions |= benchmark.repetitions > 1;
 | 
			
		||||
 | 
			
		||||
    for(const auto& Stat : *benchmark.statistics)
 | 
			
		||||
      stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
 | 
			
		||||
  }
 | 
			
		||||
  if (has_repetitions) name_field_width += 1 + stat_field_width;
 | 
			
		||||
 | 
			
		||||
  // Print header here
 | 
			
		||||
  BenchmarkReporter::Context context;
 | 
			
		||||
  context.name_field_width = name_field_width;
 | 
			
		||||
 | 
			
		||||
  // Keep track of running times of all instances of the current benchmark
 | 
			
		||||
  std::vector<BenchmarkReporter::Run> complexity_reports;
 | 
			
		||||
 | 
			
		||||
  // We flush streams after invoking reporter methods that write to them. This
 | 
			
		||||
  // ensures users get timely updates even when streams are not line-buffered.
 | 
			
		||||
  auto flushStreams = [](BenchmarkReporter* reporter) {
 | 
			
		||||
    if (!reporter) return;
 | 
			
		||||
    std::flush(reporter->GetOutputStream());
 | 
			
		||||
    std::flush(reporter->GetErrorStream());
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  if (console_reporter->ReportContext(context) &&
 | 
			
		||||
      (!file_reporter || file_reporter->ReportContext(context))) {
 | 
			
		||||
    flushStreams(console_reporter);
 | 
			
		||||
    flushStreams(file_reporter);
 | 
			
		||||
    for (const auto& benchmark : benchmarks) {
 | 
			
		||||
      std::vector<BenchmarkReporter::Run> reports =
 | 
			
		||||
          RunBenchmark(benchmark, &complexity_reports);
 | 
			
		||||
      console_reporter->ReportRuns(reports);
 | 
			
		||||
      if (file_reporter) file_reporter->ReportRuns(reports);
 | 
			
		||||
      flushStreams(console_reporter);
 | 
			
		||||
      flushStreams(file_reporter);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  console_reporter->Finalize();
 | 
			
		||||
  if (file_reporter) file_reporter->Finalize();
 | 
			
		||||
  flushStreams(console_reporter);
 | 
			
		||||
  flushStreams(file_reporter);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::unique_ptr<BenchmarkReporter> CreateReporter(
 | 
			
		||||
    std::string const& name, ConsoleReporter::OutputOptions output_opts) {
 | 
			
		||||
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
 | 
			
		||||
  if (name == "console") {
 | 
			
		||||
    return PtrType(new ConsoleReporter(output_opts));
 | 
			
		||||
  } else if (name == "json") {
 | 
			
		||||
    return PtrType(new JSONReporter);
 | 
			
		||||
  } else if (name == "csv") {
 | 
			
		||||
    return PtrType(new CSVReporter);
 | 
			
		||||
  } else {
 | 
			
		||||
    std::cerr << "Unexpected format: '" << name << "'\n";
 | 
			
		||||
    std::exit(1);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace
 | 
			
		||||
 | 
			
		||||
bool IsZero(double n) {
 | 
			
		||||
  return std::abs(n) < std::numeric_limits<double>::epsilon();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
 | 
			
		||||
  int output_opts = ConsoleReporter::OO_Defaults;
 | 
			
		||||
  if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) ||
 | 
			
		||||
      IsTruthyFlagValue(FLAGS_benchmark_color)) {
 | 
			
		||||
    output_opts |= ConsoleReporter::OO_Color;
 | 
			
		||||
  } else {
 | 
			
		||||
    output_opts &= ~ConsoleReporter::OO_Color;
 | 
			
		||||
  }
 | 
			
		||||
  if(force_no_color) {
 | 
			
		||||
    output_opts &= ~ConsoleReporter::OO_Color;
 | 
			
		||||
  }
 | 
			
		||||
  if(FLAGS_benchmark_counters_tabular) {
 | 
			
		||||
    output_opts |= ConsoleReporter::OO_Tabular;
 | 
			
		||||
  } else {
 | 
			
		||||
    output_opts &= ~ConsoleReporter::OO_Tabular;
 | 
			
		||||
  }
 | 
			
		||||
  return static_cast< ConsoleReporter::OutputOptions >(output_opts);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace internal
 | 
			
		||||
 | 
			
		||||
size_t RunSpecifiedBenchmarks() {
 | 
			
		||||
  return RunSpecifiedBenchmarks(nullptr, nullptr);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
 | 
			
		||||
  return RunSpecifiedBenchmarks(console_reporter, nullptr);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
 | 
			
		||||
                              BenchmarkReporter* file_reporter) {
 | 
			
		||||
  std::string spec = FLAGS_benchmark_filter;
 | 
			
		||||
  if (spec.empty() || spec == "all")
 | 
			
		||||
    spec = ".";  // Regexp that matches all benchmarks
 | 
			
		||||
 | 
			
		||||
  // Setup the reporters
 | 
			
		||||
  std::ofstream output_file;
 | 
			
		||||
  std::unique_ptr<BenchmarkReporter> default_console_reporter;
 | 
			
		||||
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
 | 
			
		||||
  if (!console_reporter) {
 | 
			
		||||
    default_console_reporter = internal::CreateReporter(
 | 
			
		||||
          FLAGS_benchmark_format, internal::GetOutputOptions());
 | 
			
		||||
    console_reporter = default_console_reporter.get();
 | 
			
		||||
  }
 | 
			
		||||
  auto& Out = console_reporter->GetOutputStream();
 | 
			
		||||
  auto& Err = console_reporter->GetErrorStream();
 | 
			
		||||
 | 
			
		||||
  std::string const& fname = FLAGS_benchmark_out;
 | 
			
		||||
  if (fname.empty() && file_reporter) {
 | 
			
		||||
    Err << "A custom file reporter was provided but "
 | 
			
		||||
           "--benchmark_out=<file> was not specified."
 | 
			
		||||
        << std::endl;
 | 
			
		||||
    std::exit(1);
 | 
			
		||||
  }
 | 
			
		||||
  if (!fname.empty()) {
 | 
			
		||||
    output_file.open(fname);
 | 
			
		||||
    if (!output_file.is_open()) {
 | 
			
		||||
      Err << "invalid file name: '" << fname << std::endl;
 | 
			
		||||
      std::exit(1);
 | 
			
		||||
    }
 | 
			
		||||
    if (!file_reporter) {
 | 
			
		||||
      default_file_reporter = internal::CreateReporter(
 | 
			
		||||
          FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
 | 
			
		||||
      file_reporter = default_file_reporter.get();
 | 
			
		||||
    }
 | 
			
		||||
    file_reporter->SetOutputStream(&output_file);
 | 
			
		||||
    file_reporter->SetErrorStream(&output_file);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::vector<internal::Benchmark::Instance> benchmarks;
 | 
			
		||||
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
 | 
			
		||||
 | 
			
		||||
  if (benchmarks.empty()) {
 | 
			
		||||
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
 | 
			
		||||
    return 0;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  if (FLAGS_benchmark_list_tests) {
 | 
			
		||||
    for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
 | 
			
		||||
  } else {
 | 
			
		||||
    internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  return benchmarks.size();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
namespace internal {
 | 
			
		||||
 | 
			
		||||
void PrintUsageAndExit() {
 | 
			
		||||
  fprintf(stdout,
 | 
			
		||||
          "benchmark"
 | 
			
		||||
          " [--benchmark_list_tests={true|false}]\n"
 | 
			
		||||
          "          [--benchmark_filter=<regex>]\n"
 | 
			
		||||
          "          [--benchmark_min_time=<min_time>]\n"
 | 
			
		||||
          "          [--benchmark_repetitions=<num_repetitions>]\n"
 | 
			
		||||
          "          [--benchmark_report_aggregates_only={true|false}\n"
 | 
			
		||||
          "          [--benchmark_format=<console|json|csv>]\n"
 | 
			
		||||
          "          [--benchmark_out=<filename>]\n"
 | 
			
		||||
          "          [--benchmark_out_format=<json|console|csv>]\n"
 | 
			
		||||
          "          [--benchmark_color={auto|true|false}]\n"
 | 
			
		||||
          "          [--benchmark_counters_tabular={true|false}]\n"
 | 
			
		||||
          "          [--v=<verbosity>]\n");
 | 
			
		||||
  exit(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void ParseCommandLineFlags(int* argc, char** argv) {
 | 
			
		||||
  using namespace benchmark;
 | 
			
		||||
  for (int i = 1; i < *argc; ++i) {
 | 
			
		||||
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
 | 
			
		||||
                      &FLAGS_benchmark_list_tests) ||
 | 
			
		||||
        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
 | 
			
		||||
        ParseDoubleFlag(argv[i], "benchmark_min_time",
 | 
			
		||||
                        &FLAGS_benchmark_min_time) ||
 | 
			
		||||
        ParseInt32Flag(argv[i], "benchmark_repetitions",
 | 
			
		||||
                       &FLAGS_benchmark_repetitions) ||
 | 
			
		||||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
 | 
			
		||||
                      &FLAGS_benchmark_report_aggregates_only) ||
 | 
			
		||||
        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
 | 
			
		||||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
 | 
			
		||||
        ParseStringFlag(argv[i], "benchmark_out_format",
 | 
			
		||||
                        &FLAGS_benchmark_out_format) ||
 | 
			
		||||
        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
 | 
			
		||||
        // "color_print" is the deprecated name for "benchmark_color".
 | 
			
		||||
        // TODO: Remove this.
 | 
			
		||||
        ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
 | 
			
		||||
        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
 | 
			
		||||
                        &FLAGS_benchmark_counters_tabular) ||
 | 
			
		||||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
 | 
			
		||||
      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
 | 
			
		||||
 | 
			
		||||
      --(*argc);
 | 
			
		||||
      --i;
 | 
			
		||||
    } else if (IsFlag(argv[i], "help")) {
 | 
			
		||||
      PrintUsageAndExit();
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  for (auto const* flag :
 | 
			
		||||
       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
 | 
			
		||||
    if (*flag != "console" && *flag != "json" && *flag != "csv") {
 | 
			
		||||
      PrintUsageAndExit();
 | 
			
		||||
    }
 | 
			
		||||
  if (FLAGS_benchmark_color.empty()) {
 | 
			
		||||
    PrintUsageAndExit();
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int InitializeStreams() {
 | 
			
		||||
  static std::ios_base::Init init;
 | 
			
		||||
  return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace internal
 | 
			
		||||
 | 
			
		||||
void Initialize(int* argc, char** argv) {
 | 
			
		||||
  internal::ParseCommandLineFlags(argc, argv);
 | 
			
		||||
  internal::LogLevel() = FLAGS_v;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
bool ReportUnrecognizedArguments(int argc, char** argv) {
 | 
			
		||||
  for (int i = 1; i < argc; ++i) {
 | 
			
		||||
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
 | 
			
		||||
  }
 | 
			
		||||
  return argc > 1;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace benchmark
 | 
			
		||||
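
The entry points defined in this file (Initialize, ReportUnrecognizedArguments and RunSpecifiedBenchmarks) are what a benchmark executable's main() calls; the library's BENCHMARK_MAIN() macro expands to essentially this sequence. A minimal hand-written driver, sketched here for illustration, looks like:

    #include <benchmark/benchmark.h>

    int main(int argc, char** argv) {
      ::benchmark::Initialize(&argc, argv);            // strip --benchmark_* flags
      if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
      ::benchmark::RunSpecifiedBenchmarks();           // run everything matching the filter
      return 0;
    }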
							
								
								
									
47 benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h vendored Normal file
@@ -0,0 +1,47 @@
#ifndef BENCHMARK_API_INTERNAL_H
#define BENCHMARK_API_INTERNAL_H

#include "benchmark/benchmark.h"

#include <cmath>
#include <iosfwd>
#include <limits>
#include <string>
#include <vector>

namespace benchmark {
namespace internal {

// Information kept per benchmark we may want to run
struct Benchmark::Instance {
  std::string name;
  Benchmark* benchmark;
  ReportMode report_mode;
  std::vector<int> arg;
  TimeUnit time_unit;
  int range_multiplier;
  bool use_real_time;
  bool use_manual_time;
  BigO complexity;
  BigOFunc* complexity_lambda;
  UserCounters counters;
  const std::vector<Statistics>* statistics;
  bool last_benchmark_instance;
  int repetitions;
  double min_time;
  size_t iterations;
  int threads;  // Number of concurrent threads to use
};

bool FindBenchmarksInternal(const std::string& re,
                            std::vector<Benchmark::Instance>* benchmarks,
                            std::ostream* Err);

bool IsZero(double n);

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);

}  // end namespace internal
}  // end namespace benchmark

#endif  // BENCHMARK_API_INTERNAL_H
							
								
								
									
476 benchmarks/thirdparty/benchmark/src/benchmark_register.cc vendored Normal file
@@ -0,0 +1,476 @@
// Copyright 2015 Google Inc. All rights reserved.
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
#include "benchmark/benchmark.h"
 | 
			
		||||
#include "benchmark_api_internal.h"
 | 
			
		||||
#include "internal_macros.h"
 | 
			
		||||
 | 
			
		||||
#ifndef BENCHMARK_OS_WINDOWS
 | 
			
		||||
#include <sys/resource.h>
 | 
			
		||||
#include <sys/time.h>
 | 
			
		||||
#include <unistd.h>
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
#include <atomic>
 | 
			
		||||
#include <condition_variable>
 | 
			
		||||
#include <cstdio>
 | 
			
		||||
#include <cstdlib>
 | 
			
		||||
#include <cstring>
 | 
			
		||||
#include <fstream>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <memory>
 | 
			
		||||
#include <sstream>
 | 
			
		||||
#include <thread>
 | 
			
		||||
 | 
			
		||||
#include "check.h"
 | 
			
		||||
#include "commandlineflags.h"
 | 
			
		||||
#include "complexity.h"
 | 
			
		||||
#include "statistics.h"
 | 
			
		||||
#include "log.h"
 | 
			
		||||
#include "mutex.h"
 | 
			
		||||
#include "re.h"
 | 
			
		||||
#include "string_util.h"
 | 
			
		||||
#include "timers.h"
 | 
			
		||||
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
 | 
			
		||||
namespace {
 | 
			
		||||
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
 | 
			
		||||
static const int kRangeMultiplier = 8;
 | 
			
		||||
// The size of a benchmark family is the number of inputs on which the
// benchmark is repeated. If this is "large", warn the user during configuration.
 | 
			
		||||
static const size_t kMaxFamilySize = 100;
 | 
			
		||||
}  // end namespace
 | 
			
		||||
 | 
			
		||||
namespace internal {
 | 
			
		||||
 | 
			
		||||
//=============================================================================//
 | 
			
		||||
//                         BenchmarkFamilies
 | 
			
		||||
//=============================================================================//
 | 
			
		||||
 | 
			
		||||
// Class for managing registered benchmarks.  Note that each registered
 | 
			
		||||
// benchmark identifies a family of related benchmarks to run.
 | 
			
		||||
class BenchmarkFamilies {
 | 
			
		||||
 public:
 | 
			
		||||
  static BenchmarkFamilies* GetInstance();
 | 
			
		||||
 | 
			
		||||
  // Registers a benchmark family and returns the index assigned to it.
 | 
			
		||||
  size_t AddBenchmark(std::unique_ptr<Benchmark> family);
 | 
			
		||||
 | 
			
		||||
  // Clear all registered benchmark families.
 | 
			
		||||
  void ClearBenchmarks();
 | 
			
		||||
 | 
			
		||||
  // Extract the list of benchmark instances that match the specified
 | 
			
		||||
  // regular expression.
 | 
			
		||||
  bool FindBenchmarks(const std::string& re,
 | 
			
		||||
                      std::vector<Benchmark::Instance>* benchmarks,
 | 
			
		||||
                      std::ostream* Err);
 | 
			
		||||
 | 
			
		||||
 private:
 | 
			
		||||
  BenchmarkFamilies() {}
 | 
			
		||||
 | 
			
		||||
  std::vector<std::unique_ptr<Benchmark>> families_;
 | 
			
		||||
  Mutex mutex_;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
 | 
			
		||||
  static BenchmarkFamilies instance;
 | 
			
		||||
  return &instance;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
 | 
			
		||||
  MutexLock l(mutex_);
 | 
			
		||||
  size_t index = families_.size();
 | 
			
		||||
  families_.push_back(std::move(family));
 | 
			
		||||
  return index;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void BenchmarkFamilies::ClearBenchmarks() {
 | 
			
		||||
  MutexLock l(mutex_);
 | 
			
		||||
  families_.clear();
 | 
			
		||||
  families_.shrink_to_fit();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
bool BenchmarkFamilies::FindBenchmarks(
 | 
			
		||||
    const std::string& spec, std::vector<Benchmark::Instance>* benchmarks,
 | 
			
		||||
    std::ostream* ErrStream) {
 | 
			
		||||
  CHECK(ErrStream);
 | 
			
		||||
  auto& Err = *ErrStream;
 | 
			
		||||
  // Make regular expression out of command-line flag
 | 
			
		||||
  std::string error_msg;
 | 
			
		||||
  Regex re;
 | 
			
		||||
  if (!re.Init(spec, &error_msg)) {
 | 
			
		||||
    Err << "Could not compile benchmark re: " << error_msg << std::endl;
 | 
			
		||||
    return false;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Special list of thread counts to use when none are specified
 | 
			
		||||
  const std::vector<int> one_thread = {1};
 | 
			
		||||
 | 
			
		||||
  MutexLock l(mutex_);
 | 
			
		||||
  for (std::unique_ptr<Benchmark>& family : families_) {
 | 
			
		||||
    // Family was deleted or benchmark doesn't match
 | 
			
		||||
    if (!family) continue;
 | 
			
		||||
 | 
			
		||||
    if (family->ArgsCnt() == -1) {
 | 
			
		||||
      family->Args({});
 | 
			
		||||
    }
 | 
			
		||||
    const std::vector<int>* thread_counts =
 | 
			
		||||
        (family->thread_counts_.empty()
 | 
			
		||||
             ? &one_thread
 | 
			
		||||
             : &static_cast<const std::vector<int>&>(family->thread_counts_));
 | 
			
		||||
    const size_t family_size = family->args_.size() * thread_counts->size();
 | 
			
		||||
    // The benchmark will be run on at least 'family_size' different inputs.
    // If 'family_size' is very large, warn the user.
 | 
			
		||||
    if (family_size > kMaxFamilySize) {
 | 
			
		||||
      Err << "The number of inputs is very large. " << family->name_
 | 
			
		||||
          << " will be repeated at least " << family_size << " times.\n";
 | 
			
		||||
    }
 | 
			
		||||
    // Reserve in the special case of the regex ".", since we know the final
    // family size.
 | 
			
		||||
    if (spec == ".") benchmarks->reserve(family_size);
 | 
			
		||||
 | 
			
		||||
    for (auto const& args : family->args_) {
 | 
			
		||||
      for (int num_threads : *thread_counts) {
 | 
			
		||||
        Benchmark::Instance instance;
 | 
			
		||||
        instance.name = family->name_;
 | 
			
		||||
        instance.benchmark = family.get();
 | 
			
		||||
        instance.report_mode = family->report_mode_;
 | 
			
		||||
        instance.arg = args;
 | 
			
		||||
        instance.time_unit = family->time_unit_;
 | 
			
		||||
        instance.range_multiplier = family->range_multiplier_;
 | 
			
		||||
        instance.min_time = family->min_time_;
 | 
			
		||||
        instance.iterations = family->iterations_;
 | 
			
		||||
        instance.repetitions = family->repetitions_;
 | 
			
		||||
        instance.use_real_time = family->use_real_time_;
 | 
			
		||||
        instance.use_manual_time = family->use_manual_time_;
 | 
			
		||||
        instance.complexity = family->complexity_;
 | 
			
		||||
        instance.complexity_lambda = family->complexity_lambda_;
 | 
			
		||||
        instance.statistics = &family->statistics_;
 | 
			
		||||
        instance.threads = num_threads;
 | 
			
		||||
 | 
			
		||||
        // Add arguments to instance name
 | 
			
		||||
        size_t arg_i = 0;
 | 
			
		||||
        for (auto const& arg : args) {
 | 
			
		||||
          instance.name += "/";
 | 
			
		||||
 | 
			
		||||
          if (arg_i < family->arg_names_.size()) {
 | 
			
		||||
            const auto& arg_name = family->arg_names_[arg_i];
 | 
			
		||||
            if (!arg_name.empty()) {
 | 
			
		||||
              instance.name +=
 | 
			
		||||
                  StringPrintF("%s:", family->arg_names_[arg_i].c_str());
 | 
			
		||||
            }
 | 
			
		||||
          }
 | 
			
		||||
          
 | 
			
		||||
          instance.name += StringPrintF("%d", arg);
 | 
			
		||||
          ++arg_i;
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        if (!IsZero(family->min_time_))
 | 
			
		||||
          instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
 | 
			
		||||
        if (family->iterations_ != 0)
 | 
			
		||||
          instance.name += StringPrintF("/iterations:%d", family->iterations_);
 | 
			
		||||
        if (family->repetitions_ != 0)
 | 
			
		||||
          instance.name += StringPrintF("/repeats:%d", family->repetitions_);
 | 
			
		||||
 | 
			
		||||
        if (family->use_manual_time_) {
 | 
			
		||||
          instance.name += "/manual_time";
 | 
			
		||||
        } else if (family->use_real_time_) {
 | 
			
		||||
          instance.name += "/real_time";
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        // Add the number of threads used to the name
 | 
			
		||||
        if (!family->thread_counts_.empty()) {
 | 
			
		||||
          instance.name += StringPrintF("/threads:%d", instance.threads);
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        if (re.Match(instance.name)) {
 | 
			
		||||
          instance.last_benchmark_instance = (&args == &family->args_.back());
 | 
			
		||||
          benchmarks->push_back(std::move(instance));
 | 
			
		||||
        }
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
 | 
			
		||||
  std::unique_ptr<Benchmark> bench_ptr(bench);
 | 
			
		||||
  BenchmarkFamilies* families = BenchmarkFamilies::GetInstance();
 | 
			
		||||
  families->AddBenchmark(std::move(bench_ptr));
 | 
			
		||||
  return bench;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FIXME: This function is a hack so that benchmark.cc can access
 | 
			
		||||
// `BenchmarkFamilies`
 | 
			
		||||
bool FindBenchmarksInternal(const std::string& re,
 | 
			
		||||
                            std::vector<Benchmark::Instance>* benchmarks,
 | 
			
		||||
                            std::ostream* Err) {
 | 
			
		||||
  return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//=============================================================================//
 | 
			
		||||
//                               Benchmark
 | 
			
		||||
//=============================================================================//
 | 
			
		||||
 | 
			
		||||
Benchmark::Benchmark(const char* name)
 | 
			
		||||
    : name_(name),
 | 
			
		||||
      report_mode_(RM_Unspecified),
 | 
			
		||||
      time_unit_(kNanosecond),
 | 
			
		||||
      range_multiplier_(kRangeMultiplier),
 | 
			
		||||
      min_time_(0),
 | 
			
		||||
      iterations_(0),
 | 
			
		||||
      repetitions_(0),
 | 
			
		||||
      use_real_time_(false),
 | 
			
		||||
      use_manual_time_(false),
 | 
			
		||||
      complexity_(oNone),
 | 
			
		||||
      complexity_lambda_(nullptr) {
 | 
			
		||||
  ComputeStatistics("mean", StatisticsMean);
 | 
			
		||||
  ComputeStatistics("median", StatisticsMedian);
 | 
			
		||||
  ComputeStatistics("stddev", StatisticsStdDev);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark::~Benchmark() {}
 | 
			
		||||
 | 
			
		||||
void Benchmark::AddRange(std::vector<int>* dst, int lo, int hi, int mult) {
 | 
			
		||||
  CHECK_GE(lo, 0);
 | 
			
		||||
  CHECK_GE(hi, lo);
 | 
			
		||||
  CHECK_GE(mult, 2);
 | 
			
		||||
 | 
			
		||||
  // Add "lo"
 | 
			
		||||
  dst->push_back(lo);
 | 
			
		||||
 | 
			
		||||
  static const int kint32max = std::numeric_limits<int32_t>::max();
 | 
			
		||||
 | 
			
		||||
  // Now space out the benchmarks in multiples of "mult"
 | 
			
		||||
  for (int32_t i = 1; i < kint32max / mult; i *= mult) {
 | 
			
		||||
    if (i >= hi) break;
 | 
			
		||||
    if (i > lo) {
 | 
			
		||||
      dst->push_back(i);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  // Add "hi" (if different from "lo")
 | 
			
		||||
  if (hi != lo) {
 | 
			
		||||
    dst->push_back(hi);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
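// Editor's worked example, not part of the vendored file: with the default
// kRangeMultiplier of 8, Range(8, 1024) calls AddRange(&dst, 8, 1024, 8) and
// produces the arguments {8, 64, 512, 1024}: "lo" first, then the powers of 8
// strictly between lo and hi (64 and 512), then "hi".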
 | 
			
		||||
Benchmark* Benchmark::Arg(int x) {
 | 
			
		||||
  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
 | 
			
		||||
  args_.push_back({x});
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Unit(TimeUnit unit) {
 | 
			
		||||
  time_unit_ = unit;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Range(int start, int limit) {
 | 
			
		||||
  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
 | 
			
		||||
  std::vector<int> arglist;
 | 
			
		||||
  AddRange(&arglist, start, limit, range_multiplier_);
 | 
			
		||||
 | 
			
		||||
  for (int i : arglist) {
 | 
			
		||||
    args_.push_back({i});
 | 
			
		||||
  }
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
 | 
			
		||||
  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
 | 
			
		||||
  std::vector<std::vector<int>> arglists(ranges.size());
 | 
			
		||||
  std::size_t total = 1;
 | 
			
		||||
  for (std::size_t i = 0; i < ranges.size(); i++) {
 | 
			
		||||
    AddRange(&arglists[i], ranges[i].first, ranges[i].second,
 | 
			
		||||
             range_multiplier_);
 | 
			
		||||
    total *= arglists[i].size();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::vector<std::size_t> ctr(arglists.size(), 0);
 | 
			
		||||
 | 
			
		||||
  for (std::size_t i = 0; i < total; i++) {
 | 
			
		||||
    std::vector<int> tmp;
 | 
			
		||||
    tmp.reserve(arglists.size());
 | 
			
		||||
 | 
			
		||||
    for (std::size_t j = 0; j < arglists.size(); j++) {
 | 
			
		||||
      tmp.push_back(arglists[j].at(ctr[j]));
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    args_.push_back(std::move(tmp));
 | 
			
		||||
 | 
			
		||||
    for (std::size_t j = 0; j < arglists.size(); j++) {
 | 
			
		||||
      if (ctr[j] + 1 < arglists[j].size()) {
 | 
			
		||||
        ++ctr[j];
 | 
			
		||||
        break;
 | 
			
		||||
      }
 | 
			
		||||
      ctr[j] = 0;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
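// Editor's illustration, not part of the vendored file: Ranges() expands each
// pair with AddRange and then registers the cartesian product of the results.
// For example (BM_TwoArgs is an invented name),
//
//   BENCHMARK(BM_TwoArgs)->Ranges({{8, 64}, {1, 2}});
//
// expands {8, 64} to {8, 64} and {1, 2} to {1, 2}, and registers the argument
// tuples {8, 1}, {64, 1}, {8, 2}, {64, 2}.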
 | 
			
		||||
Benchmark* Benchmark::ArgName(const std::string& name) {
 | 
			
		||||
  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
 | 
			
		||||
  arg_names_ = {name};
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
 | 
			
		||||
  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
 | 
			
		||||
  arg_names_ = names;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::DenseRange(int start, int limit, int step) {
 | 
			
		||||
  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
 | 
			
		||||
  CHECK_GE(start, 0);
 | 
			
		||||
  CHECK_LE(start, limit);
 | 
			
		||||
  for (int arg = start; arg <= limit; arg += step) {
 | 
			
		||||
    args_.push_back({arg});
 | 
			
		||||
  }
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Args(const std::vector<int>& args) {
 | 
			
		||||
  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
 | 
			
		||||
  args_.push_back(args);
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
 | 
			
		||||
  custom_arguments(this);
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::RangeMultiplier(int multiplier) {
 | 
			
		||||
  CHECK(multiplier > 1);
 | 
			
		||||
  range_multiplier_ = multiplier;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::MinTime(double t) {
 | 
			
		||||
  CHECK(t > 0.0);
 | 
			
		||||
  CHECK(iterations_ == 0);
 | 
			
		||||
  min_time_ = t;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Iterations(size_t n) {
 | 
			
		||||
  CHECK(n > 0);
 | 
			
		||||
  CHECK(IsZero(min_time_));
 | 
			
		||||
  iterations_ = n;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Repetitions(int n) {
 | 
			
		||||
  CHECK(n > 0);
 | 
			
		||||
  repetitions_ = n;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
 | 
			
		||||
  report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::UseRealTime() {
 | 
			
		||||
  CHECK(!use_manual_time_)
 | 
			
		||||
      << "Cannot set UseRealTime and UseManualTime simultaneously.";
 | 
			
		||||
  use_real_time_ = true;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::UseManualTime() {
 | 
			
		||||
  CHECK(!use_real_time_)
 | 
			
		||||
      << "Cannot set UseRealTime and UseManualTime simultaneously.";
 | 
			
		||||
  use_manual_time_ = true;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Complexity(BigO complexity) {
 | 
			
		||||
  complexity_ = complexity;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
 | 
			
		||||
  complexity_lambda_ = complexity;
 | 
			
		||||
  complexity_ = oLambda;
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::ComputeStatistics(std::string name,
 | 
			
		||||
                                        StatisticsFunc* statistics) {
 | 
			
		||||
  statistics_.emplace_back(name, statistics);
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::Threads(int t) {
 | 
			
		||||
  CHECK_GT(t, 0);
 | 
			
		||||
  thread_counts_.push_back(t);
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
 | 
			
		||||
  CHECK_GT(min_threads, 0);
 | 
			
		||||
  CHECK_GE(max_threads, min_threads);
 | 
			
		||||
 | 
			
		||||
  AddRange(&thread_counts_, min_threads, max_threads, 2);
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
 | 
			
		||||
                                       int stride) {
 | 
			
		||||
  CHECK_GT(min_threads, 0);
 | 
			
		||||
  CHECK_GE(max_threads, min_threads);
 | 
			
		||||
  CHECK_GE(stride, 1);
 | 
			
		||||
 | 
			
		||||
  for (auto i = min_threads; i < max_threads; i += stride) {
 | 
			
		||||
    thread_counts_.push_back(i);
 | 
			
		||||
  }
 | 
			
		||||
  thread_counts_.push_back(max_threads);
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Benchmark* Benchmark::ThreadPerCpu() {
 | 
			
		||||
  thread_counts_.push_back(CPUInfo::Get().num_cpus);
 | 
			
		||||
  return this;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void Benchmark::SetName(const char* name) { name_ = name; }
 | 
			
		||||
 | 
			
		||||
int Benchmark::ArgsCnt() const {
 | 
			
		||||
  if (args_.empty()) {
 | 
			
		||||
    if (arg_names_.empty()) return -1;
 | 
			
		||||
    return static_cast<int>(arg_names_.size());
 | 
			
		||||
  }
 | 
			
		||||
  return static_cast<int>(args_.front().size());
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
//=============================================================================//
 | 
			
		||||
//                            FunctionBenchmark
 | 
			
		||||
//=============================================================================//
 | 
			
		||||
 | 
			
		||||
void FunctionBenchmark::Run(State& st) { func_(st); }
 | 
			
		||||
 | 
			
		||||
}  // end namespace internal
 | 
			
		||||
 | 
			
		||||
void ClearRegisteredBenchmarks() {
 | 
			
		||||
  internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace benchmark
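Aside (not part of the vendored sources): the setters above are the implementation behind the fluent registration API that user code chains onto BENCHMARK(). A minimal sketch of how they are typically used, assuming the standard state.range(0)/KeepRunning() user API; BM_memcpy is a placeholder name:

#include <benchmark/benchmark.h>
#include <cstring>
#include <vector>

static void BM_memcpy(benchmark::State& state) {
  std::vector<char> src(state.range(0)), dst(state.range(0));
  while (state.KeepRunning()) {
    std::memcpy(dst.data(), src.data(), src.size());
  }
}
// Each chained call maps onto one of the setters defined above.
BENCHMARK(BM_memcpy)
    ->ArgName("bytes")
    ->DenseRange(512, 2048, 512)  // registers args 512, 1024, 1536, 2048
    ->UseRealTime()
    ->Threads(2)
    ->Repetitions(3);
BENCHMARK_MAIN();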
79 benchmarks/thirdparty/benchmark/src/check.h vendored Normal file
@@ -0,0 +1,79 @@
#ifndef CHECK_H_
#define CHECK_H_

#include <cstdlib>
#include <ostream>
#include <cmath>

#include "internal_macros.h"
#include "log.h"

namespace benchmark {
namespace internal {

typedef void(AbortHandlerT)();

inline AbortHandlerT*& GetAbortHandler() {
  static AbortHandlerT* handler = &std::abort;
  return handler;
}

BENCHMARK_NORETURN inline void CallAbortHandler() {
  GetAbortHandler()();
  std::abort();  // fallback to enforce noreturn
}

// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed.
class CheckHandler {
 public:
  CheckHandler(const char* check, const char* file, const char* func, int line)
      : log_(GetErrorLogInstance()) {
    log_ << file << ":" << line << ": " << func << ": Check `" << check
         << "' failed. ";
  }

  LogType& GetLog() { return log_; }

  BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
    log_ << std::endl;
    CallAbortHandler();
  }

  CheckHandler& operator=(const CheckHandler&) = delete;
  CheckHandler(const CheckHandler&) = delete;
  CheckHandler() = delete;

 private:
  LogType& log_;
};

}  // end namespace internal
}  // end namespace benchmark

// The CHECK macro returns a std::ostream object that can have extra information
// written to it.
#ifndef NDEBUG
#define CHECK(b)                                                             \
  (b ? ::benchmark::internal::GetNullLogInstance()                           \
     : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
           .GetLog())
#else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif

#define CHECK_EQ(a, b) CHECK((a) == (b))
#define CHECK_NE(a, b) CHECK((a) != (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
#define CHECK_LE(a, b) CHECK((a) <= (b))
#define CHECK_GT(a, b) CHECK((a) > (b))
#define CHECK_LT(a, b) CHECK((a) < (b))

#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) <  (eps))
#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) >  (eps))
#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) >  (eps))

#endif  // CHECK_H_
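Aside (not part of the vendored sources): check.h is internal-only, but a short sketch shows why CHECK returns a stream — a failing check can append context before CheckHandler aborts. SafeDivide is a hypothetical helper for illustration:

#include "check.h"

static int SafeDivide(int num, int den) {
  // On failure, the streamed text is appended to the "Check `...' failed."
  // message, then CallAbortHandler() terminates the process.
  CHECK_NE(den, 0) << "denominator must be non-zero, got " << den;
  CHECK(num >= 0) << "negative numerators are not supported";
  return num / den;
}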
188 benchmarks/thirdparty/benchmark/src/colorprint.cc vendored Normal file
@@ -0,0 +1,188 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "colorprint.h"

#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>

#include "check.h"
#include "internal_macros.h"

#ifdef BENCHMARK_OS_WINDOWS
#include <Windows.h>
#include <io.h>
#else
#include <unistd.h>
#endif  // BENCHMARK_OS_WINDOWS

namespace benchmark {
namespace {
#ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode;
#else
typedef const char* PlatformColorCode;
#endif

PlatformColorCode GetPlatformColorCode(LogColor color) {
#ifdef BENCHMARK_OS_WINDOWS
  switch (color) {
    case COLOR_RED:
      return FOREGROUND_RED;
    case COLOR_GREEN:
      return FOREGROUND_GREEN;
    case COLOR_YELLOW:
      return FOREGROUND_RED | FOREGROUND_GREEN;
    case COLOR_BLUE:
      return FOREGROUND_BLUE;
    case COLOR_MAGENTA:
      return FOREGROUND_BLUE | FOREGROUND_RED;
    case COLOR_CYAN:
      return FOREGROUND_BLUE | FOREGROUND_GREEN;
    case COLOR_WHITE:  // fall through to default
    default:
      return 0;
  }
#else
  switch (color) {
    case COLOR_RED:
      return "1";
    case COLOR_GREEN:
      return "2";
    case COLOR_YELLOW:
      return "3";
    case COLOR_BLUE:
      return "4";
    case COLOR_MAGENTA:
      return "5";
    case COLOR_CYAN:
      return "6";
    case COLOR_WHITE:
      return "7";
    default:
      return nullptr;
  };
#endif
}

}  // end namespace

std::string FormatString(const char* msg, va_list args) {
  // we might need a second shot at this, so pre-emptively make a copy
  va_list args_cp;
  va_copy(args_cp, args);

  std::size_t size = 256;
  char local_buff[256];
  auto ret = vsnprintf(local_buff, size, msg, args_cp);

  va_end(args_cp);

  // currently there is no error handling for failure, so this is a hack.
  CHECK(ret >= 0);

  if (ret == 0)  // handle empty expansion
    return {};
  else if (static_cast<size_t>(ret) < size)
    return local_buff;
  else {
    // we did not provide a long enough buffer on our first attempt.
    size = (size_t)ret + 1;  // + 1 for the null byte
    std::unique_ptr<char[]> buff(new char[size]);
    ret = vsnprintf(buff.get(), size, msg, args);
    CHECK(ret > 0 && ((size_t)ret) < size);
    return buff.get();
  }
}

std::string FormatString(const char* msg, ...) {
  va_list args;
  va_start(args, msg);
  auto tmp = FormatString(msg, args);
  va_end(args);
  return tmp;
}

void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  ColorPrintf(out, color, fmt, args);
  va_end(args);
}

void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
                 va_list args) {
#ifdef BENCHMARK_OS_WINDOWS
  ((void)out);  // suppress unused warning

  const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);

  // Gets the current text color.
  CONSOLE_SCREEN_BUFFER_INFO buffer_info;
  GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
  const WORD old_color_attrs = buffer_info.wAttributes;

  // We need to flush the stream buffers into the console before each
  // SetConsoleTextAttribute call lest it affect the text that is already
  // printed but has not yet reached the console.
  fflush(stdout);
  SetConsoleTextAttribute(stdout_handle,
                          GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
  vprintf(fmt, args);

  fflush(stdout);
  // Restores the text color.
  SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
  const char* color_code = GetPlatformColorCode(color);
  if (color_code) out << FormatString("\033[0;3%sm", color_code);
  out << FormatString(fmt, args) << "\033[m";
#endif
}

bool IsColorTerminal() {
#if BENCHMARK_OS_WINDOWS
  // On Windows the TERM variable is usually not set, but the
  // console there does support colors.
  return 0 != _isatty(_fileno(stdout));
#else
  // On non-Windows platforms, we rely on the TERM variable. This list of
  // supported TERM values is copied from Google Test:
  // <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
  const char* const SUPPORTED_TERM_VALUES[] = {
      "xterm",         "xterm-color",     "xterm-256color",
      "screen",        "screen-256color", "tmux",
      "tmux-256color", "rxvt-unicode",    "rxvt-unicode-256color",
      "linux",         "cygwin",
  };

  const char* const term = getenv("TERM");

  bool term_supports_color = false;
  for (const char* candidate : SUPPORTED_TERM_VALUES) {
    if (term && 0 == strcmp(term, candidate)) {
      term_supports_color = true;
      break;
    }
  }

  return 0 != isatty(fileno(stdout)) && term_supports_color;
#endif  // BENCHMARK_OS_WINDOWS
}

}  // end namespace benchmark
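Aside (not part of the vendored sources): a minimal sketch of how the reporters call this helper, guarded by IsColorTerminal(); the message and colors here are made up for the example:

#include <iostream>
#include "colorprint.h"

void PrintStatus(bool ok) {
  // Emits an ANSI color sequence on POSIX terminals, or sets the console
  // text attribute on Windows, then prints the formatted text.
  if (benchmark::IsColorTerminal()) {
    benchmark::ColorPrintf(std::cout,
                           ok ? benchmark::COLOR_GREEN : benchmark::COLOR_RED,
                           "%-10s run %d\n", ok ? "PASSED" : "FAILED", 42);
  }
}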
33 benchmarks/thirdparty/benchmark/src/colorprint.h vendored Normal file
@@ -0,0 +1,33 @@
#ifndef BENCHMARK_COLORPRINT_H_
#define BENCHMARK_COLORPRINT_H_

#include <cstdarg>
#include <iostream>
#include <string>

namespace benchmark {
enum LogColor {
  COLOR_DEFAULT,
  COLOR_RED,
  COLOR_GREEN,
  COLOR_YELLOW,
  COLOR_BLUE,
  COLOR_MAGENTA,
  COLOR_CYAN,
  COLOR_WHITE
};

std::string FormatString(const char* msg, va_list args);
std::string FormatString(const char* msg, ...);

void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
                 va_list args);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);

// Returns true if stdout appears to be a terminal that supports colored
// output, false otherwise.
bool IsColorTerminal();

}  // end namespace benchmark

#endif  // BENCHMARK_COLORPRINT_H_
218 benchmarks/thirdparty/benchmark/src/commandlineflags.cc vendored Normal file
@@ -0,0 +1,218 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "commandlineflags.h"

#include <cctype>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>

namespace benchmark {
// Parses 'str' for a 32-bit signed integer.  If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
  // Parses the environment variable as a decimal integer.
  char* end = nullptr;
  const long long_value = strtol(str, &end, 10);  // NOLINT

  // Has strtol() consumed all characters in the string?
  if (*end != '\0') {
    // No - an invalid character was encountered.
    std::cerr << src_text << " is expected to be a 32-bit integer, "
              << "but actually has value \"" << str << "\".\n";
    return false;
  }

  // Is the parsed value in the range of an Int32?
  const int32_t result = static_cast<int32_t>(long_value);
  if (long_value == std::numeric_limits<long>::max() ||
      long_value == std::numeric_limits<long>::min() ||
      // The parsed value overflows as a long.  (strtol() returns
      // LONG_MAX or LONG_MIN when the input overflows.)
      result != long_value
      // The parsed value overflows as an Int32.
      ) {
    std::cerr << src_text << " is expected to be a 32-bit integer, "
              << "but actually has value \"" << str << "\", "
              << "which overflows.\n";
    return false;
  }

  *value = result;
  return true;
}

// Parses 'str' for a double.  If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string& src_text, const char* str, double* value) {
  // Parses the string as a floating-point number.
  char* end = nullptr;
  const double double_value = strtod(str, &end);  // NOLINT

  // Has strtod() consumed all characters in the string?
  if (*end != '\0') {
    // No - an invalid character was encountered.
    std::cerr << src_text << " is expected to be a double, "
              << "but actually has value \"" << str << "\".\n";
    return false;
  }

  *value = double_value;
  return true;
}

// Returns the name of the environment variable corresponding to the
// given flag.  For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char* flag) {
  const std::string flag_str(flag);

  std::string env_var;
  for (size_t i = 0; i != flag_str.length(); ++i)
    env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));

  return "BENCHMARK_" + env_var;
}

// Reads and returns the Boolean environment variable corresponding to
// the given flag; if it's not set, returns default_value.
//
// The value is considered true iff it's not "0".
bool BoolFromEnv(const char* flag, bool default_value) {
  const std::string env_var = FlagToEnvVar(flag);
  const char* const string_value = getenv(env_var.c_str());
  return string_value == nullptr ? default_value
                                 : strcmp(string_value, "0") != 0;
}

// Reads and returns a 32-bit integer stored in the environment
// variable corresponding to the given flag; if it isn't set or
// doesn't represent a valid 32-bit integer, returns default_value.
int32_t Int32FromEnv(const char* flag, int32_t default_value) {
  const std::string env_var = FlagToEnvVar(flag);
  const char* const string_value = getenv(env_var.c_str());
  if (string_value == nullptr) {
    // The environment variable is not set.
    return default_value;
  }

  int32_t result = default_value;
  if (!ParseInt32(std::string("Environment variable ") + env_var, string_value,
                  &result)) {
    std::cout << "The default value " << default_value << " is used.\n";
    return default_value;
  }

  return result;
}

// Reads and returns the string environment variable corresponding to
// the given flag; if it's not set, returns default_value.
const char* StringFromEnv(const char* flag, const char* default_value) {
  const std::string env_var = FlagToEnvVar(flag);
  const char* const value = getenv(env_var.c_str());
  return value == nullptr ? default_value : value;
}

// Parses a string as a command line flag.  The string should have
// the format "--flag=value".  When def_optional is true, the "=value"
// part can be omitted.
//
// Returns the value of the flag, or nullptr if the parsing failed.
const char* ParseFlagValue(const char* str, const char* flag,
                           bool def_optional) {
  // str and flag must not be nullptr.
  if (str == nullptr || flag == nullptr) return nullptr;

  // The flag must start with "--".
  const std::string flag_str = std::string("--") + std::string(flag);
  const size_t flag_len = flag_str.length();
  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;

  // Skips the flag name.
  const char* flag_end = str + flag_len;

  // When def_optional is true, it's OK to not have a "=value" part.
  if (def_optional && (flag_end[0] == '\0')) return flag_end;

  // If def_optional is true and there are more characters after the
  // flag name, or if def_optional is false, there must be a '=' after
  // the flag name.
  if (flag_end[0] != '=') return nullptr;

  // Returns the string after "=".
  return flag_end + 1;
}

bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
  // Gets the value of the flag as a string.
  const char* const value_str = ParseFlagValue(str, flag, true);

  // Aborts if the parsing failed.
  if (value_str == nullptr) return false;

  // Converts the string value to a bool.
  *value = IsTruthyFlagValue(value_str);
  return true;
}

bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
  // Gets the value of the flag as a string.
  const char* const value_str = ParseFlagValue(str, flag, false);

  // Aborts if the parsing failed.
  if (value_str == nullptr) return false;

  // Sets *value to the value of the flag.
  return ParseInt32(std::string("The value of flag --") + flag, value_str,
                    value);
}

bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
  // Gets the value of the flag as a string.
  const char* const value_str = ParseFlagValue(str, flag, false);

  // Aborts if the parsing failed.
  if (value_str == nullptr) return false;

  // Sets *value to the value of the flag.
  return ParseDouble(std::string("The value of flag --") + flag, value_str,
                     value);
}

bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
  // Gets the value of the flag as a string.
  const char* const value_str = ParseFlagValue(str, flag, false);

  // Aborts if the parsing failed.
  if (value_str == nullptr) return false;

  *value = value_str;
  return true;
}

bool IsFlag(const char* str, const char* flag) {
  return (ParseFlagValue(str, flag, true) != nullptr);
}

bool IsTruthyFlagValue(const std::string& value) {
  if (value.empty()) return true;
  char ch = value[0];
  return isalnum(ch) &&
         !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N');
}
}  // end namespace benchmark
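Aside (not part of the vendored sources): a sketch of how these helpers fit together — FlagToEnvVar() maps a flag name to a BENCHMARK_* environment variable, and ParseInt32Flag() consumes "--flag=value" arguments. The flag name "my_limit" is a placeholder:

#include <cstdint>
#include "commandlineflags.h"

void ParseExample(int argc, char** argv) {
  // BENCHMARK_MY_LIMIT=20 in the environment overrides the default of 10.
  int32_t limit = benchmark::Int32FromEnv("my_limit", 10);
  for (int i = 1; i < argc; ++i) {
    // Accepts "--my_limit=42"; leaves 'limit' untouched for anything else.
    benchmark::ParseInt32Flag(argv[i], "my_limit", &limit);
  }
}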
79 benchmarks/thirdparty/benchmark/src/commandlineflags.h vendored Normal file
@@ -0,0 +1,79 @@
#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
#define BENCHMARK_COMMANDLINEFLAGS_H_

#include <cstdint>
#include <string>

// Macro for referencing flags.
#define FLAG(name) FLAGS_##name

// Macros for declaring flags.
#define DECLARE_bool(name) extern bool FLAG(name)
#define DECLARE_int32(name) extern int32_t FLAG(name)
#define DECLARE_int64(name) extern int64_t FLAG(name)
#define DECLARE_double(name) extern double FLAG(name)
#define DECLARE_string(name) extern std::string FLAG(name)

// Macros for defining flags.
#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
#define DEFINE_string(name, default_val, doc) \
  std::string FLAG(name) = (default_val)

namespace benchmark {
// Parses 'str' for a 32-bit signed integer.  If successful, writes the result
// to *value and returns true; otherwise leaves *value unchanged and returns
// false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);

// Parses a bool/Int32/string from the environment variable
// corresponding to the given benchmark flag.
bool BoolFromEnv(const char* flag, bool default_val);
int32_t Int32FromEnv(const char* flag, int32_t default_val);
double DoubleFromEnv(const char* flag, double default_val);
const char* StringFromEnv(const char* flag, const char* default_val);

// Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag".
//
// In the former case, the value is taken as true if it passes
// IsTruthyFlagValue().
//
// In the latter case, the value is taken as true.
//
// On success, stores the value of the flag in *value, and returns
// true.  On failure, returns false without changing *value.
bool ParseBoolFlag(const char* str, const char* flag, bool* value);

// Parses a string for an Int32 flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true.  On failure, returns false without changing *value.
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);

// Parses a string for a Double flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true.  On failure, returns false without changing *value.
bool ParseDoubleFlag(const char* str, const char* flag, double* value);

// Parses a string for a string flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true.  On failure, returns false without changing *value.
bool ParseStringFlag(const char* str, const char* flag, std::string* value);

// Returns true if the string matches the flag.
bool IsFlag(const char* str, const char* flag);

// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. As a special case, also returns true if
// value is the empty string.
bool IsTruthyFlagValue(const std::string& value);
}  // end namespace benchmark

#endif  // BENCHMARK_COMMANDLINEFLAGS_H_
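Aside (not part of the vendored sources): a minimal sketch of the DEFINE_/FLAG macro pattern as it is used inside the library's own .cc files; the flag name "my_feature" is hypothetical:

#include "commandlineflags.h"

// Expands to: bool FLAGS_my_feature = (false); the doc string is discarded.
DEFINE_bool(my_feature, false, "Enables a hypothetical feature.");

// FLAG(my_feature) expands to FLAGS_my_feature.
bool FeatureEnabled() { return FLAG(my_feature); }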
220 benchmarks/thirdparty/benchmark/src/complexity.cc vendored Normal file
@@ -0,0 +1,220 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark

#include "benchmark/benchmark.h"

#include <algorithm>
#include <cmath>
#include "check.h"
#include "complexity.h"

namespace benchmark {

// Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) {
  switch (complexity) {
    case oN:
      return [](int n) -> double { return n; };
    case oNSquared:
      return [](int n) -> double { return std::pow(n, 2); };
    case oNCubed:
      return [](int n) -> double { return std::pow(n, 3); };
    case oLogN:
      return [](int n) { return log2(n); };
    case oNLogN:
      return [](int n) { return n * log2(n); };
    case o1:
    default:
      return [](int) { return 1.0; };
  }
}

// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity) {
  switch (complexity) {
    case oN:
      return "N";
    case oNSquared:
      return "N^2";
    case oNCubed:
      return "N^3";
    case oLogN:
      return "lgN";
    case oNLogN:
      return "NlgN";
    case o1:
      return "(1)";
    default:
      return "f(N)";
  }
}

// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error, for the fitting curve
// given by the lambda expression.
//   - n             : Vector containing the size of the benchmark tests.
//   - time          : Vector containing the times for the benchmark tests.
//   - fitting_curve : lambda expression (e.g. [](int n) {return n; };).

// For a deeper explanation on the algorithm logic, see the README file at
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit

LeastSq MinimalLeastSq(const std::vector<int>& n,
                       const std::vector<double>& time,
                       BigOFunc* fitting_curve) {
  double sigma_gn = 0.0;
  double sigma_gn_squared = 0.0;
  double sigma_time = 0.0;
  double sigma_time_gn = 0.0;

  // Calculate least square fitting parameter
  for (size_t i = 0; i < n.size(); ++i) {
    double gn_i = fitting_curve(n[i]);
    sigma_gn += gn_i;
    sigma_gn_squared += gn_i * gn_i;
    sigma_time += time[i];
    sigma_time_gn += time[i] * gn_i;
  }

  LeastSq result;
  result.complexity = oLambda;

  // Calculate complexity.
  result.coef = sigma_time_gn / sigma_gn_squared;

  // Calculate RMS
  double rms = 0.0;
  for (size_t i = 0; i < n.size(); ++i) {
    double fit = result.coef * fitting_curve(n[i]);
    rms += pow((time[i] - fit), 2);
  }

  // Normalized RMS by the mean of the observed values
  double mean = sigma_time / n.size();
  result.rms = sqrt(rms / n.size()) / mean;

  return result;
}

// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error.
//   - n          : Vector containing the size of the benchmark tests.
//   - time       : Vector containing the times for the benchmark tests.
//   - complexity : If different than oAuto, the fitting curve will stick to
//                  this one. If it is oAuto, the best fitting curve will be
//                  calculated.
LeastSq MinimalLeastSq(const std::vector<int>& n,
                       const std::vector<double>& time, const BigO complexity) {
  CHECK_EQ(n.size(), time.size());
  CHECK_GE(n.size(), 2);  // Do not compute the fitting curve if fewer than
                          // two benchmark runs are given
  CHECK_NE(complexity, oNone);

  LeastSq best_fit;

  if (complexity == oAuto) {
    std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};

    // Take o1 as default best fitting curve
    best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
    best_fit.complexity = o1;

    // Compute all possible fitting curves and stick to the best one
    for (const auto& fit : fit_curves) {
      LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
      if (current_fit.rms < best_fit.rms) {
        best_fit = current_fit;
        best_fit.complexity = fit;
      }
    }
  } else {
    best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
    best_fit.complexity = complexity;
  }

  return best_fit;
}

std::vector<BenchmarkReporter::Run> ComputeBigO(
    const std::vector<BenchmarkReporter::Run>& reports) {
  typedef BenchmarkReporter::Run Run;
  std::vector<Run> results;

  if (reports.size() < 2) return results;

  // Accumulators.
  std::vector<int> n;
  std::vector<double> real_time;
  std::vector<double> cpu_time;

  // Populate the accumulators.
  for (const Run& run : reports) {
    CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
    n.push_back(run.complexity_n);
    real_time.push_back(run.real_accumulated_time / run.iterations);
    cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
  }

  LeastSq result_cpu;
  LeastSq result_real;

  if (reports[0].complexity == oLambda) {
    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
    result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
  } else {
    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
  }
  std::string benchmark_name =
      reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));

  // Get the data from the accumulator to BenchmarkReporter::Run's.
  Run big_o;
  big_o.benchmark_name = benchmark_name + "_BigO";
  big_o.iterations = 0;
  big_o.real_accumulated_time = result_real.coef;
  big_o.cpu_accumulated_time = result_cpu.coef;
  big_o.report_big_o = true;
  big_o.complexity = result_cpu.complexity;

  // All the time results are reported after being multiplied by the
  // time unit multiplier. But since RMS is a relative quantity it
  // should not be multiplied at all. So, here, we _divide_ it by the
  // multiplier so that when it is multiplied later the result is the
  // correct one.
  double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);

  // Only add label to mean/stddev if it is same for all runs
  Run rms;
  big_o.report_label = reports[0].report_label;
  rms.benchmark_name = benchmark_name + "_RMS";
  rms.report_label = big_o.report_label;
  rms.iterations = 0;
  rms.real_accumulated_time = result_real.rms / multiplier;
  rms.cpu_accumulated_time = result_cpu.rms / multiplier;
  rms.report_rms = true;
  rms.complexity = result_cpu.complexity;
  // don't forget to keep the time unit, or we won't be able to
  // recover the correct value.
  rms.time_unit = reports[0].time_unit;

  results.push_back(big_o);
  results.push_back(rms);
  return results;
}

}  // end namespace benchmark
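Aside (not part of the vendored sources): the fitting code above is fed by user benchmarks that call SetComplexityN() and request Complexity(). A minimal sketch, assuming the standard state.range(0)/KeepRunning() API; BM_Sort and its body are placeholders:

#include <benchmark/benchmark.h>
#include <algorithm>
#include <vector>

static void BM_Sort(benchmark::State& state) {
  std::vector<int> v(state.range(0));
  while (state.KeepRunning()) {
    std::sort(v.begin(), v.end());
  }
  // This value becomes run.complexity_n, the 'n' passed to MinimalLeastSq().
  state.SetComplexityN(state.range(0));
}
// oAuto makes ComputeBigO() pick the curve with the lowest normalized RMS;
// passing a BigOFunc lambda instead forces a specific fitting curve.
BENCHMARK(BM_Sort)
    ->DenseRange(1024, 8192, 1024)
    ->Complexity(benchmark::oAuto);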
55 benchmarks/thirdparty/benchmark/src/complexity.h vendored Normal file
@@ -0,0 +1,55 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark

#ifndef COMPLEXITY_H_
#define COMPLEXITY_H_

#include <string>
#include <vector>

#include "benchmark/benchmark.h"

namespace benchmark {

// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
    const std::vector<BenchmarkReporter::Run>& reports);

// This data structure will contain the result returned by MinimalLeastSq
//   - coef        : Estimated coefficient for the high-order term as
//                   interpolated from data.
//   - rms         : Normalized Root Mean Squared Error.
//   - complexity  : Scalability form (e.g. oN, oNLogN). In case a scalability
//                   form has been provided to MinimalLeastSq this will return
//                   the same value. In case BigO::oAuto has been selected, this
//                   parameter will return the best fitting curve detected.

struct LeastSq {
  LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}

  double coef;
  double rms;
  BigO complexity;
};

// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity);

}  // end namespace benchmark

#endif  // COMPLEXITY_H_
182 benchmarks/thirdparty/benchmark/src/console_reporter.cc vendored Normal file
@@ -0,0 +1,182 @@
// Copyright 2015 Google Inc. All rights reserved.
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
#include "benchmark/benchmark.h"
 | 
			
		||||
#include "complexity.h"
 | 
			
		||||
#include "counter.h"
 | 
			
		||||
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
#include <cstdint>
 | 
			
		||||
#include <cstdio>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <string>
 | 
			
		||||
#include <tuple>
 | 
			
		||||
#include <vector>
 | 
			
		||||
 | 
			
		||||
#include "check.h"
 | 
			
		||||
#include "colorprint.h"
 | 
			
		||||
#include "commandlineflags.h"
 | 
			
		||||
#include "internal_macros.h"
 | 
			
		||||
#include "string_util.h"
 | 
			
		||||
#include "timers.h"
 | 
			
		||||
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
 | 
			
		||||
bool ConsoleReporter::ReportContext(const Context& context) {
 | 
			
		||||
  name_field_width_ = context.name_field_width;
 | 
			
		||||
  printed_header_ = false;
 | 
			
		||||
  prev_counters_.clear();
 | 
			
		||||
 | 
			
		||||
  PrintBasicContext(&GetErrorStream(), context);
 | 
			
		||||
 | 
			
		||||
#ifdef BENCHMARK_OS_WINDOWS
 | 
			
		||||
  if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
 | 
			
		||||
    GetErrorStream()
 | 
			
		||||
        << "Color printing is only supported for stdout on windows."
 | 
			
		||||
           " Disabling color printing\n";
 | 
			
		||||
    output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color);
 | 
			
		||||
  }
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
  return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void ConsoleReporter::PrintHeader(const Run& run) {
 | 
			
		||||
  std::string str = FormatString("%-*s %13s %13s %10s", static_cast<int>(name_field_width_),
 | 
			
		||||
                                 "Benchmark", "Time", "CPU", "Iterations");
 | 
			
		||||
  if(!run.counters.empty()) {
 | 
			
		||||
    if(output_options_ & OO_Tabular) {
 | 
			
		||||
      for(auto const& c : run.counters) {
 | 
			
		||||
        str += FormatString(" %10s", c.first.c_str());
 | 
			
		||||
      }
 | 
			
		||||
    } else {
 | 
			
		||||
      str += " UserCounters...";
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  str += "\n";
 | 
			
		||||
  std::string line = std::string(str.length(), '-');
 | 
			
		||||
  GetOutputStream() << line << "\n" << str << line << "\n";
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
 | 
			
		||||
  for (const auto& run : reports) {
 | 
			
		||||
    // print the header:
 | 
			
		||||
    // --- if none was printed yet
 | 
			
		||||
    bool print_header = !printed_header_;
 | 
			
		||||
    // --- or if the format is tabular and this run
 | 
			
		||||
    //     has different fields from the prev header
 | 
			
		||||
    print_header |= (output_options_ & OO_Tabular) &&
 | 
			
		||||
                    (!internal::SameNames(run.counters, prev_counters_));
 | 
			
		||||
    if (print_header) {
      printed_header_ = true;
      prev_counters_ = run.counters;
      PrintHeader(run);
    }
    // As an alternative to printing the headers like this, we could sort
    // the benchmarks by header and then print. But this would require
    // waiting for the full results before printing, or printing twice.
    PrintRunData(run);
  }
}

static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
                             ...) {
  va_list args;
  va_start(args, fmt);
  out << FormatString(fmt, args);
  va_end(args);
}

void ConsoleReporter::PrintRunData(const Run& result) {
  typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
  auto& Out = GetOutputStream();
  PrinterFn* printer = (output_options_ & OO_Color) ?
                         (PrinterFn*)ColorPrintf : IgnoreColorPrint;
  auto name_color =
      (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
  printer(Out, name_color, "%-*s ", name_field_width_,
          result.benchmark_name.c_str());

  if (result.error_occurred) {
    printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
            result.error_message.c_str());
    printer(Out, COLOR_DEFAULT, "\n");
    return;
  }
  // Format bytes per second
  std::string rate;
  if (result.bytes_per_second > 0) {
    rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
  }

  // Format items per second
  std::string items;
  if (result.items_per_second > 0) {
    items =
        StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s");
  }

  const double real_time = result.GetAdjustedRealTime();
  const double cpu_time = result.GetAdjustedCPUTime();

  if (result.report_big_o) {
    std::string big_o = GetBigOString(result.complexity);
    printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(),
            cpu_time, big_o.c_str());
  } else if (result.report_rms) {
    printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
            cpu_time * 100);
  } else {
    const char* timeLabel = GetTimeUnitString(result.time_unit);
    printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
            cpu_time, timeLabel);
  }

  if (!result.report_big_o && !result.report_rms) {
    printer(Out, COLOR_CYAN, "%10lld", result.iterations);
  }

  for (auto& c : result.counters) {
    const std::size_t cNameLen = std::max(std::string::size_type(10),
                                          c.first.length());
    auto const& s = HumanReadableNumber(c.second.value, 1000);
    if (output_options_ & OO_Tabular) {
      if (c.second.flags & Counter::kIsRate) {
        printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
      } else {
        printer(Out, COLOR_DEFAULT, " %*s", cNameLen, s.c_str());
      }
    } else {
      const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : "";
      printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(),
              unit);
    }
  }

  if (!rate.empty()) {
    printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
  }

  if (!items.empty()) {
    printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
  }

  if (!result.report_label.empty()) {
    printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
  }

  printer(Out, COLOR_DEFAULT, "\n");
}

}  // end namespace benchmark
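
The rate, items and label columns printed by PrintRunData come straight from the per-run results; they are normally filled in from benchmark code through the public State API. A minimal sketch, assuming only the documented <benchmark/benchmark.h> interface (BM_MemCopy and its argument are illustrative):

// Sketch (public API only): SetBytesProcessed / SetItemsProcessed / SetLabel
// feed the "B/s", "items/s" and trailing-label columns printed above.
#include <benchmark/benchmark.h>
#include <cstdint>
#include <cstring>
#include <vector>

static void BM_MemCopy(benchmark::State& state) {
  const size_t size = static_cast<size_t>(state.range(0));
  std::vector<char> src(size, 'x'), dst(size);
  int64_t iters = 0;
  while (state.KeepRunning()) {
    std::memcpy(dst.data(), src.data(), size);
    benchmark::ClobberMemory();
    ++iters;
  }
  state.SetBytesProcessed(iters * static_cast<int64_t>(size));  // -> "B/s" column
  state.SetItemsProcessed(iters);                               // -> "items/s" column
  state.SetLabel("memcpy");                                     // -> trailing label
}
BENCHMARK(BM_MemCopy)->Arg(4096);
BENCHMARK_MAIN();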

 68  benchmarks/thirdparty/benchmark/src/counter.cc  vendored  Normal file
@@ -0,0 +1,68 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "counter.h"

namespace benchmark {
namespace internal {

double Finish(Counter const& c, double cpu_time, double num_threads) {
  double v = c.value;
  if (c.flags & Counter::kIsRate) {
    v /= cpu_time;
  }
  if (c.flags & Counter::kAvgThreads) {
    v /= num_threads;
  }
  return v;
}

void Finish(UserCounters *l, double cpu_time, double num_threads) {
  for (auto &c : *l) {
    c.second.value = Finish(c.second, cpu_time, num_threads);
  }
}

void Increment(UserCounters *l, UserCounters const& r) {
  // add counters present in both or just in *l
  for (auto &c : *l) {
    auto it = r.find(c.first);
    if (it != r.end()) {
      c.second.value = c.second + it->second;
    }
  }
  // add counters present in r, but not in *l
  for (auto const &tc : r) {
    auto it = l->find(tc.first);
    if (it == l->end()) {
      (*l)[tc.first] = tc.second;
    }
  }
}

bool SameNames(UserCounters const& l, UserCounters const& r) {
  if (&l == &r) return true;
  if (l.size() != r.size()) {
    return false;
  }
  for (auto const& c : l) {
    if (r.find(c.first) == r.end()) {
      return false;
    }
  }
  return true;
}

} // end namespace internal
} // end namespace benchmark
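
Finish() divides a counter by CPU time when Counter::kIsRate is set and by the thread count when Counter::kAvgThreads is set. A minimal sketch of how those flags reach this code from user benchmarks via the public counters map (names are illustrative):

// Sketch (public API assumed from the headers above): attaching the
// kIsRate / kAvgThreads flags consumed by Finish() to user counters.
#include <benchmark/benchmark.h>

static void BM_Work(benchmark::State& state) {
  double processed = 0;
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(processed += 1);
  }
  // Divided by CPU time in Finish(): reported as a per-second rate.
  state.counters["Rate"] =
      benchmark::Counter(processed, benchmark::Counter::kIsRate);
  // Divided by the thread count in Finish(): a per-thread average.
  state.counters["PerThread"] =
      benchmark::Counter(processed, benchmark::Counter::kAvgThreads);
}
BENCHMARK(BM_Work)->Threads(4);
BENCHMARK_MAIN();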

 26  benchmarks/thirdparty/benchmark/src/counter.h  vendored  Normal file
@@ -0,0 +1,26 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"

namespace benchmark {

// these counter-related functions are hidden to reduce API surface.
namespace internal {
void Finish(UserCounters *l, double time, double num_threads);
void Increment(UserCounters *l, UserCounters const& r);
bool SameNames(UserCounters const& l, UserCounters const& r);
} // end namespace internal

} // end namespace benchmark

 149  benchmarks/thirdparty/benchmark/src/csv_reporter.cc  vendored  Normal file
@@ -0,0 +1,149 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"
#include "complexity.h"

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

#include "string_util.h"
#include "timers.h"
#include "check.h"

// File format reference: http://edoceo.com/utilitas/csv-file-format.

namespace benchmark {

namespace {
std::vector<std::string> elements = {
    "name",           "iterations",       "real_time",        "cpu_time",
    "time_unit",      "bytes_per_second", "items_per_second", "label",
    "error_occurred", "error_message"};
}  // namespace

bool CSVReporter::ReportContext(const Context& context) {
  PrintBasicContext(&GetErrorStream(), context);
  return true;
}

void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
  std::ostream& Out = GetOutputStream();

  if (!printed_header_) {
    // save the names of all the user counters
    for (const auto& run : reports) {
      for (const auto& cnt : run.counters) {
        user_counter_names_.insert(cnt.first);
      }
    }

    // print the header
    for (auto B = elements.begin(); B != elements.end();) {
      Out << *B++;
      if (B != elements.end()) Out << ",";
    }
    for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) {
      Out << ",\"" << *B++ << "\"";
    }
    Out << "\n";

    printed_header_ = true;
  } else {
    // check that all the current counters are saved in the name set
    for (const auto& run : reports) {
      for (const auto& cnt : run.counters) {
        CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
              << "All counters must be present in each run. "
              << "Counter named \"" << cnt.first
              << "\" was not in a run after being added to the header";
      }
    }
  }

  // print results for each run
  for (const auto& run : reports) {
    PrintRunData(run);
  }

}

void CSVReporter::PrintRunData(const Run & run) {
  std::ostream& Out = GetOutputStream();

  // Field with embedded double-quote characters must be doubled and the field
  // delimited with double-quotes.
  std::string name = run.benchmark_name;
  ReplaceAll(&name, "\"", "\"\"");
  Out << '"' << name << "\",";
  if (run.error_occurred) {
    Out << std::string(elements.size() - 3, ',');
    Out << "true,";
    std::string msg = run.error_message;
    ReplaceAll(&msg, "\"", "\"\"");
    Out << '"' << msg << "\"\n";
    return;
  }

  // Do not print iteration on bigO and RMS report
  if (!run.report_big_o && !run.report_rms) {
    Out << run.iterations;
  }
  Out << ",";

  Out << run.GetAdjustedRealTime() << ",";
  Out << run.GetAdjustedCPUTime() << ",";

  // Do not print timeLabel on bigO and RMS report
  if (run.report_big_o) {
    Out << GetBigOString(run.complexity);
  } else if (!run.report_rms) {
    Out << GetTimeUnitString(run.time_unit);
  }
  Out << ",";

  if (run.bytes_per_second > 0.0) {
    Out << run.bytes_per_second;
  }
  Out << ",";
  if (run.items_per_second > 0.0) {
    Out << run.items_per_second;
  }
  Out << ",";
  if (!run.report_label.empty()) {
    // Field with embedded double-quote characters must be doubled and the field
    // delimited with double-quotes.
    std::string label = run.report_label;
    ReplaceAll(&label, "\"", "\"\"");
    Out << "\"" << label << "\"";
  }
  Out << ",,";  // for error_occurred and error_message

  // Print user counters
  for (const auto &ucn : user_counter_names_) {
    auto it = run.counters.find(ucn);
    if(it == run.counters.end()) {
      Out << ",";
    } else {
      Out << "," << it->second;
    }
  }
  Out << '\n';
}

}  // end namespace benchmark
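
The fixed columns above, followed by one column per user counter, are what CSVReporter emits. A minimal sketch of selecting this reporter explicitly; the --benchmark_format=csv command-line switch is the usual alternative (flag name as commonly documented, not restated in this file):

// Sketch: driving CSVReporter::PrintRunData above by passing the reporter
// to RunSpecifiedBenchmarks instead of relying on command-line flags.
#include <benchmark/benchmark.h>

static void BM_Noop(benchmark::State& state) {
  while (state.KeepRunning()) {
  }
}
BENCHMARK(BM_Noop);

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  benchmark::CSVReporter reporter;               // declared in benchmark.h
  benchmark::RunSpecifiedBenchmarks(&reporter);  // writes CSV rows to stdout
  return 0;
}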

 172  benchmarks/thirdparty/benchmark/src/cycleclock.h  vendored  Normal file
@@ -0,0 +1,172 @@
// ----------------------------------------------------------------------
// CycleClock
//    A CycleClock tells you the current time in Cycles.  The "time"
//    is actually time since power-on.  This is like time() but doesn't
//    involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface which provides a millisecond
// granularity and implemented as a memory read. A memory read is generally
// cheaper than the CycleClock for many architectures.
//
// Also, in some out of order CPU implementations, the CycleClock is not
// serializing. So if you're trying to count at cycles granularity, your
// data might be inaccurate due to out of order instruction execution.
// ----------------------------------------------------------------------

#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_

#include <cstdint>

#include "benchmark/benchmark.h"
#include "internal_macros.h"

#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif
// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>.  Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
#if defined(COMPILER_MSVC) && !defined(_M_IX86)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif

#ifndef BENCHMARK_OS_WINDOWS
#include <sys/time.h>
#include <time.h>
#endif

#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif

namespace benchmark {
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
//    http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b.  See also
//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
// This should return the number of cycles since power-on.  Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#if defined(BENCHMARK_OS_MACOSX)
  // this goes at the top because we need ALL Macs, regardless of
  // architecture, to return the number of "mach time units" that
  // have passed since startup.  See sysinfo.cc where
  // InitializeSystemInfo() sets the supposed cpu clock frequency of
  // macs to the number of mach time units per second, not actual
  // CPU clock frequency (which can change in the face of CPU
  // frequency scaling).  Also note that when the Mac sleeps, this
  // counter pauses; it does not continue counting, nor does it
  // reset to zero.
  return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
  // this goes above x86-specific code because old versions of Emscripten
  // define __x86_64__, although they have nothing to do with it.
  return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
  int64_t ret;
  __asm__ volatile("rdtsc" : "=A"(ret));
  return ret;
#elif defined(__x86_64__) || defined(__amd64__)
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
  return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
  // This returns a time-base, which is not always precisely a cycle-count.
  int64_t tbl, tbu0, tbu1;
  asm("mftbu %0" : "=r"(tbu0));
  asm("mftb  %0" : "=r"(tbl));
  asm("mftbu %0" : "=r"(tbu1));
  tbl &= -static_cast<int64_t>(tbu0 == tbu1);
  // high 32 bits in tbu1; low 32 bits in tbl  (tbu0 is garbage)
  return (tbu1 << 32) | tbl;
#elif defined(__sparc__)
  int64_t tick;
  asm(".byte 0x83, 0x41, 0x00, 0x00");
  asm("mov   %%g1, %0" : "=r"(tick));
  return tick;
#elif defined(__ia64__)
  int64_t itc;
  asm("mov %0 = ar.itc" : "=r"(itc));
  return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
  // Older MSVC compilers (like 7.x) don't seem to support the
  // __rdtsc intrinsic properly, so I prefer to use _asm instead
  // when I know it will work.  Otherwise, I'll use __rdtsc and hope
  // the code is being compiled with a non-ancient compiler.
  _asm rdtsc
#elif defined(COMPILER_MSVC)
  return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
  // Native Client validator on x86/x86-64 allows RDTSC instructions,
  // and this case is handled above. Native Client validator on ARM
  // rejects MRC instructions (used in the ARM-specific sequence below),
  // so we handle it here. Portable Native Client compiles to
  // architecture-agnostic bytecode, which doesn't provide any
  // cycle counter access mnemonics.

  // Native Client does not provide any API to access cycle counter.
  // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
  // because it provides nanosecond resolution (which is noticeable at
  // least for PNaCl modules running on x86 Mac & Linux).
  // Initialize to always return 0 if clock_gettime fails.
  struct timespec ts = { 0, 0 };
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
  // System timer of ARMv8 runs at a different frequency than the CPU's.
  // The frequency is fixed, typically in the range 1-50MHz.  It can be
  // read at CNTFRQ special register.  We assume the OS has set up
  // the virtual timer properly.
  int64_t virtual_timer_value;
  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
  return virtual_timer_value;
#elif defined(__ARM_ARCH)
  // V6 is the earliest arch that has a standard cyclecount
  // Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
  uint32_t pmccntr;
  uint32_t pmuseren;
  uint32_t pmcntenset;
  // Read the user mode perf monitor counter access permissions.
  asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
  if (pmuseren & 1) {  // Allows reading perfmon counters for user mode code.
    asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
    if (pmcntenset & 0x80000000ul) {  // Is it counting?
      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
      // The counter is set up to count every 64th cycle
      return static_cast<int64_t>(pmccntr) * 64;  // Should optimize to << 6
    }
  }
#endif
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__)
  // mips apparently only allows rdtsc for superusers, so we fall
  // back to gettimeofday.  It's possible clock_gettime would be better.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to make an attempt to create
// a fast implementation and use generic version if nothing better is available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
}  // end namespace cycleclock
}  // end namespace benchmark

#endif  // BENCHMARK_CYCLECLOCK_H_
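
cycleclock::Now() is an internal helper and this header is not installed; a minimal sketch, assuming it is compiled from inside the library's src/ tree, of taking a raw tick delta around a region:

// Sketch (internal header; assumes the include paths of the library tree):
// measuring a code region in raw ticks with cycleclock::Now().
#include <cstdint>
#include <cstdio>
#include "cycleclock.h"

int main() {
  const int64_t start = benchmark::cycleclock::Now();
  volatile double sink = 0.0;
  for (int i = 0; i < 1000000; ++i) sink += i * 0.5;  // work being timed
  const int64_t stop = benchmark::cycleclock::Now();
  // Ticks are TSC cycles on x86, mach time units on macOS, a fixed-rate
  // timer on ARMv8 -- see the #if ladder above before converting to seconds.
  std::printf("elapsed ticks: %lld\n", static_cast<long long>(stop - start));
  return 0;
}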

 82  benchmarks/thirdparty/benchmark/src/internal_macros.h  vendored  Normal file
@@ -0,0 +1,82 @@
#ifndef BENCHMARK_INTERNAL_MACROS_H_
#define BENCHMARK_INTERNAL_MACROS_H_

#include "benchmark/benchmark.h"

#ifndef __has_feature
#define __has_feature(x) 0
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

#if defined(__clang__)
  #if !defined(COMPILER_CLANG)
    #define COMPILER_CLANG
  #endif
#elif defined(_MSC_VER)
  #if !defined(COMPILER_MSVC)
    #define COMPILER_MSVC
  #endif
#elif defined(__GNUC__)
  #if !defined(COMPILER_GCC)
    #define COMPILER_GCC
  #endif
#endif

#if __has_feature(cxx_attributes)
  #define BENCHMARK_NORETURN [[noreturn]]
#elif defined(__GNUC__)
  #define BENCHMARK_NORETURN __attribute__((noreturn))
#elif defined(COMPILER_MSVC)
  #define BENCHMARK_NORETURN __declspec(noreturn)
#else
  #define BENCHMARK_NORETURN
#endif

#if defined(__CYGWIN__)
  #define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
  #define BENCHMARK_OS_WINDOWS 1
#elif defined(__APPLE__)
  #include "TargetConditionals.h"
  #if defined(TARGET_OS_MAC)
    #define BENCHMARK_OS_MACOSX 1
    #if defined(TARGET_OS_IPHONE)
      #define BENCHMARK_OS_IOS 1
    #endif
  #endif
#elif defined(__FreeBSD__)
  #define BENCHMARK_OS_FREEBSD 1
#elif defined(__NetBSD__)
  #define BENCHMARK_OS_NETBSD 1
#elif defined(__linux__)
  #define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
  #define BENCHMARK_OS_NACL 1
#elif defined(EMSCRIPTEN)
  #define BENCHMARK_OS_EMSCRIPTEN 1
#elif defined(__rtems__)
  #define BENCHMARK_OS_RTEMS 1
#endif

#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
     && !defined(__EXCEPTIONS)
  #define BENCHMARK_HAS_NO_EXCEPTIONS
#endif

#if defined(COMPILER_CLANG) || defined(COMPILER_GCC)
  #define BENCHMARK_MAYBE_UNUSED __attribute__((unused))
#else
  #define BENCHMARK_MAYBE_UNUSED
#endif

#if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable)
  #define BENCHMARK_UNREACHABLE() __builtin_unreachable()
#elif defined(COMPILER_MSVC)
  #define BENCHMARK_UNREACHABLE() __assume(false)
#else
  #define BENCHMARK_UNREACHABLE() ((void)0)
#endif

#endif  // BENCHMARK_INTERNAL_MACROS_H_
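
A minimal sketch of how the portability macros above are meant to be combined in library code; the helper names below are illustrative, not part of the library:

// Sketch: BENCHMARK_NORETURN marks a function that never returns, and
// BENCHMARK_UNREACHABLE() documents a spot control flow cannot reach,
// silencing "control reaches end of non-void function" warnings.
#include <cstdio>
#include <cstdlib>
#include "internal_macros.h"

BENCHMARK_NORETURN static void DieWith(const char* msg) {
  std::fprintf(stderr, "fatal: %s\n", msg);
  std::abort();
}

static int Classify(int v) {
  switch (v) {
    case 0: return 1;
    case 1: return 2;
    default: DieWith("unexpected value");
  }
  BENCHMARK_UNREACHABLE();
}

int main() { return Classify(1); }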

 201  benchmarks/thirdparty/benchmark/src/json_reporter.cc  vendored  Normal file
@@ -0,0 +1,201 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"
#include "complexity.h"

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include <iomanip> // for setprecision
#include <limits>

#include "string_util.h"
#include "timers.h"

namespace benchmark {

namespace {

std::string FormatKV(std::string const& key, std::string const& value) {
  return StringPrintF("\"%s\": \"%s\"", key.c_str(), value.c_str());
}

std::string FormatKV(std::string const& key, const char* value) {
  return StringPrintF("\"%s\": \"%s\"", key.c_str(), value);
}

std::string FormatKV(std::string const& key, bool value) {
  return StringPrintF("\"%s\": %s", key.c_str(), value ? "true" : "false");
}

std::string FormatKV(std::string const& key, int64_t value) {
  std::stringstream ss;
  ss << '"' << key << "\": " << value;
  return ss.str();
}

std::string FormatKV(std::string const& key, double value) {
  std::stringstream ss;
  ss << '"' << key << "\": ";

  const auto max_digits10 = std::numeric_limits<decltype (value)>::max_digits10;
  const auto max_fractional_digits10 = max_digits10 - 1;

  ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
  return ss.str();
}

int64_t RoundDouble(double v) { return static_cast<int64_t>(v + 0.5); }

}  // end namespace

bool JSONReporter::ReportContext(const Context& context) {
  std::ostream& out = GetOutputStream();

  out << "{\n";
  std::string inner_indent(2, ' ');

  // Open context block and print context information.
  out << inner_indent << "\"context\": {\n";
  std::string indent(4, ' ');

  std::string walltime_value = LocalDateTimeString();
  out << indent << FormatKV("date", walltime_value) << ",\n";

  CPUInfo const& info = context.cpu_info;
  out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
      << ",\n";
  out << indent
      << FormatKV("mhz_per_cpu",
                  RoundDouble(info.cycles_per_second / 1000000.0))
      << ",\n";
  out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
      << ",\n";

  out << indent << "\"caches\": [\n";
  indent = std::string(6, ' ');
  std::string cache_indent(8, ' ');
  for (size_t i = 0; i < info.caches.size(); ++i) {
    auto& CI = info.caches[i];
    out << indent << "{\n";
    out << cache_indent << FormatKV("type", CI.type) << ",\n";
    out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
        << ",\n";
    out << cache_indent
        << FormatKV("size", static_cast<int64_t>(CI.size) * 1000u) << ",\n";
    out << cache_indent
        << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
        << "\n";
    out << indent << "}";
    if (i != info.caches.size() - 1) out << ",";
    out << "\n";
  }
  indent = std::string(4, ' ');
  out << indent << "],\n";

#if defined(NDEBUG)
  const char build_type[] = "release";
#else
  const char build_type[] = "debug";
#endif
  out << indent << FormatKV("library_build_type", build_type) << "\n";
  // Close context block and open the list of benchmarks.
  out << inner_indent << "},\n";
  out << inner_indent << "\"benchmarks\": [\n";
  return true;
}

void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
  if (reports.empty()) {
    return;
  }
  std::string indent(4, ' ');
  std::ostream& out = GetOutputStream();
  if (!first_report_) {
    out << ",\n";
  }
  first_report_ = false;

  for (auto it = reports.begin(); it != reports.end(); ++it) {
    out << indent << "{\n";
    PrintRunData(*it);
    out << indent << '}';
    auto it_cp = it;
    if (++it_cp != reports.end()) {
      out << ",\n";
    }
  }
}

void JSONReporter::Finalize() {
  // Close the list of benchmarks and the top level object.
  GetOutputStream() << "\n  ]\n}\n";
}

void JSONReporter::PrintRunData(Run const& run) {
  std::string indent(6, ' ');
  std::ostream& out = GetOutputStream();
  out << indent << FormatKV("name", run.benchmark_name) << ",\n";
  if (run.error_occurred) {
    out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
    out << indent << FormatKV("error_message", run.error_message) << ",\n";
  }
  if (!run.report_big_o && !run.report_rms) {
    out << indent << FormatKV("iterations", run.iterations) << ",\n";
    out << indent
        << FormatKV("real_time", run.GetAdjustedRealTime())
        << ",\n";
    out << indent
        << FormatKV("cpu_time", run.GetAdjustedCPUTime());
    out << ",\n"
        << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
  } else if (run.report_big_o) {
    out << indent
        << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
        << ",\n";
    out << indent
        << FormatKV("real_coefficient", run.GetAdjustedRealTime())
        << ",\n";
    out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
    out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
  } else if (run.report_rms) {
    out << indent
        << FormatKV("rms", run.GetAdjustedCPUTime());
  }
  if (run.bytes_per_second > 0.0) {
    out << ",\n"
        << indent
        << FormatKV("bytes_per_second", run.bytes_per_second);
  }
  if (run.items_per_second > 0.0) {
    out << ",\n"
        << indent
        << FormatKV("items_per_second", run.items_per_second);
  }
  for(auto &c : run.counters) {
    out << ",\n"
        << indent
        << FormatKV(c.first, c.second);
  }
  if (!run.report_label.empty()) {
    out << ",\n" << indent << FormatKV("label", run.report_label);
  }
  out << '\n';
}

} // end namespace benchmark
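
FormatKV(double) prints values in scientific notation with max_digits10 - 1 fractional digits so that they survive a JSON round trip. The same idea, reproduced standalone with only the standard library:

// Sketch: round-trip-safe double formatting, as used by FormatKV above.
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>

int main() {
  const double value = 1.0 / 3.0;
  const int max_digits10 = std::numeric_limits<double>::max_digits10;  // 17
  std::stringstream ss;
  // max_digits10 - 1 fractional digits plus the leading digit of scientific
  // notation gives max_digits10 significant digits in total.
  ss << std::scientific << std::setprecision(max_digits10 - 1) << value;
  std::cout << ss.str() << "\n";  // e.g. 3.3333333333333331e-01
  return 0;
}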

 73  benchmarks/thirdparty/benchmark/src/log.h  vendored  Normal file
@@ -0,0 +1,73 @@
#ifndef BENCHMARK_LOG_H_
#define BENCHMARK_LOG_H_

#include <iostream>
#include <ostream>

#include "benchmark/benchmark.h"

namespace benchmark {
namespace internal {

typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&);

class LogType {
  friend LogType& GetNullLogInstance();
  friend LogType& GetErrorLogInstance();

  // FIXME: Add locking to output.
  template <class Tp>
  friend LogType& operator<<(LogType&, Tp const&);
  friend LogType& operator<<(LogType&, EndLType*);

 private:
  LogType(std::ostream* out) : out_(out) {}
  std::ostream* out_;
  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
};

template <class Tp>
LogType& operator<<(LogType& log, Tp const& value) {
  if (log.out_) {
    *log.out_ << value;
  }
  return log;
}

inline LogType& operator<<(LogType& log, EndLType* m) {
  if (log.out_) {
    *log.out_ << m;
  }
  return log;
}

inline int& LogLevel() {
  static int log_level = 0;
  return log_level;
}

inline LogType& GetNullLogInstance() {
  static LogType log(nullptr);
  return log;
}

inline LogType& GetErrorLogInstance() {
  static LogType log(&std::clog);
  return log;
}

inline LogType& GetLogInstanceForLevel(int level) {
  if (level <= LogLevel()) {
    return GetErrorLogInstance();
  }
  return GetNullLogInstance();
}

}  // end namespace internal
}  // end namespace benchmark

#define VLOG(x)                                                               \
  (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
                                                                         " ")

#endif
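
VLOG(x) routes to std::clog only while x is at or below the current LogLevel(); anything noisier is streamed into the null log instance and discarded. A minimal sketch, assuming the header is used from inside the library tree (the library itself normally sets the level from a verbosity flag):

// Sketch (internal header): gating diagnostics with VLOG; here the level is
// set directly instead of from a command-line flag.
#include "log.h"

int main() {
  benchmark::internal::LogLevel() = 1;  // print levels 0 and 1, drop 2+
  VLOG(0) << "always interesting\n";
  VLOG(1) << "verbose detail\n";
  VLOG(2) << "silently discarded (routed to the null log instance)\n";
  return 0;
}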

 155  benchmarks/thirdparty/benchmark/src/mutex.h  vendored  Normal file
@@ -0,0 +1,155 @@
#ifndef BENCHMARK_MUTEX_H_
#define BENCHMARK_MUTEX_H_

#include <condition_variable>
#include <mutex>

#include "check.h"

// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
#endif

#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))

#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)

#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))

#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))

#define ACQUIRED_BEFORE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))

#define ACQUIRED_AFTER(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))

#define REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

#define ACQUIRE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))

#define ACQUIRE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))

#define RELEASE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))

#define RELEASE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))

#define TRY_ACQUIRE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))

#define TRY_ACQUIRE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))

#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))

#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))

#define ASSERT_SHARED_CAPABILITY(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))

#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))

#define NO_THREAD_SAFETY_ANALYSIS \
  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)

namespace benchmark {

typedef std::condition_variable Condition;

// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex {
 public:
  Mutex() {}

  void lock() ACQUIRE() { mut_.lock(); }
  void unlock() RELEASE() { mut_.unlock(); }
  std::mutex& native_handle() { return mut_; }

 private:
  std::mutex mut_;
};

class SCOPED_CAPABILITY MutexLock {
  typedef std::unique_lock<std::mutex> MutexLockImp;

 public:
  MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
  ~MutexLock() RELEASE() {}
  MutexLockImp& native_handle() { return ml_; }

 private:
  MutexLockImp ml_;
};

class Barrier {
 public:
  Barrier(int num_threads) : running_threads_(num_threads) {}

  // Called by each thread
  bool wait() EXCLUDES(lock_) {
    bool last_thread = false;
    {
      MutexLock ml(lock_);
      last_thread = createBarrier(ml);
    }
    if (last_thread) phase_condition_.notify_all();
    return last_thread;
  }

  void removeThread() EXCLUDES(lock_) {
    MutexLock ml(lock_);
    --running_threads_;
    if (entered_ != 0) phase_condition_.notify_all();
  }

 private:
  Mutex lock_;
  Condition phase_condition_;
  int running_threads_;

  // State for barrier management
  int phase_number_ = 0;
  int entered_ = 0;  // Number of threads that have entered this barrier

  // Enter the barrier and wait until all other threads have also
  // entered the barrier.  Returns true iff this is the last thread to
  // enter the barrier.
  bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
    CHECK_LT(entered_, running_threads_);
    entered_++;
    if (entered_ < running_threads_) {
      // Wait for all threads to enter
      int phase_number_cp = phase_number_;
      auto cb = [this, phase_number_cp]() {
        return this->phase_number_ > phase_number_cp ||
               entered_ == running_threads_;  // A thread has aborted in error
      };
      phase_condition_.wait(ml.native_handle(), cb);
      if (phase_number_ > phase_number_cp) return false;
      // else (running_threads_ == entered_) and we are the last thread.
    }
    // Last thread has reached the barrier
    phase_number_++;
    entered_ = 0;
    return true;
  }
};

}  // end namespace benchmark

#endif  // BENCHMARK_MUTEX_H_
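
Barrier::wait() blocks until running_threads_ threads have arrived and returns true in exactly one of them (the last to arrive). A minimal sketch against the class as declared above, assuming it is compiled from inside the library tree:

// Sketch (internal header): the rendezvous behaviour of Barrier::wait(),
// which returns true in exactly one of the participating threads.
#include <cstdio>
#include <thread>
#include <vector>
#include "mutex.h"

int main() {
  constexpr int kThreads = 4;
  benchmark::Barrier barrier(kThreads);
  std::vector<std::thread> workers;
  for (int i = 0; i < kThreads; ++i) {
    workers.emplace_back([&barrier, i] {
      // ... per-thread setup would go here ...
      if (barrier.wait()) {
        std::printf("thread %d was last to arrive\n", i);  // exactly once
      }
    });
  }
  for (auto& t : workers) t.join();
  return 0;
}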

 140  benchmarks/thirdparty/benchmark/src/re.h  vendored  Normal file
@@ -0,0 +1,140 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef BENCHMARK_RE_H_
#define BENCHMARK_RE_H_

#include "internal_macros.h"

// Prefer C regex libraries when compiling w/o exceptions so that we can
// correctly report errors.
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \
    (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
#undef HAVE_STD_REGEX
#endif

#if defined(HAVE_STD_REGEX)
#include <regex>
#elif defined(HAVE_GNU_POSIX_REGEX)
#include <gnuregex.h>
#elif defined(HAVE_POSIX_REGEX)
#include <regex.h>
#else
#error No regular expression backend was found!
#endif
#include <string>

#include "check.h"

namespace benchmark {

// A wrapper around the POSIX regular expression API that provides automatic
// cleanup
class Regex {
 public:
  Regex() : init_(false) {}

  ~Regex();

  // Compile a regular expression matcher from spec.  Returns true on success.
  //
  // On failure (and if error is not nullptr), error is populated with a human
  // readable error message if an error occurs.
  bool Init(const std::string& spec, std::string* error);

  // Returns whether str matches the compiled regular expression.
  bool Match(const std::string& str);

 private:
  bool init_;
// Underlying regular expression object
#if defined(HAVE_STD_REGEX)
  std::regex re_;
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
  regex_t re_;
#else
#error No regular expression backend implementation available
#endif
};

#if defined(HAVE_STD_REGEX)

inline bool Regex::Init(const std::string& spec, std::string* error) {
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
  ((void)error); // suppress unused warning
#else
  try {
#endif
    re_ = std::regex(spec, std::regex_constants::extended);
    init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
  } catch (const std::regex_error& e) {
    if (error) {
      *error = e.what();
    }
  }
#endif
  return init_;
}

inline Regex::~Regex() {}

inline bool Regex::Match(const std::string& str) {
  if (!init_) {
    return false;
  }
  return std::regex_search(str, re_);
}

#else
inline bool Regex::Init(const std::string& spec, std::string* error) {
  int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
  if (ec != 0) {
    if (error) {
      size_t needed = regerror(ec, &re_, nullptr, 0);
      char* errbuf = new char[needed];
      regerror(ec, &re_, errbuf, needed);

      // regerror returns the number of bytes necessary to null terminate
      // the string, so we move that when assigning to error.
      CHECK_NE(needed, 0);
      error->assign(errbuf, needed - 1);

      delete[] errbuf;
    }

    return false;
  }

  init_ = true;
  return true;
}

inline Regex::~Regex() {
  if (init_) {
    regfree(&re_);
  }
}

inline bool Regex::Match(const std::string& str) {
  if (!init_) {
    return false;
  }
  return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
}
#endif

}  // end namespace benchmark

#endif  // BENCHMARK_RE_H_
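
Whichever backend the configuration picked, the Regex wrapper above exposes the same two-call surface: Init() to compile and Match() to test. A minimal sketch, assuming compilation from inside the library tree with one of the HAVE_*_REGEX macros defined; the filter pattern is illustrative:

// Sketch (internal header): compiling and applying a benchmark-name filter.
#include <cstdio>
#include <string>
#include "re.h"

int main() {
  benchmark::Regex re;
  std::string error;
  if (!re.Init("^BM_.*", &error)) {
    std::fprintf(stderr, "bad filter: %s\n", error.c_str());
    return 1;
  }
  std::printf("%d %d\n", static_cast<int>(re.Match("BM_MemCopy/1024")),
              static_cast<int>(re.Match("Setup")));  // prints "1 0"
  return 0;
}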
 | 
			
		||||
							
								
								
									
										81
									
								
								benchmarks/thirdparty/benchmark/src/reporter.cc
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										81
									
								
								benchmarks/thirdparty/benchmark/src/reporter.cc
									
										
									
									
										vendored
									
									
										Normal file
									
								
							| 
						 | 
				
			
			@ -0,0 +1,81 @@
 | 
			
		|||
// Copyright 2015 Google Inc. All rights reserved.
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
#include "benchmark/benchmark.h"
 | 
			
		||||
#include "timers.h"
 | 
			
		||||
 | 
			
		||||
#include <cstdlib>
 | 
			
		||||
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <tuple>
 | 
			
		||||
#include <vector>
 | 
			
		||||
 | 
			
		||||
#include "check.h"
 | 
			
		||||
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
 | 
			
		||||
BenchmarkReporter::BenchmarkReporter()
 | 
			
		||||
    : output_stream_(&std::cout), error_stream_(&std::cerr) {}
 | 
			
		||||
 | 
			
		||||
BenchmarkReporter::~BenchmarkReporter() {}
 | 
			
		||||
 | 
			
		||||
void BenchmarkReporter::PrintBasicContext(std::ostream *out,
 | 
			
		||||
                                          Context const &context) {
 | 
			
		||||
  CHECK(out) << "cannot be null";
 | 
			
		||||
  auto &Out = *out;
 | 
			
		||||
 | 
			
		||||
  Out << LocalDateTimeString() << "\n";
 | 
			
		||||
 | 
			
		||||
  const CPUInfo &info = context.cpu_info;
 | 
			
		||||
  Out << "Run on (" << info.num_cpus << " X "
 | 
			
		||||
      << (info.cycles_per_second / 1000000.0) << " MHz CPU "
 | 
			
		||||
      << ((info.num_cpus > 1) ? "s" : "") << ")\n";
 | 
			
		||||
  if (info.caches.size() != 0) {
 | 
			
		||||
    Out << "CPU Caches:\n";
 | 
			
		||||
    for (auto &CInfo : info.caches) {
 | 
			
		||||
      Out << "  L" << CInfo.level << " " << CInfo.type << " "
 | 
			
		||||
          << (CInfo.size / 1000) << "K";
 | 
			
		||||
      if (CInfo.num_sharing != 0)
 | 
			
		||||
        Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
 | 
			
		||||
      Out << "\n";
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  if (info.scaling_enabled) {
 | 
			
		||||
    Out << "***WARNING*** CPU scaling is enabled, the benchmark "
 | 
			
		||||
           "real time measurements may be noisy and will incur extra "
 | 
			
		||||
           "overhead.\n";
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
#ifndef NDEBUG
 | 
			
		||||
  Out << "***WARNING*** Library was built as DEBUG. Timings may be "
 | 
			
		||||
         "affected.\n";
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}
 | 
			
		||||
 | 
			
		||||
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
 | 
			
		||||
  double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
 | 
			
		||||
  if (iterations != 0) new_time /= static_cast<double>(iterations);
 | 
			
		||||
  return new_time;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
 | 
			
		||||
  double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
 | 
			
		||||
  if (iterations != 0) new_time /= static_cast<double>(iterations);
 | 
			
		||||
  return new_time;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace benchmark
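
As an aside, the per-iteration adjustment above is just accumulated_time * unit_multiplier / iterations; a minimal sketch of that arithmetic (the 1e9 multiplier assumes nanosecond output and stands in for GetTimeUnitMultiplier(), which is defined elsewhere in the library):

// Illustrative only: the arithmetic behind GetAdjustedRealTime()/GetAdjustedCPUTime().
#include <cstdint>
#include <cstdio>

double AdjustedTime(double accumulated_seconds, double unit_multiplier,
                    int64_t iterations) {
  double t = accumulated_seconds * unit_multiplier;
  if (iterations != 0) t /= static_cast<double>(iterations);
  return t;
}

int main() {
  // 2 s of wall time over 1,000,000 iterations -> 2000 ns per iteration.
  std::printf("%.1f ns/iter\n", AdjustedTime(2.0, 1e9, 1000000));
  return 0;
}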

51 benchmarks/thirdparty/benchmark/src/sleep.cc vendored Normal file
@@ -0,0 +1,51 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "sleep.h"

#include <cerrno>
#include <cstdlib>
#include <ctime>

#include "internal_macros.h"

#ifdef BENCHMARK_OS_WINDOWS
#include <Windows.h>
#endif

namespace benchmark {
#ifdef BENCHMARK_OS_WINDOWS
// Windows' Sleep takes a milliseconds argument.
void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
void SleepForSeconds(double seconds) {
  SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
}
#else   // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) {
  struct timespec sleep_time;
  sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
  sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
    ;  // Ignore signals and wait for the full interval to elapse.
}

void SleepForMilliseconds(int milliseconds) {
  SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
}

void SleepForSeconds(double seconds) {
  SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
}
#endif  // BENCHMARK_OS_WINDOWS
}  // end namespace benchmark

15 benchmarks/thirdparty/benchmark/src/sleep.h vendored Normal file
@@ -0,0 +1,15 @@
#ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_

namespace benchmark {
const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
const int kNumNanosPerMicro = 1000;
const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;

void SleepForMilliseconds(int milliseconds);
void SleepForSeconds(double seconds);
}  // end namespace benchmark

#endif  // BENCHMARK_SLEEP_H_
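
A small usage sketch for these declarations, assuming the program is linked against sleep.cc above; the 0.1-second duration is arbitrary:

// Illustrative only: sleeps ~100 ms and reports the measured wall time.
#include <chrono>
#include <cstdio>

#include "sleep.h"

int main() {
  auto start = std::chrono::steady_clock::now();
  benchmark::SleepForSeconds(0.1);  // on POSIX this retries nanosleep on EINTR
  std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
  std::printf("slept for ~%.3f s\n", elapsed.count());
  return 0;
}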

175 benchmarks/thirdparty/benchmark/src/statistics.cc vendored Normal file
@@ -0,0 +1,175 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
 | 
			
		||||
// Copyright 2017 Roman Lebedev. All rights reserved.
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
#include "benchmark/benchmark.h"
 | 
			
		||||
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
#include <cmath>
 | 
			
		||||
#include <string>
 | 
			
		||||
#include <vector>
 | 
			
		||||
#include <numeric>
 | 
			
		||||
#include "check.h"
 | 
			
		||||
#include "statistics.h"
 | 
			
		||||
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
 | 
			
		||||
auto StatisticsSum = [](const std::vector<double>& v) {
 | 
			
		||||
  return std::accumulate(v.begin(), v.end(), 0.0);
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
double StatisticsMean(const std::vector<double>& v) {
 | 
			
		||||
  if (v.size() == 0) return 0.0;
 | 
			
		||||
  return StatisticsSum(v) * (1.0 / v.size());
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
double StatisticsMedian(const std::vector<double>& v) {
 | 
			
		||||
  if (v.size() < 3) return StatisticsMean(v);
 | 
			
		||||
  std::vector<double> partial;
 | 
			
		||||
  // we need roundDown(count/2)+1 slots
 | 
			
		||||
  partial.resize(1 + (v.size() / 2));
 | 
			
		||||
  std::partial_sort_copy(v.begin(), v.end(), partial.begin(), partial.end());
 | 
			
		||||
  // did we have odd number of samples?
 | 
			
		||||
  // if yes, then the last element of partially-sorted vector is the median
 | 
			
		||||
  // if no, then the average of the last two elements is the median
 | 
			
		||||
  if(v.size() % 2 == 1)
 | 
			
		||||
    return partial.back();
 | 
			
		||||
  return (partial[partial.size() - 2] + partial[partial.size() - 1]) / 2.0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Return the sum of the squares of this sample set
 | 
			
		||||
auto SumSquares = [](const std::vector<double>& v) {
 | 
			
		||||
  return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
auto Sqr = [](const double dat) { return dat * dat; };
 | 
			
		||||
auto Sqrt = [](const double dat) {
 | 
			
		||||
  // Avoid NaN due to imprecision in the calculations
 | 
			
		||||
  if (dat < 0.0) return 0.0;
 | 
			
		||||
  return std::sqrt(dat);
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
double StatisticsStdDev(const std::vector<double>& v) {
 | 
			
		||||
  const auto mean = StatisticsMean(v);
 | 
			
		||||
  if (v.size() == 0) return mean;
 | 
			
		||||
 | 
			
		||||
  // Sample standard deviation is undefined for n = 1
 | 
			
		||||
  if (v.size() == 1)
 | 
			
		||||
    return 0.0;
 | 
			
		||||
 | 
			
		||||
  const double avg_squares = SumSquares(v) * (1.0 / v.size());
 | 
			
		||||
  return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::vector<BenchmarkReporter::Run> ComputeStats(
 | 
			
		||||
    const std::vector<BenchmarkReporter::Run>& reports) {
 | 
			
		||||
  typedef BenchmarkReporter::Run Run;
 | 
			
		||||
  std::vector<Run> results;
 | 
			
		||||
 | 
			
		||||
  auto error_count =
 | 
			
		||||
      std::count_if(reports.begin(), reports.end(),
 | 
			
		||||
                    [](Run const& run) { return run.error_occurred; });
 | 
			
		||||
 | 
			
		||||
  if (reports.size() - error_count < 2) {
 | 
			
		||||
    // We don't report aggregated data if there was a single run.
 | 
			
		||||
    return results;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Accumulators.
 | 
			
		||||
  std::vector<double> real_accumulated_time_stat;
 | 
			
		||||
  std::vector<double> cpu_accumulated_time_stat;
 | 
			
		||||
  std::vector<double> bytes_per_second_stat;
 | 
			
		||||
  std::vector<double> items_per_second_stat;
 | 
			
		||||
 | 
			
		||||
  real_accumulated_time_stat.reserve(reports.size());
 | 
			
		||||
  cpu_accumulated_time_stat.reserve(reports.size());
 | 
			
		||||
  bytes_per_second_stat.reserve(reports.size());
 | 
			
		||||
  items_per_second_stat.reserve(reports.size());
 | 
			
		||||
 | 
			
		||||
  // All repetitions should be run with the same number of iterations so we
 | 
			
		||||
  // can take this information from the first benchmark.
 | 
			
		||||
  int64_t const run_iterations = reports.front().iterations;
 | 
			
		||||
  // create stats for user counters
 | 
			
		||||
  struct CounterStat {
 | 
			
		||||
    Counter c;
 | 
			
		||||
    std::vector<double> s;
 | 
			
		||||
  };
 | 
			
		||||
  std::map< std::string, CounterStat > counter_stats;
 | 
			
		||||
  for(Run const& r : reports) {
 | 
			
		||||
    for(auto const& cnt : r.counters) {
 | 
			
		||||
      auto it = counter_stats.find(cnt.first);
 | 
			
		||||
      if(it == counter_stats.end()) {
 | 
			
		||||
        counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
 | 
			
		||||
        it = counter_stats.find(cnt.first);
 | 
			
		||||
        it->second.s.reserve(reports.size());
 | 
			
		||||
      } else {
 | 
			
		||||
        CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Populate the accumulators.
 | 
			
		||||
  for (Run const& run : reports) {
 | 
			
		||||
    CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
 | 
			
		||||
    CHECK_EQ(run_iterations, run.iterations);
 | 
			
		||||
    if (run.error_occurred) continue;
 | 
			
		||||
    real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
 | 
			
		||||
    cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
 | 
			
		||||
    items_per_second_stat.emplace_back(run.items_per_second);
 | 
			
		||||
    bytes_per_second_stat.emplace_back(run.bytes_per_second);
 | 
			
		||||
    // user counters
 | 
			
		||||
    for(auto const& cnt : run.counters) {
 | 
			
		||||
      auto it = counter_stats.find(cnt.first);
 | 
			
		||||
      CHECK_NE(it, counter_stats.end());
 | 
			
		||||
      it->second.s.emplace_back(cnt.second);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Only add label if it is same for all runs
 | 
			
		||||
  std::string report_label = reports[0].report_label;
 | 
			
		||||
  for (std::size_t i = 1; i < reports.size(); i++) {
 | 
			
		||||
    if (reports[i].report_label != report_label) {
 | 
			
		||||
      report_label = "";
 | 
			
		||||
      break;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  for(const auto& Stat : *reports[0].statistics) {
 | 
			
		||||
    // Get the data from the accumulator to BenchmarkReporter::Run's.
 | 
			
		||||
    Run data;
 | 
			
		||||
    data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_;
 | 
			
		||||
    data.report_label = report_label;
 | 
			
		||||
    data.iterations = run_iterations;
 | 
			
		||||
 | 
			
		||||
    data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
 | 
			
		||||
    data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
 | 
			
		||||
    data.bytes_per_second = Stat.compute_(bytes_per_second_stat);
 | 
			
		||||
    data.items_per_second = Stat.compute_(items_per_second_stat);
 | 
			
		||||
 | 
			
		||||
    data.time_unit = reports[0].time_unit;
 | 
			
		||||
 | 
			
		||||
    // user counters
 | 
			
		||||
    for(auto const& kv : counter_stats) {
 | 
			
		||||
      const auto uc_stat = Stat.compute_(kv.second.s);
 | 
			
		||||
      auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
 | 
			
		||||
      data.counters[kv.first] = c;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    results.push_back(data);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  return results;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace benchmark

37 benchmarks/thirdparty/benchmark/src/statistics.h vendored Normal file
@@ -0,0 +1,37 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
// Copyright 2017 Roman Lebedev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef STATISTICS_H_
#define STATISTICS_H_

#include <vector>

#include "benchmark/benchmark.h"

namespace benchmark {

// Return a vector containing the mean, median and standard deviation
// information (and any user-specified info) for the specified list of reports.
// If 'reports' contains fewer than two non-errored runs, an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeStats(
    const std::vector<BenchmarkReporter::Run>& reports);

double StatisticsMean(const std::vector<double>& v);
double StatisticsMedian(const std::vector<double>& v);
double StatisticsStdDev(const std::vector<double>& v);

}  // end namespace benchmark

#endif  // STATISTICS_H_
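
A quick numeric check of the three helpers declared above, assuming the program is linked against statistics.cc; the sample values are made up:

// Illustrative only: expected values follow from the definitions in statistics.cc.
#include <cstdio>
#include <vector>

#include "statistics.h"

int main() {
  std::vector<double> v = {1.0, 2.0, 3.0, 4.0};
  // mean = 10/4 = 2.5; median = (2+3)/2 = 2.5 (even sample count);
  // sample stddev = sqrt((4/3) * (7.5 - 2.5*2.5)) ~= 1.29099.
  std::printf("mean=%g median=%g stddev=%g\n", benchmark::StatisticsMean(v),
              benchmark::StatisticsMedian(v), benchmark::StatisticsStdDev(v));
  return 0;
}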

172 benchmarks/thirdparty/benchmark/src/string_util.cc vendored Normal file
@@ -0,0 +1,172 @@
#include "string_util.h"
 | 
			
		||||
 | 
			
		||||
#include <array>
 | 
			
		||||
#include <cmath>
 | 
			
		||||
#include <cstdarg>
 | 
			
		||||
#include <cstdio>
 | 
			
		||||
#include <memory>
 | 
			
		||||
#include <sstream>
 | 
			
		||||
 | 
			
		||||
#include "arraysize.h"
 | 
			
		||||
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
namespace {
 | 
			
		||||
 | 
			
		||||
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
 | 
			
		||||
const char kBigSIUnits[] = "kMGTPEZY";
 | 
			
		||||
// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
 | 
			
		||||
const char kBigIECUnits[] = "KMGTPEZY";
 | 
			
		||||
// milli, micro, nano, pico, femto, atto, zepto, yocto.
 | 
			
		||||
const char kSmallSIUnits[] = "munpfazy";
 | 
			
		||||
 | 
			
		||||
// We require that all three arrays have the same size.
 | 
			
		||||
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
 | 
			
		||||
              "SI and IEC unit arrays must be the same size");
 | 
			
		||||
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
 | 
			
		||||
              "Small SI and Big SI unit arrays must be the same size");
 | 
			
		||||
 | 
			
		||||
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
 | 
			
		||||
 | 
			
		||||
void ToExponentAndMantissa(double val, double thresh, int precision,
 | 
			
		||||
                           double one_k, std::string* mantissa,
 | 
			
		||||
                           int64_t* exponent) {
 | 
			
		||||
  std::stringstream mantissa_stream;
 | 
			
		||||
 | 
			
		||||
  if (val < 0) {
 | 
			
		||||
    mantissa_stream << "-";
 | 
			
		||||
    val = -val;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Adjust threshold so that it never excludes things which can't be rendered
 | 
			
		||||
  // in 'precision' digits.
 | 
			
		||||
  const double adjusted_threshold =
 | 
			
		||||
      std::max(thresh, 1.0 / std::pow(10.0, precision));
 | 
			
		||||
  const double big_threshold = adjusted_threshold * one_k;
 | 
			
		||||
  const double small_threshold = adjusted_threshold;
 | 
			
		||||
  // Values in ]simple_threshold,small_threshold[ will be printed as-is
 | 
			
		||||
  const double simple_threshold = 0.01;
 | 
			
		||||
 | 
			
		||||
  if (val > big_threshold) {
 | 
			
		||||
    // Positive powers
 | 
			
		||||
    double scaled = val;
 | 
			
		||||
    for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
 | 
			
		||||
      scaled /= one_k;
 | 
			
		||||
      if (scaled <= big_threshold) {
 | 
			
		||||
        mantissa_stream << scaled;
 | 
			
		||||
        *exponent = i + 1;
 | 
			
		||||
        *mantissa = mantissa_stream.str();
 | 
			
		||||
        return;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    mantissa_stream << val;
 | 
			
		||||
    *exponent = 0;
 | 
			
		||||
  } else if (val < small_threshold) {
 | 
			
		||||
    // Negative powers
 | 
			
		||||
    if (val < simple_threshold) {
 | 
			
		||||
      double scaled = val;
 | 
			
		||||
      for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
 | 
			
		||||
        scaled *= one_k;
 | 
			
		||||
        if (scaled >= small_threshold) {
 | 
			
		||||
          mantissa_stream << scaled;
 | 
			
		||||
          *exponent = -static_cast<int64_t>(i + 1);
 | 
			
		||||
          *mantissa = mantissa_stream.str();
 | 
			
		||||
          return;
 | 
			
		||||
        }
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    mantissa_stream << val;
 | 
			
		||||
    *exponent = 0;
 | 
			
		||||
  } else {
 | 
			
		||||
    mantissa_stream << val;
 | 
			
		||||
    *exponent = 0;
 | 
			
		||||
  }
 | 
			
		||||
  *mantissa = mantissa_stream.str();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::string ExponentToPrefix(int64_t exponent, bool iec) {
 | 
			
		||||
  if (exponent == 0) return "";
 | 
			
		||||
 | 
			
		||||
  const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
 | 
			
		||||
  if (index >= kUnitsSize) return "";
 | 
			
		||||
 | 
			
		||||
  const char* array =
 | 
			
		||||
      (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
 | 
			
		||||
  if (iec)
 | 
			
		||||
    return array[index] + std::string("i");
 | 
			
		||||
  else
 | 
			
		||||
    return std::string(1, array[index]);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::string ToBinaryStringFullySpecified(double value, double threshold,
 | 
			
		||||
                                         int precision, double one_k = 1024.0) {
 | 
			
		||||
  std::string mantissa;
 | 
			
		||||
  int64_t exponent;
 | 
			
		||||
  ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
 | 
			
		||||
                        &exponent);
 | 
			
		||||
  return mantissa + ExponentToPrefix(exponent, false);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace
 | 
			
		||||
 | 
			
		||||
void AppendHumanReadable(int n, std::string* str) {
 | 
			
		||||
  std::stringstream ss;
 | 
			
		||||
  // Round down to the nearest SI prefix.
 | 
			
		||||
  ss << ToBinaryStringFullySpecified(n, 1.0, 0);
 | 
			
		||||
  *str += ss.str();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::string HumanReadableNumber(double n, double one_k) {
 | 
			
		||||
  // 1.1 means that figures up to 1.1k should be shown with the next unit down;
 | 
			
		||||
  // this softens edge effects.
 | 
			
		||||
  // 1 means that we should show one decimal place of precision.
 | 
			
		||||
  return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::string StringPrintFImp(const char* msg, va_list args) {
 | 
			
		||||
  // we might need a second shot at this, so pre-emptively make a copy
 | 
			
		||||
  va_list args_cp;
 | 
			
		||||
  va_copy(args_cp, args);
 | 
			
		||||
 | 
			
		||||
  // TODO(ericwf): use std::array for first attempt to avoid one memory
 | 
			
		||||
  // allocation; guess what the size might be
 | 
			
		||||
  std::array<char, 256> local_buff;
 | 
			
		||||
  std::size_t size = local_buff.size();
 | 
			
		||||
  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
 | 
			
		||||
  // in the android-ndk
 | 
			
		||||
  auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
 | 
			
		||||
 | 
			
		||||
  va_end(args_cp);
 | 
			
		||||
 | 
			
		||||
  // handle empty expansion
 | 
			
		||||
  if (ret == 0) return std::string{};
 | 
			
		||||
  if (static_cast<std::size_t>(ret) < size)
 | 
			
		||||
    return std::string(local_buff.data());
 | 
			
		||||
 | 
			
		||||
  // we did not provide a long enough buffer on our first attempt.
 | 
			
		||||
  // add 1 to size to account for null-byte in size cast to prevent overflow
 | 
			
		||||
  size = static_cast<std::size_t>(ret) + 1;
 | 
			
		||||
  auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
 | 
			
		||||
  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
 | 
			
		||||
  // in the android-ndk
 | 
			
		||||
  ret = vsnprintf(buff_ptr.get(), size, msg, args);
 | 
			
		||||
  return std::string(buff_ptr.get());
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
std::string StringPrintF(const char* format, ...) {
 | 
			
		||||
  va_list args;
 | 
			
		||||
  va_start(args, format);
 | 
			
		||||
  std::string tmp = StringPrintFImp(format, args);
 | 
			
		||||
  va_end(args);
 | 
			
		||||
  return tmp;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void ReplaceAll(std::string* str, const std::string& from,
 | 
			
		||||
                const std::string& to) {
 | 
			
		||||
  std::size_t start = 0;
 | 
			
		||||
  while ((start = str->find(from, start)) != std::string::npos) {
 | 
			
		||||
    str->replace(start, from.length(), to);
 | 
			
		||||
    start += to.length();
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace benchmark
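
The retry logic in StringPrintFImp above is the usual two-pass vsnprintf idiom: format into a fixed buffer first, and only if the result was truncated allocate the exact size vsnprintf reported and format again. A compact standalone restatement of that pattern (not the library's code):

// Illustrative only: generic two-pass vsnprintf formatting.
#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

std::string Format(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  va_list copy;
  va_copy(copy, args);  // keep a copy in case a second pass is needed
  char local[256];
  int n = std::vsnprintf(local, sizeof(local), fmt, copy);
  va_end(copy);
  std::string out;
  if (n >= 0 && static_cast<size_t>(n) < sizeof(local)) {
    out.assign(local, static_cast<size_t>(n));
  } else if (n >= 0) {
    std::vector<char> big(static_cast<size_t>(n) + 1);  // +1 for the null byte
    std::vsnprintf(big.data(), big.size(), fmt, args);
    out.assign(big.data(), static_cast<size_t>(n));
  }
  va_end(args);
  return out;
}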

40 benchmarks/thirdparty/benchmark/src/string_util.h vendored Normal file
@@ -0,0 +1,40 @@
#ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_

#include <sstream>
#include <string>
#include <utility>
#include "internal_macros.h"

namespace benchmark {

void AppendHumanReadable(int n, std::string* str);

std::string HumanReadableNumber(double n, double one_k = 1024.0);

std::string StringPrintF(const char* format, ...);

inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
  return out;
}

template <class First, class... Rest>
inline std::ostream& StringCatImp(std::ostream& out, First&& f,
                                  Rest&&... rest) {
  out << std::forward<First>(f);
  return StringCatImp(out, std::forward<Rest>(rest)...);
}

template <class... Args>
inline std::string StrCat(Args&&... args) {
  std::ostringstream ss;
  StringCatImp(ss, std::forward<Args>(args)...);
  return ss.str();
}

void ReplaceAll(std::string* str, const std::string& from,
                const std::string& to);

}  // end namespace benchmark

#endif  // BENCHMARK_STRING_UTIL_H_
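
A usage sketch for these helpers, assuming the program is linked against string_util.cc; the concrete values are only examples:

// Illustrative only.
#include <cstdio>
#include <string>

#include "string_util.h"

int main() {
  std::string path = benchmark::StrCat("/sys/devices/system/cpu/cpu", 3,
                                       "/cpufreq/scaling_governor");
  // With the default one_k = 1024.0, 1 << 21 renders as "2M".
  std::string size = benchmark::HumanReadableNumber(1 << 21);
  std::string line =
      benchmark::StringPrintF("%s -> %s\n", path.c_str(), size.c_str());
  benchmark::ReplaceAll(&line, "cpu3", "cpu0");
  std::printf("%s", line.c_str());
  return 0;
}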

517 benchmarks/thirdparty/benchmark/src/sysinfo.cc vendored Normal file
@@ -0,0 +1,517 @@
// Copyright 2015 Google Inc. All rights reserved.
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
#include "internal_macros.h"
 | 
			
		||||
 | 
			
		||||
#ifdef BENCHMARK_OS_WINDOWS
 | 
			
		||||
#include <Shlwapi.h>
 | 
			
		||||
#include <VersionHelpers.h>
 | 
			
		||||
#include <Windows.h>
 | 
			
		||||
#else
 | 
			
		||||
#include <fcntl.h>
 | 
			
		||||
#include <sys/resource.h>
 | 
			
		||||
#include <sys/time.h>
 | 
			
		||||
#include <sys/types.h>  // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
 | 
			
		||||
#include <unistd.h>
 | 
			
		||||
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
 | 
			
		||||
    defined BENCHMARK_OS_NETBSD
 | 
			
		||||
#define BENCHMARK_HAS_SYSCTL
 | 
			
		||||
#include <sys/sysctl.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
#include <array>
 | 
			
		||||
#include <bitset>
 | 
			
		||||
#include <cerrno>
 | 
			
		||||
#include <climits>
 | 
			
		||||
#include <cstdint>
 | 
			
		||||
#include <cstdio>
 | 
			
		||||
#include <cstdlib>
 | 
			
		||||
#include <cstring>
 | 
			
		||||
#include <fstream>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <iterator>
 | 
			
		||||
#include <limits>
 | 
			
		||||
#include <memory>
 | 
			
		||||
#include <sstream>
 | 
			
		||||
 | 
			
		||||
#include "check.h"
 | 
			
		||||
#include "cycleclock.h"
 | 
			
		||||
#include "internal_macros.h"
 | 
			
		||||
#include "log.h"
 | 
			
		||||
#include "sleep.h"
 | 
			
		||||
#include "string_util.h"
 | 
			
		||||
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
namespace {
 | 
			
		||||
 | 
			
		||||
void PrintImp(std::ostream& out) { out << std::endl; }
 | 
			
		||||
 | 
			
		||||
template <class First, class... Rest>
 | 
			
		||||
void PrintImp(std::ostream& out, First&& f, Rest&&... rest) {
 | 
			
		||||
  out << std::forward<First>(f);
 | 
			
		||||
  PrintImp(out, std::forward<Rest>(rest)...);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class... Args>
 | 
			
		||||
BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
 | 
			
		||||
  PrintImp(std::cerr, std::forward<Args>(args)...);
 | 
			
		||||
  std::exit(EXIT_FAILURE);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#ifdef BENCHMARK_HAS_SYSCTL
 | 
			
		||||
 | 
			
		||||
/// ValueUnion - A type used to correctly alias the byte-for-byte output of
 | 
			
		||||
/// `sysctl` with the result type it's to be interpreted as.
 | 
			
		||||
struct ValueUnion {
 | 
			
		||||
  union DataT {
 | 
			
		||||
    uint32_t uint32_value;
 | 
			
		||||
    uint64_t uint64_value;
 | 
			
		||||
    // For correct aliasing of union members from bytes.
 | 
			
		||||
    char bytes[8];
 | 
			
		||||
  };
 | 
			
		||||
  using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
 | 
			
		||||
 | 
			
		||||
  // The size of the data union member + its trailing array size.
 | 
			
		||||
  size_t Size;
 | 
			
		||||
  DataPtr Buff;
 | 
			
		||||
 | 
			
		||||
 public:
 | 
			
		||||
  ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
 | 
			
		||||
 | 
			
		||||
  explicit ValueUnion(size_t BuffSize)
 | 
			
		||||
      : Size(sizeof(DataT) + BuffSize),
 | 
			
		||||
        Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
 | 
			
		||||
 | 
			
		||||
  ValueUnion(ValueUnion&& other) = default;
 | 
			
		||||
 | 
			
		||||
  explicit operator bool() const { return bool(Buff); }
 | 
			
		||||
 | 
			
		||||
  char* data() const { return Buff->bytes; }
 | 
			
		||||
 | 
			
		||||
  std::string GetAsString() const { return std::string(data()); }
 | 
			
		||||
 | 
			
		||||
  int64_t GetAsInteger() const {
 | 
			
		||||
    if (Size == sizeof(Buff->uint32_value))
 | 
			
		||||
      return static_cast<int32_t>(Buff->uint32_value);
 | 
			
		||||
    else if (Size == sizeof(Buff->uint64_value))
 | 
			
		||||
      return static_cast<int64_t>(Buff->uint64_value);
 | 
			
		||||
    BENCHMARK_UNREACHABLE();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  uint64_t GetAsUnsigned() const {
 | 
			
		||||
    if (Size == sizeof(Buff->uint32_value))
 | 
			
		||||
      return Buff->uint32_value;
 | 
			
		||||
    else if (Size == sizeof(Buff->uint64_value))
 | 
			
		||||
      return Buff->uint64_value;
 | 
			
		||||
    BENCHMARK_UNREACHABLE();
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template <class T, int N>
 | 
			
		||||
  std::array<T, N> GetAsArray() {
 | 
			
		||||
    const int ArrSize = sizeof(T) * N;
 | 
			
		||||
    CHECK_LE(ArrSize, Size);
 | 
			
		||||
    std::array<T, N> Arr;
 | 
			
		||||
    std::memcpy(Arr.data(), data(), ArrSize);
 | 
			
		||||
    return Arr;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
ValueUnion GetSysctlImp(std::string const& Name) {
 | 
			
		||||
  size_t CurBuffSize = 0;
 | 
			
		||||
  if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
 | 
			
		||||
    return ValueUnion();
 | 
			
		||||
 | 
			
		||||
  ValueUnion buff(CurBuffSize);
 | 
			
		||||
  if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
 | 
			
		||||
    return buff;
 | 
			
		||||
  return ValueUnion();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
BENCHMARK_MAYBE_UNUSED
 | 
			
		||||
bool GetSysctl(std::string const& Name, std::string* Out) {
 | 
			
		||||
  Out->clear();
 | 
			
		||||
  auto Buff = GetSysctlImp(Name);
 | 
			
		||||
  if (!Buff) return false;
 | 
			
		||||
  Out->assign(Buff.data());
 | 
			
		||||
  return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Tp,
 | 
			
		||||
          class = typename std::enable_if<std::is_integral<Tp>::value>::type>
 | 
			
		||||
bool GetSysctl(std::string const& Name, Tp* Out) {
 | 
			
		||||
  *Out = 0;
 | 
			
		||||
  auto Buff = GetSysctlImp(Name);
 | 
			
		||||
  if (!Buff) return false;
 | 
			
		||||
  *Out = static_cast<Tp>(Buff.GetAsUnsigned());
 | 
			
		||||
  return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Tp, size_t N>
 | 
			
		||||
bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
 | 
			
		||||
  auto Buff = GetSysctlImp(Name);
 | 
			
		||||
  if (!Buff) return false;
 | 
			
		||||
  *Out = Buff.GetAsArray<Tp, N>();
 | 
			
		||||
  return true;
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
template <class ArgT>
 | 
			
		||||
bool ReadFromFile(std::string const& fname, ArgT* arg) {
 | 
			
		||||
  *arg = ArgT();
 | 
			
		||||
  std::ifstream f(fname.c_str());
 | 
			
		||||
  if (!f.is_open()) return false;
 | 
			
		||||
  f >> *arg;
 | 
			
		||||
  return f.good();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
bool CpuScalingEnabled(int num_cpus) {
 | 
			
		||||
  // We don't have a valid CPU count, so don't even bother.
 | 
			
		||||
  if (num_cpus <= 0) return false;
 | 
			
		||||
#ifndef BENCHMARK_OS_WINDOWS
 | 
			
		||||
  // On Linux, the CPUfreq subsystem exposes CPU information as files on the
 | 
			
		||||
  // local file system. If reading the exported files fails, then we may not be
 | 
			
		||||
  // running on Linux, so we silently ignore all the read errors.
 | 
			
		||||
  std::string res;
 | 
			
		||||
  for (int cpu = 0; cpu < num_cpus; ++cpu) {
 | 
			
		||||
    std::string governor_file =
 | 
			
		||||
        StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
 | 
			
		||||
    if (ReadFromFile(governor_file, &res) && res != "performance") return true;
 | 
			
		||||
  }
 | 
			
		||||
#endif
 | 
			
		||||
  return false;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int CountSetBitsInCPUMap(std::string Val) {
 | 
			
		||||
  auto CountBits = [](std::string Part) {
 | 
			
		||||
    using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
 | 
			
		||||
    Part = "0x" + Part;
 | 
			
		||||
    CPUMask Mask(std::stoul(Part, nullptr, 16));
 | 
			
		||||
    return static_cast<int>(Mask.count());
 | 
			
		||||
  };
 | 
			
		||||
  size_t Pos;
 | 
			
		||||
  int total = 0;
 | 
			
		||||
  while ((Pos = Val.find(',')) != std::string::npos) {
 | 
			
		||||
    total += CountBits(Val.substr(0, Pos));
 | 
			
		||||
    Val = Val.substr(Pos + 1);
 | 
			
		||||
  }
 | 
			
		||||
  if (!Val.empty()) {
 | 
			
		||||
    total += CountBits(Val);
 | 
			
		||||
  }
 | 
			
		||||
  return total;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
BENCHMARK_MAYBE_UNUSED
 | 
			
		||||
std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
 | 
			
		||||
  std::vector<CPUInfo::CacheInfo> res;
 | 
			
		||||
  std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
 | 
			
		||||
  int Idx = 0;
 | 
			
		||||
  while (true) {
 | 
			
		||||
    CPUInfo::CacheInfo info;
 | 
			
		||||
    std::string FPath = StrCat(dir, "index", Idx++, "/");
 | 
			
		||||
    std::ifstream f(StrCat(FPath, "size").c_str());
 | 
			
		||||
    if (!f.is_open()) break;
 | 
			
		||||
    std::string suffix;
 | 
			
		||||
    f >> info.size;
 | 
			
		||||
    if (f.fail())
 | 
			
		||||
      PrintErrorAndDie("Failed while reading file '", FPath, "size'");
 | 
			
		||||
    if (f.good()) {
 | 
			
		||||
      f >> suffix;
 | 
			
		||||
      if (f.bad())
 | 
			
		||||
        PrintErrorAndDie(
 | 
			
		||||
            "Invalid cache size format: failed to read size suffix");
 | 
			
		||||
      else if (f && suffix != "K")
 | 
			
		||||
        PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
 | 
			
		||||
      else if (suffix == "K")
 | 
			
		||||
        info.size *= 1000;
 | 
			
		||||
    }
 | 
			
		||||
    if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
 | 
			
		||||
      PrintErrorAndDie("Failed to read from file ", FPath, "type");
 | 
			
		||||
    if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
 | 
			
		||||
      PrintErrorAndDie("Failed to read from file ", FPath, "level");
 | 
			
		||||
    std::string map_str;
 | 
			
		||||
    if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
 | 
			
		||||
      PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
 | 
			
		||||
    info.num_sharing = CountSetBitsInCPUMap(map_str);
 | 
			
		||||
    res.push_back(info);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  return res;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#ifdef BENCHMARK_OS_MACOSX
 | 
			
		||||
std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
 | 
			
		||||
  std::vector<CPUInfo::CacheInfo> res;
 | 
			
		||||
  std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
 | 
			
		||||
  GetSysctl("hw.cacheconfig", &CacheCounts);
 | 
			
		||||
 | 
			
		||||
  struct {
 | 
			
		||||
    std::string name;
 | 
			
		||||
    std::string type;
 | 
			
		||||
    int level;
 | 
			
		||||
    size_t num_sharing;
 | 
			
		||||
  } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
 | 
			
		||||
               {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
 | 
			
		||||
               {"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
 | 
			
		||||
               {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
 | 
			
		||||
  for (auto& C : Cases) {
 | 
			
		||||
    int val;
 | 
			
		||||
    if (!GetSysctl(C.name, &val)) continue;
 | 
			
		||||
    CPUInfo::CacheInfo info;
 | 
			
		||||
    info.type = C.type;
 | 
			
		||||
    info.level = C.level;
 | 
			
		||||
    info.size = val;
 | 
			
		||||
    info.num_sharing = static_cast<int>(C.num_sharing);
 | 
			
		||||
    res.push_back(std::move(info));
 | 
			
		||||
  }
 | 
			
		||||
  return res;
 | 
			
		||||
}
 | 
			
		||||
#elif defined(BENCHMARK_OS_WINDOWS)
 | 
			
		||||
std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
 | 
			
		||||
  std::vector<CPUInfo::CacheInfo> res;
 | 
			
		||||
  DWORD buffer_size = 0;
 | 
			
		||||
  using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
 | 
			
		||||
  using CInfo = CACHE_DESCRIPTOR;
 | 
			
		||||
 | 
			
		||||
  using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
 | 
			
		||||
  GetLogicalProcessorInformation(nullptr, &buffer_size);
 | 
			
		||||
  UPtr buff((PInfo*)malloc(buffer_size), &std::free);
 | 
			
		||||
  if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
 | 
			
		||||
    PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
 | 
			
		||||
                     GetLastError());
 | 
			
		||||
 | 
			
		||||
  PInfo* it = buff.get();
 | 
			
		||||
  PInfo* end = buff.get() + (buffer_size / sizeof(PInfo));
 | 
			
		||||
 | 
			
		||||
  for (; it != end; ++it) {
 | 
			
		||||
    if (it->Relationship != RelationCache) continue;
 | 
			
		||||
    using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
 | 
			
		||||
    BitSet B(it->ProcessorMask);
 | 
			
		||||
    // To prevent duplicates, only consider caches where CPU 0 is specified
 | 
			
		||||
    if (!B.test(0)) continue;
 | 
			
		||||
    CInfo* Cache = &it->Cache;
 | 
			
		||||
    CPUInfo::CacheInfo C;
 | 
			
		||||
    C.num_sharing = B.count();
 | 
			
		||||
    C.level = Cache->Level;
 | 
			
		||||
    C.size = Cache->Size;
 | 
			
		||||
    switch (Cache->Type) {
 | 
			
		||||
      case CacheUnified:
 | 
			
		||||
        C.type = "Unified";
 | 
			
		||||
        break;
 | 
			
		||||
      case CacheInstruction:
 | 
			
		||||
        C.type = "Instruction";
 | 
			
		||||
        break;
 | 
			
		||||
      case CacheData:
 | 
			
		||||
        C.type = "Data";
 | 
			
		||||
        break;
 | 
			
		||||
      case CacheTrace:
 | 
			
		||||
        C.type = "Trace";
 | 
			
		||||
        break;
 | 
			
		||||
      default:
 | 
			
		||||
        C.type = "Unknown";
 | 
			
		||||
        break;
 | 
			
		||||
    }
 | 
			
		||||
    res.push_back(C);
 | 
			
		||||
  }
 | 
			
		||||
  return res;
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
 | 
			
		||||
#ifdef BENCHMARK_OS_MACOSX
 | 
			
		||||
  return GetCacheSizesMacOSX();
 | 
			
		||||
#elif defined(BENCHMARK_OS_WINDOWS)
 | 
			
		||||
  return GetCacheSizesWindows();
 | 
			
		||||
#else
 | 
			
		||||
  return GetCacheSizesFromKVFS();
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int GetNumCPUs() {
 | 
			
		||||
#ifdef BENCHMARK_HAS_SYSCTL
 | 
			
		||||
  int NumCPU = -1;
 | 
			
		||||
  if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
 | 
			
		||||
  fprintf(stderr, "Err: %s\n", strerror(errno));
 | 
			
		||||
  std::exit(EXIT_FAILURE);
 | 
			
		||||
#elif defined(BENCHMARK_OS_WINDOWS)
 | 
			
		||||
  SYSTEM_INFO sysinfo;
 | 
			
		||||
  // Use memset as opposed to = {} to avoid GCC missing initializer false
 | 
			
		||||
  // positives.
 | 
			
		||||
  std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
 | 
			
		||||
  GetSystemInfo(&sysinfo);
 | 
			
		||||
  return sysinfo.dwNumberOfProcessors;  // number of logical
 | 
			
		||||
                                        // processors in the current
 | 
			
		||||
                                        // group
 | 
			
		||||
#else
 | 
			
		||||
  int NumCPUs = 0;
 | 
			
		||||
  int MaxID = -1;
 | 
			
		||||
  std::ifstream f("/proc/cpuinfo");
 | 
			
		||||
  if (!f.is_open()) {
 | 
			
		||||
    std::cerr << "failed to open /proc/cpuinfo\n";
 | 
			
		||||
    return -1;
 | 
			
		||||
  }
 | 
			
		||||
  const std::string Key = "processor";
 | 
			
		||||
  std::string ln;
 | 
			
		||||
  while (std::getline(f, ln)) {
 | 
			
		||||
    if (ln.empty()) continue;
 | 
			
		||||
    size_t SplitIdx = ln.find(':');
 | 
			
		||||
    std::string value;
 | 
			
		||||
    if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
 | 
			
		||||
    if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
 | 
			
		||||
      NumCPUs++;
 | 
			
		||||
      if (!value.empty()) {
 | 
			
		||||
        int CurID = std::stoi(value);
 | 
			
		||||
        MaxID = std::max(CurID, MaxID);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  if (f.bad()) {
 | 
			
		||||
    std::cerr << "Failure reading /proc/cpuinfo\n";
 | 
			
		||||
    return -1;
 | 
			
		||||
  }
 | 
			
		||||
  if (!f.eof()) {
 | 
			
		||||
    std::cerr << "Failed to read to end of /proc/cpuinfo\n";
 | 
			
		||||
    return -1;
 | 
			
		||||
  }
 | 
			
		||||
  f.close();
 | 
			
		||||
 | 
			
		||||
  if ((MaxID + 1) != NumCPUs) {
 | 
			
		||||
    fprintf(stderr,
 | 
			
		||||
            "CPU ID assignments in /proc/cpuinfo seem messed up."
 | 
			
		||||
            " This is usually caused by a bad BIOS.\n");
 | 
			
		||||
  }
 | 
			
		||||
  return NumCPUs;
 | 
			
		||||
#endif
 | 
			
		||||
  BENCHMARK_UNREACHABLE();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
double GetCPUCyclesPerSecond() {
 | 
			
		||||
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
 | 
			
		||||
  long freq;
 | 
			
		||||
 | 
			
		||||
  // If the kernel is exporting the tsc frequency use that. There are issues
 | 
			
		||||
  // where cpuinfo_max_freq cannot be relied on because the BIOS may be
 | 
			
		||||
  // exporting an invalid p-state (on x86) or p-states may be used to put the
 | 
			
		||||
  // processor in a new mode (turbo mode). Essentially, those frequencies
 | 
			
		||||
  // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
 | 
			
		||||
  // well.
 | 
			
		||||
  if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
 | 
			
		||||
      // If CPU scaling is in effect, we want to use the *maximum* frequency,
 | 
			
		||||
      // not whatever CPU speed some random processor happens to be using now.
 | 
			
		||||
      || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
 | 
			
		||||
                      &freq)) {
 | 
			
		||||
    // The value is in kHz (as the file name suggests).  For example, on a
 | 
			
		||||
    // 2GHz warpstation, the file contains the value "2000000".
 | 
			
		||||
    return freq * 1000.0;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  const double error_value = -1;
 | 
			
		||||
  double bogo_clock = error_value;
 | 
			
		||||
 | 
			
		||||
  std::ifstream f("/proc/cpuinfo");
 | 
			
		||||
  if (!f.is_open()) {
 | 
			
		||||
    std::cerr << "failed to open /proc/cpuinfo\n";
 | 
			
		||||
    return error_value;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  auto startsWithKey = [](std::string const& Value, std::string const& Key) {
 | 
			
		||||
    if (Key.size() > Value.size()) return false;
 | 
			
		||||
    auto Cmp = [&](char X, char Y) {
 | 
			
		||||
      return std::tolower(X) == std::tolower(Y);
 | 
			
		||||
    };
 | 
			
		||||
    return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  std::string ln;
 | 
			
		||||
  while (std::getline(f, ln)) {
 | 
			
		||||
    if (ln.empty()) continue;
 | 
			
		||||
    size_t SplitIdx = ln.find(':');
 | 
			
		||||
    std::string value;
 | 
			
		||||
    if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
 | 
			
		||||
    // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
 | 
			
		||||
    // accept positive values. Some environments (virtual machines) report zero,
 | 
			
		||||
    // which would cause infinite looping in WallTime_Init.
 | 
			
		||||
    if (startsWithKey(ln, "cpu MHz")) {
 | 
			
		||||
      if (!value.empty()) {
 | 
			
		||||
        double cycles_per_second = std::stod(value) * 1000000.0;
 | 
			
		||||
        if (cycles_per_second > 0) return cycles_per_second;
 | 
			
		||||
      }
 | 
			
		||||
    } else if (startsWithKey(ln, "bogomips")) {
 | 
			
		||||
      if (!value.empty()) {
 | 
			
		||||
        bogo_clock = std::stod(value) * 1000000.0;
 | 
			
		||||
        if (bogo_clock < 0.0) bogo_clock = error_value;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  if (f.bad()) {
 | 
			
		||||
    std::cerr << "Failure reading /proc/cpuinfo\n";
 | 
			
		||||
    return error_value;
 | 
			
		||||
  }
 | 
			
		||||
  if (!f.eof()) {
 | 
			
		||||
    std::cerr << "Failed to read to end of /proc/cpuinfo\n";
 | 
			
		||||
    return error_value;
 | 
			
		||||
  }
 | 
			
		||||
  f.close();
 | 
			
		||||
  // If we found the bogomips clock, but nothing better, we'll use it (but
 | 
			
		||||
  // we're not happy about it); otherwise, fallback to the rough estimation
 | 
			
		||||
  // below.
 | 
			
		||||
  if (bogo_clock >= 0.0) return bogo_clock;
 | 
			
		||||
 | 
			
		||||
#elif defined BENCHMARK_HAS_SYSCTL
 | 
			
		||||
  constexpr auto* FreqStr =
 | 
			
		||||
#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
 | 
			
		||||
      "machdep.tsc_freq";
 | 
			
		||||
#else
 | 
			
		||||
      "hw.cpufrequency";
 | 
			
		||||
#endif
 | 
			
		||||
  unsigned long long hz = 0;
 | 
			
		||||
  if (GetSysctl(FreqStr, &hz)) return hz;
 | 
			
		||||
 | 
			
		||||
  fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
 | 
			
		||||
          FreqStr, strerror(errno));
 | 
			
		||||
 | 
			
		||||
#elif defined BENCHMARK_OS_WINDOWS
 | 
			
		||||
  // In NT, read MHz from the registry. If we fail to do so or we're in win9x
 | 
			
		||||
  // then make a crude estimate.
 | 
			
		||||
  DWORD data, data_size = sizeof(data);
 | 
			
		||||
  if (IsWindowsXPOrGreater() &&
 | 
			
		||||
      SUCCEEDED(
 | 
			
		||||
          SHGetValueA(HKEY_LOCAL_MACHINE,
 | 
			
		||||
                      "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
 | 
			
		||||
                      "~MHz", nullptr, &data, &data_size)))
 | 
			
		||||
    return static_cast<double>((int64_t)data *
 | 
			
		||||
                               (int64_t)(1000 * 1000));  // was mhz
 | 
			
		||||
#endif
 | 
			
		||||
  // If we've fallen through, attempt to roughly estimate the CPU clock rate.
 | 
			
		||||
  const int estimate_time_ms = 1000;
 | 
			
		||||
  const auto start_ticks = cycleclock::Now();
 | 
			
		||||
  SleepForMilliseconds(estimate_time_ms);
 | 
			
		||||
  return static_cast<double>(cycleclock::Now() - start_ticks);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}  // end namespace
 | 
			
		||||
 | 
			
		||||
const CPUInfo& CPUInfo::Get() {
 | 
			
		||||
  static const CPUInfo* info = new CPUInfo();
 | 
			
		||||
  return *info;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
CPUInfo::CPUInfo()
 | 
			
		||||
    : num_cpus(GetNumCPUs()),
 | 
			
		||||
      cycles_per_second(GetCPUCyclesPerSecond()),
 | 
			
		||||
      caches(GetCacheSizes()),
 | 
			
		||||
      scaling_enabled(CpuScalingEnabled(num_cpus)) {}
 | 
			
		||||
 | 
			
		||||
}  // end namespace benchmark
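
For reference, a short sketch of consuming the detected values, mirroring how PrintBasicContext in reporter.cc reads CPUInfo; it assumes linking against sysinfo.cc and the library's headers:

// Illustrative only.
#include <iostream>

#include "benchmark/benchmark.h"

int main() {
  const benchmark::CPUInfo& info = benchmark::CPUInfo::Get();
  std::cout << info.num_cpus << " CPU(s) @ "
            << (info.cycles_per_second / 1000000.0) << " MHz, scaling "
            << (info.scaling_enabled ? "enabled" : "disabled") << "\n";
  for (const auto& c : info.caches)
    std::cout << "  L" << c.level << " " << c.type << " " << (c.size / 1000)
              << "K (x" << c.num_sharing << ")\n";
  return 0;
}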

212 benchmarks/thirdparty/benchmark/src/timers.cc vendored Normal file
@@ -0,0 +1,212 @@
// Copyright 2015 Google Inc. All rights reserved.
 | 
			
		||||
//
 | 
			
		||||
// Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
// you may not use this file except in compliance with the License.
 | 
			
		||||
// You may obtain a copy of the License at
 | 
			
		||||
//
 | 
			
		||||
//     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
//
 | 
			
		||||
// Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
// distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
// See the License for the specific language governing permissions and
 | 
			
		||||
// limitations under the License.
 | 
			
		||||
 | 
			
		||||
#include "timers.h"
 | 
			
		||||
#include "internal_macros.h"
 | 
			
		||||
 | 
			
		||||
#ifdef BENCHMARK_OS_WINDOWS
 | 
			
		||||
#include <Shlwapi.h>
 | 
			
		||||
#include <VersionHelpers.h>
 | 
			
		||||
#include <Windows.h>
 | 
			
		||||
#else
 | 
			
		||||
#include <fcntl.h>
 | 
			
		||||
#include <sys/resource.h>
 | 
			
		||||
#include <sys/time.h>
 | 
			
		||||
#include <sys/types.h>  // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
 | 
			
		||||
#include <unistd.h>
 | 
			
		||||
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
 | 
			
		||||
#include <sys/sysctl.h>
 | 
			
		||||
#endif
 | 
			
		||||
#if defined(BENCHMARK_OS_MACOSX)
 | 
			
		||||
#include <mach/mach_init.h>
 | 
			
		||||
#include <mach/mach_port.h>
 | 
			
		||||
#include <mach/thread_act.h>
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#ifdef BENCHMARK_OS_EMSCRIPTEN
 | 
			
		||||
#include <emscripten.h>
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#include <cerrno>
 | 
			
		||||
#include <cstdint>
 | 
			
		||||
#include <cstdio>
 | 
			
		||||
#include <cstdlib>
 | 
			
		||||
#include <cstring>
 | 
			
		||||
#include <ctime>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <limits>
 | 
			
		||||
#include <mutex>
 | 
			
		||||
 | 
			
		||||
#include "check.h"
 | 
			
		||||
#include "log.h"
 | 
			
		||||
#include "sleep.h"
 | 
			
		||||
#include "string_util.h"
 | 
			
		||||
 | 
			
		||||
namespace benchmark {
 | 
			
		||||
 | 
			
		||||
// Suppress unused warnings on helper functions.
 | 
			
		||||
#if defined(__GNUC__)
 | 
			
		||||
#pragma GCC diagnostic ignored "-Wunused-function"
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
namespace {
 | 
			
		||||
#if defined(BENCHMARK_OS_WINDOWS)
 | 
			
		||||
double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
 | 
			
		||||
  ULARGE_INTEGER kernel;
 | 
			
		||||
  ULARGE_INTEGER user;
 | 
			
		||||
  kernel.HighPart = kernel_time.dwHighDateTime;
 | 
			
		||||
  kernel.LowPart = kernel_time.dwLowDateTime;
 | 
			
		||||
  user.HighPart = user_time.dwHighDateTime;
 | 
			
		||||
  user.LowPart = user_time.dwLowDateTime;
 | 
			
		||||
  return (static_cast<double>(kernel.QuadPart) +
 | 
			
		||||
          static_cast<double>(user.QuadPart)) *
 | 
			
		||||
         1e-7;
 | 
			
		||||
}
 | 
			
		||||
#else
 | 
			
		||||
double MakeTime(struct rusage const& ru) {
  return (static_cast<double>(ru.ru_utime.tv_sec) +
          static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
          static_cast<double>(ru.ru_stime.tv_sec) +
          static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
}
#endif
#if defined(BENCHMARK_OS_MACOSX)
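// Converts Mach thread accounting info (user + system time) into seconds.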
double MakeTime(thread_basic_info_data_t const& info) {
  return (static_cast<double>(info.user_time.seconds) +
          static_cast<double>(info.user_time.microseconds) * 1e-6 +
          static_cast<double>(info.system_time.seconds) +
          static_cast<double>(info.system_time.microseconds) * 1e-6);
}
#endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
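// Converts a timespec into fractional seconds.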
double MakeTime(struct timespec const& ts) {
  return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
}
#endif

BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
  std::cerr << "ERROR: " << msg << std::endl;
  std::exit(EXIT_FAILURE);
}

}  // end namespace

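// Returns the accumulated CPU time of the current process in seconds, using the
// best facility available on the target OS; if the underlying call fails, the
// error is reported and the process exits via DiagnoseAndExit().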
double ProcessCPUUsage() {
#if defined(BENCHMARK_OS_WINDOWS)
  HANDLE proc = GetCurrentProcess();
  FILETIME creation_time;
  FILETIME exit_time;
  FILETIME kernel_time;
  FILETIME user_time;
  if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
                      &user_time))
    return MakeTime(kernel_time, user_time);
  DiagnoseAndExit("GetProcessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
  // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
  // Use the Emscripten-specific API. The reported CPU time will be exactly the
  // same as the total time, but this is ok because there are no long-latency
  // synchronous system calls in Emscripten.
  return emscripten_get_now() * 1e-3;
#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
  // FIXME: We want to use clock_gettime, but it is not available on macOS 10.11.
  // See https://github.com/google/benchmark/pull/292
  struct timespec spec;
  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
    return MakeTime(spec);
  DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else
  struct rusage ru;
  if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
  DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif
}

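// Returns the accumulated CPU time of the calling thread in seconds, falling
// back to ProcessCPUUsage() on platforms without per-thread timers
// (Emscripten, RTEMS).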
double ThreadCPUUsage() {
#if defined(BENCHMARK_OS_WINDOWS)
  HANDLE this_thread = GetCurrentThread();
  FILETIME creation_time;
  FILETIME exit_time;
  FILETIME kernel_time;
  FILETIME user_time;
  GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
                 &user_time);
  return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX)
  // FIXME: We want to use clock_gettime, but it is not available on macOS 10.11.
  // See https://github.com/google/benchmark/pull/292
  mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t info;
  mach_port_t thread = pthread_mach_thread_np(pthread_self());
  if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
      KERN_SUCCESS) {
    return MakeTime(info);
  }
  DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
  // Emscripten doesn't support traditional threads.
  return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_RTEMS)
  // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
  // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
  return ProcessCPUUsage();
#elif defined(CLOCK_THREAD_CPUTIME_ID)
  struct timespec ts;
  if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
  DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else
#error Per-thread timing is not available on your system.
#endif
}

namespace {

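// Formats the current date and time as a string, in local time when 'local' is
// true and in UTC otherwise.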
std::string DateTimeString(bool local) {
  typedef std::chrono::system_clock Clock;
  std::time_t now = Clock::to_time_t(Clock::now());
  const std::size_t kStorageSize = 128;
  char storage[kStorageSize];
  std::size_t written;

  if (local) {
#if defined(BENCHMARK_OS_WINDOWS)
    written =
        std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
    std::tm timeinfo;
    std::memset(&timeinfo, 0, sizeof(std::tm));
    ::localtime_r(&now, &timeinfo);
    written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
  } else {
#if defined(BENCHMARK_OS_WINDOWS)
    written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
    std::tm timeinfo;
    std::memset(&timeinfo, 0, sizeof(std::tm));
    ::gmtime_r(&now, &timeinfo);
    written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
  }
  CHECK(written < kStorageSize);
  ((void)written);  // prevent an unused-variable warning in optimized builds.
  return std::string(storage);
}

}  // end namespace

std::string LocalDateTimeString() { return DateTimeString(true); }

}  // end namespace benchmark
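For orientation, a minimal usage sketch of the helpers above (illustrative only, not part of the vendored sources; it assumes a translation unit compiled and linked alongside timers.cc inside the library's src/ directory so that "timers.h" resolves):

#include <cstdio>

#include "timers.h"

int main() {
  const double wall_start = benchmark::ChronoClockNow();   // wall-clock seconds
  const double cpu_start = benchmark::ThreadCPUUsage();    // thread CPU seconds
  volatile double sink = 0.0;
  for (int i = 0; i < 1000000; ++i) sink = sink + i * 0.5;  // busy work to burn CPU time
  std::printf("%s  wall: %.6f s  cpu: %.6f s\n",
              benchmark::LocalDateTimeString().c_str(),
              benchmark::ChronoClockNow() - wall_start,
              benchmark::ThreadCPUUsage() - cpu_start);
  return 0;
}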
							
								
								
									
48  benchmarks/thirdparty/benchmark/src/timers.h  vendored  Normal file

@@ -0,0 +1,48 @@
#ifndef BENCHMARK_TIMERS_H
#define BENCHMARK_TIMERS_H

#include <chrono>
#include <string>

namespace benchmark {

// Return the total CPU time (user + system) used by the current process, in seconds.
double ProcessCPUUsage();

// Return the total CPU time used by the children of the current process, in seconds.
double ChildrenCPUUsage();

// Return the total CPU time used by the current thread, in seconds.
double ThreadCPUUsage();

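// Clock selection: prefer std::chrono::high_resolution_clock when it is steady
// (monotonic); otherwise fall back to std::chrono::steady_clock, so elapsed-time
// measurements are unaffected by system clock adjustments.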
#if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
struct ChooseSteadyClock {
  typedef std::chrono::high_resolution_clock type;
};

template <>
struct ChooseSteadyClock<false> {
  typedef std::chrono::steady_clock type;
};
#endif

struct ChooseClockType {
#if defined(HAVE_STEADY_CLOCK)
  typedef ChooseSteadyClock<>::type type;
#else
  typedef std::chrono::high_resolution_clock type;
#endif
};

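// Returns the current time of the selected clock as a double in seconds,
// suitable for computing wall-clock elapsed time by subtraction.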
inline double ChronoClockNow() {
  typedef ChooseClockType::type ClockType;
  using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
  return FpSeconds(ClockType::now().time_since_epoch()).count();
}

std::string LocalDateTimeString();

}  // end namespace benchmark

#endif  // BENCHMARK_TIMERS_H
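A small compile-time check illustrating the clock selection above (illustrative only, not part of the vendored header; it assumes a translation unit that includes "timers.h"):

#include "timers.h"

#if defined(HAVE_STEADY_CLOCK)
// With HAVE_STEADY_CLOCK defined, ChooseClockType::type is always monotonic:
// either high_resolution_clock (when it is itself steady) or steady_clock.
static_assert(benchmark::ChooseClockType::type::is_steady,
              "ChooseClockType should resolve to a steady clock");
#endif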