File benchmark-1.5.5.obscpio of Package benchmark
benchmark-1.5.5/AUTHORS

# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Albert Pretorius <pretoalb@gmail.com>
Alex Steele <steeleal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Carto
Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dirac Research
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Gergő Szitár <szitar.gergo@gmail.com>
Google Inc.
International Business Machines Corporation
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Min-Yih Hsu <yihshyng223@gmail.com>
MongoDB Inc.
Nick Hutchinson <nshutchinson@gmail.com>
Norman Heino <norman.heino@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steinar H. Gunderson <sgunderson@bigfoot.com>
Stripe, Inc.
Tobias Schmidt <tobias.schmidt@in.tum.de>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
benchmark-1.5.5/BUILD.bazel

load("@rules_cc//cc:defs.bzl", "cc_library")
licenses(["notice"])
config_setting(
name = "windows",
values = {
"cpu": "x64_windows",
},
visibility = [":__subpackages__"],
)
cc_library(
name = "benchmark",
srcs = glob(
[
"src/*.cc",
"src/*.h",
],
exclude = ["src/benchmark_main.cc"],
),
hdrs = ["include/benchmark/benchmark.h"],
linkopts = select({
":windows": ["-DEFAULTLIB:shlwapi.lib"],
"//conditions:default": ["-pthread"],
}),
strip_include_prefix = "include",
visibility = ["//visibility:public"],
)
cc_library(
name = "benchmark_main",
srcs = ["src/benchmark_main.cc"],
hdrs = ["include/benchmark/benchmark.h"],
strip_include_prefix = "include",
visibility = ["//visibility:public"],
deps = [":benchmark"],
)
cc_library(
name = "benchmark_internal_headers",
hdrs = glob(["src/*.h"]),
visibility = ["//test:__pkg__"],
)
benchmark-1.5.5/CMakeLists.txt

cmake_minimum_required (VERSION 3.5.1)
foreach(p
CMP0048 # OK to clear PROJECT_VERSION on project()
CMP0054 # CMake 3.1
CMP0056 # export EXE_LINKER_FLAGS to try_run
CMP0057 # Support no if() IN_LIST operator
CMP0063 # Honor visibility properties for all targets
CMP0077 # Allow option() overrides in importing projects
)
if(POLICY ${p})
cmake_policy(SET ${p} NEW)
endif()
endforeach()
project (benchmark VERSION 1.5.5 LANGUAGES CXX)
option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
if(NOT MSVC)
option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
else()
set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC" FORCE)
endif()
option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)
# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
# may require downloading the source code.
option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF)
# This option can be used to disable building and running unit tests which depend on gtest
# in cases where it is not possible to build or find a valid version of gtest.
option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" OFF)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
if(MSVC)
# As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and
# cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the
# undocumented, but working variable.
# See https://gitlab.kitware.com/cmake/cmake/-/issues/15170
set(CMAKE_SYSTEM_PROCESSOR ${MSVC_CXX_ARCHITECTURE_ID})
if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ARM")
set(CMAKE_CROSSCOMPILING TRUE)
endif()
endif()
set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
function(should_enable_assembly_tests)
if(CMAKE_BUILD_TYPE)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
# FIXME: The --coverage flag needs to be removed when building assembly
# tests for this to work.
return()
endif()
endif()
if (MSVC)
return()
elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
return()
elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
# FIXME: Make these work on 32 bit builds
return()
elseif(BENCHMARK_BUILD_32_BITS)
# FIXME: Make these work on 32 bit builds
return()
endif()
find_program(LLVM_FILECHECK_EXE FileCheck)
if (LLVM_FILECHECK_EXE)
set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE)
message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}")
else()
message(STATUS "Failed to find LLVM FileCheck")
return()
endif()
set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE)
endfunction()
should_enable_assembly_tests()
# This option controls the building and running of the assembly verification tests
option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests"
${ENABLE_ASSEMBLY_TESTS_DEFAULT})
# Make sure we can import our CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
# Read the git tags to determine the project version
include(GetGitVersion)
get_git_version(GIT_VERSION)
# If no git version can be determined, use the version
# from the project() command
if ("${GIT_VERSION}" STREQUAL "0.0.0")
set(VERSION "${benchmark_VERSION}")
else()
set(VERSION "${GIT_VERSION}")
endif()
# Tell the user what versions we are using
message(STATUS "Version: ${VERSION}")
# The version of the libraries
set(GENERIC_LIB_VERSION ${VERSION})
string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION)
# Import our CMake modules
include(CheckCXXCompilerFlag)
include(AddCXXCompilerFlag)
include(CXXFeatureCheck)
if (BENCHMARK_BUILD_32_BITS)
add_required_cxx_compiler_flag(-m32)
endif()
if (MSVC)
# Turn compiler warnings up to 11
string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-EHs-)
add_cxx_compiler_flag(-EHa-)
add_definitions(-D_HAS_EXCEPTIONS=0)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG")
endif()
else()
# Try to enable C++11. Don't use C++14 because it doesn't work in some
# configurations.
add_cxx_compiler_flag(-std=c++11)
if (NOT HAVE_CXX_FLAG_STD_CXX11)
add_cxx_compiler_flag(-std=c++0x)
endif()
# Turn compiler warnings up to 11
add_cxx_compiler_flag(-Wall)
add_cxx_compiler_flag(-Wextra)
add_cxx_compiler_flag(-Wshadow)
add_cxx_compiler_flag(-Werror RELEASE)
add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
add_cxx_compiler_flag(-Werror MINSIZEREL)
if (NOT BENCHMARK_ENABLE_TESTING)
# Only add -Wsuggest-override when not compiling the tests, as gtest does not use 'override'.
add_cxx_compiler_flag(-Wsuggest-override)
endif()
add_cxx_compiler_flag(-pedantic)
add_cxx_compiler_flag(-pedantic-errors)
add_cxx_compiler_flag(-Wshorten-64-to-32)
add_cxx_compiler_flag(-fstrict-aliasing)
# Disable warnings regarding deprecated parts of the library while building
# and testing those parts of the library.
add_cxx_compiler_flag(-Wno-deprecated-declarations)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
# Intel silently ignores '-Wno-deprecated-declarations',
# warning no. 1786 must be explicitly disabled.
# See #631 for rationale.
add_cxx_compiler_flag(-wd1786)
endif()
# Disable deprecation warnings for release builds (when -Werror is enabled).
add_cxx_compiler_flag(-Wno-deprecated RELEASE)
add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO)
add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-fno-exceptions)
endif()
if (HAVE_CXX_FLAG_FSTRICT_ALIASING)
if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing
add_cxx_compiler_flag(-Wstrict-aliasing)
endif()
endif()
# ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
# (because of deprecated overload)
add_cxx_compiler_flag(-wd654)
add_cxx_compiler_flag(-Wthread-safety)
if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
endif()
# On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a
# predefined macro, which turns on all of the wonderful libc extensions.
# However g++ doesn't do this in Cygwin so we have to define it ourselves
# since we depend on GNU/POSIX/BSD extensions.
if (CYGWIN)
add_definitions(-D_GNU_SOURCE=1)
endif()
if (QNXNTO)
add_definitions(-D_QNX_SOURCE)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
add_cxx_compiler_flag(-flto)
add_cxx_compiler_flag(-Wno-lto-type-mismatch)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
find_program(GCC_AR gcc-ar)
if (GCC_AR)
set(CMAKE_AR ${GCC_AR})
endif()
find_program(GCC_RANLIB gcc-ranlib)
if (GCC_RANLIB)
set(CMAKE_RANLIB ${GCC_RANLIB})
endif()
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
include(llvm-toolchain)
endif()
endif()
# Coverage build type
set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}"
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE)
set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}"
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE)
set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}"
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE)
mark_as_advanced(
BENCHMARK_CXX_FLAGS_COVERAGE
BENCHMARK_EXE_LINKER_FLAGS_COVERAGE
BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE)
set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.")
add_cxx_compiler_flag(--coverage COVERAGE)
endif()
if (BENCHMARK_USE_LIBCXX)
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
add_cxx_compiler_flag(-stdlib=libc++)
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR
"${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
add_cxx_compiler_flag(-nostdinc++)
message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
# Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
# configuration checks such as 'find_package(Threads)'
list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
# -lc++ cannot be added directly to CMAKE_<TYPE>_LINKER_FLAGS because
# linker flags appear before all linker inputs and -lc++ must appear after.
list(APPEND BENCHMARK_CXX_LIBRARIES c++)
else()
message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported with this compiler")
endif()
endif(BENCHMARK_USE_LIBCXX)
set(EXTRA_CXX_FLAGS "")
if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
# Clang on Windows fails to compile the regex feature check under C++11
set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14")
endif()
# C++ feature checks
# Determine the correct regular expression engine to use
cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS})
cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS})
cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS})
if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
endif()
if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(WARNING "Using std::regex with exceptions disabled is not fully supported")
endif()
cxx_feature_check(STEADY_CLOCK)
# Ensure we have pthreads
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
if (BENCHMARK_ENABLE_LIBPFM)
find_package(PFM)
endif()
# Set up directories
include_directories(${PROJECT_SOURCE_DIR}/include)
# Build the targets
add_subdirectory(src)
if (BENCHMARK_ENABLE_TESTING)
enable_testing()
if (BENCHMARK_ENABLE_GTEST_TESTS AND
NOT (TARGET gtest AND TARGET gtest_main AND
TARGET gmock AND TARGET gmock_main))
include(GoogleTest)
endif()
add_subdirectory(test)
endif()
benchmark-1.5.5/CONTRIBUTING.md

# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and set up a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages can be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request
benchmark-1.5.5/CONTRIBUTORS

# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Abhina Sreeskantharajan <abhina.sreeskantharajan@ibm.com>
Albert Pretorius <pretoalb@gmail.com>
Alex Steele <steelal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Cyrille Faucheux <cyrille.faucheux@gmail.com>
Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Fanbo Meng <fanbo.meng@ibm.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Geoffrey Martin-Noble <gcmn@google.com> <gmngeoffrey@gmail.com>
Gergő Szitár <szitar.gergo@gmail.com>
Hannes Hauswedell <h2@fsfe.org>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
John Millikin <jmillikin@stripe.com>
Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Min-Yih Hsu <yihshyng223@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Norman Heino <norman.heino@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
Robert Guo <robert.guo@mongodb.com>
Roman Lebedev <lebedev.ri@gmail.com>
Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steven Wan <wan.yu@ibm.com>
Tobias Schmidt <tobias.schmidt@in.tum.de>
Tobias Ulvgård <tobias.ulvgard@dirac.se>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
benchmark-1.5.5/LICENSE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
benchmark-1.5.5/README.md

# Benchmark
[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test)
[![bazel](https://github.com/google/benchmark/actions/workflows/bazel.yml/badge.svg)](https://github.com/google/benchmark/actions/workflows/bazel.yml)
[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint)
[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings)
[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
A library to benchmark code snippets, similar to unit tests. Example:
```c++
#include <benchmark/benchmark.h>
static void BM_SomeFunction(benchmark::State& state) {
// Perform setup here
for (auto _ : state) {
// This code gets timed
SomeFunction();
}
}
// Register the function as a benchmark
BENCHMARK(BM_SomeFunction);
// Run the benchmark
BENCHMARK_MAIN();
```
To get started, see [Requirements](#requirements) and
[Installation](#installation). See [Usage](#usage) for a full example and the
[User Guide](#user-guide) for a more comprehensive feature overview.
It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md)
as some of the structural aspects of the APIs are similar.
### Resources
[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
IRC channels:
* [libera](https://libera.chat) #benchmark
[Additional Tooling Documentation](docs/tools.md)
[Assembly Testing Documentation](docs/AssemblyTests.md)
## Requirements
The library can be used with C++03. However, it requires C++11 to build,
including compiler and standard library support.
The following minimum versions are required to build the library:
* GCC 4.8
* Clang 3.4
* Visual Studio 14 2015
* Intel 2015 Update 1
See [Platform-Specific Build Instructions](#platform-specific-build-instructions).
## Installation
This describes the installation process using cmake. As prerequisites, you'll
need git and cmake installed.
_See [dependencies.md](dependencies.md) for more details regarding supported
versions of build tools._
```bash
# Check out the library.
$ git clone https://github.com/google/benchmark.git
# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
$ git clone https://github.com/google/googletest.git benchmark/googletest
# Go to the library root directory
$ cd benchmark
# Make a build directory to place the build output.
$ cmake -E make_directory "build"
# Generate build system files with cmake.
$ cmake -E chdir "build" cmake -DCMAKE_BUILD_TYPE=Release ../
# or, starting with CMake 3.13, use a simpler form:
# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
# Build the library.
$ cmake --build "build" --config Release
```
This builds the `benchmark` and `benchmark_main` libraries and tests.
On a unix system, the build directory should now look something like this:
```
/benchmark
/build
/src
/libbenchmark.a
/libbenchmark_main.a
/test
...
```
Next, you can run the tests to check the build.
```bash
$ cmake -E chdir "build" ctest --build-config Release
```
If you want to install the library globally, also run:
```
sudo cmake --build "build" --config Release --target install
```
Note that Google Benchmark requires Google Test to build and run the tests. This
dependency can be provided in two ways:
* Check out the Google Test sources into `benchmark/googletest` as above.
* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
configuration, the library will automatically download and build any required
dependencies.
If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
to `CMAKE_ARGS`.
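For example, a configure step that skips the gtest-based tests, mirroring the
commands above, might look like this:

```bash
# Generate build system files without the gtest-dependent unit tests.
$ cmake -E chdir "build" cmake -DBENCHMARK_ENABLE_GTEST_TESTS=OFF -DCMAKE_BUILD_TYPE=Release ../
```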
### Debug vs Release
By default, benchmark builds as a debug library. You will see a warning in the
output when this is the case. To build it as a release library instead, add
`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
above. The use of `--config Release` in build commands is needed to properly
support multi-configuration tools (like Visual Studio for example) and can be
skipped for other build systems (like Makefile).
To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
generating the build system files.
If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
cache variables, if autodetection fails.
If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
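As a sketch, an LTO configuration with gcc might look like the following; the
tool paths are illustrative and only needed if autodetection fails:

```bash
$ cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true \
    -DGCC_AR=/usr/bin/gcc-ar -DGCC_RANLIB=/usr/bin/gcc-ranlib -S . -B "build"
```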
### Stable and Experimental Library Versions
The main branch contains the latest stable version of the benchmarking library;
the API of which can be considered largely stable, with source breaking changes
being made only upon the release of a new major version.
Newer, experimental, features are implemented and tested on the
[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.
## Usage
### Basic usage
Define a function that executes the code to measure, register it as a benchmark
function using the `BENCHMARK` macro, and ensure an appropriate `main` function
is available:
```c++
#include <benchmark/benchmark.h>
static void BM_StringCreation(benchmark::State& state) {
for (auto _ : state)
std::string empty_string;
}
// Register the function as a benchmark
BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
for (auto _ : state)
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
To run the benchmark, compile and link against the `benchmark` library
(libbenchmark.a/.so). If you followed the build steps above, this library will
be under the build directory you created.
```bash
# Example on linux after running the build steps above. Assumes the
# `benchmark` and `build` directories are under the current directory.
$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
-Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark
```
Alternatively, link against the `benchmark_main` library and remove
`BENCHMARK_MAIN();` above to get the same behavior.
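For example, the link line above might then become the following (same
illustrative layout; note that `-lbenchmark_main` must precede `-lbenchmark`):

```bash
$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
    -Lbenchmark/build/src -lbenchmark_main -lbenchmark -lpthread -o mybenchmark
```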
The compiled executable will run all benchmarks by default. Pass the `--help`
flag for option information or see the guide below.
### Usage with CMake
If using CMake, it is recommended to link against the project-provided
`benchmark::benchmark` and `benchmark::benchmark_main` targets using
`target_link_libraries`.
It is possible to use `find_package` to import an installed version of the
library.
```cmake
find_package(benchmark REQUIRED)
```
Alternatively, `add_subdirectory` will incorporate the library directly into
one's CMake project.
```cmake
add_subdirectory(benchmark)
```
Either way, link to the library as follows.
```cmake
target_link_libraries(MyTarget benchmark::benchmark)
```
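Putting it together, a minimal consuming `CMakeLists.txt` might look like the
following sketch (project and target names are illustrative):

```cmake
cmake_minimum_required(VERSION 3.5)
project(my_benchmarks CXX)

# Import an installed copy of the library; add_subdirectory(benchmark) works too.
find_package(benchmark REQUIRED)

add_executable(mybenchmark mybenchmark.cc)
# benchmark::benchmark_main provides main(), so BENCHMARK_MAIN() can be omitted.
target_link_libraries(mybenchmark benchmark::benchmark_main)
```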
## Platform Specific Build Instructions
### Building with GCC
When the library is built using GCC it is necessary to link with the pthread
library due to how GCC implements `std::thread`. Failing to link to pthread will
lead to runtime exceptions (unless you're using libc++), not linker errors. See
[issue #67](https://github.com/google/benchmark/issues/67) for more details. You
can link to pthread by adding `-pthread` to your linker command. Note, you can
also use `-lpthread`, but there are potential issues with ordering of command
line parameters if you use that.
### Building with Visual Studio 2015 or 2017
The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following:
```
// Alternatively, can add libraries using linker options.
#ifdef _WIN32
#pragma comment ( lib, "Shlwapi.lib" )
#ifdef _DEBUG
#pragma comment ( lib, "benchmarkd.lib" )
#else
#pragma comment ( lib, "benchmark.lib" )
#endif
#endif
```
You can also use the graphical version of CMake:
* Open `CMake GUI`.
* Under `Where to build the binaries`, same path as source plus `build`.
* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`.
* Click `Configure`, `Generate`, `Open Project`.
* If the build fails, try deleting the entire directory and starting again, or unticking options to build less.
### Building with Intel 2015 Update 1 or Intel System Studio Update 4
See instructions for building with Visual Studio. Once built, right click on the solution and change the build to Intel.
### Building on Solaris
If you're running benchmarks on Solaris, you'll want the kstat library linked in
too (`-lkstat`).
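For example, extending the illustrative link line from earlier:

```bash
$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
    -Lbenchmark/build/src -lbenchmark -lpthread -lkstat -o mybenchmark
```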
## User Guide
### Command Line
[Output Formats](#output-formats)
[Output Files](#output-files)
[Running Benchmarks](#running-benchmarks)
[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks)
[Result Comparison](#result-comparison)
[Extra Context](#extra-context)
### Library
[Runtime and Reporting Considerations](#runtime-and-reporting-considerations)
[Passing Arguments](#passing-arguments)
[Custom Benchmark Name](#custom-benchmark-name)
[Calculating Asymptotic Complexity](#asymptotic-complexity)
[Templated Benchmarks](#templated-benchmarks)
[Fixtures](#fixtures)
[Custom Counters](#custom-counters)
[Multithreaded Benchmarks](#multithreaded-benchmarks)
[CPU Timers](#cpu-timers)
[Manual Timing](#manual-timing)
[Setting the Time Unit](#setting-the-time-unit)
[Random Interleaving](docs/random_interleaving.md)
[User-Requested Performance Counters](docs/perf_counters.md)
[Preventing Optimization](#preventing-optimization)
[Reporting Statistics](#reporting-statistics)
[Custom Statistics](#custom-statistics)
[Using RegisterBenchmark](#using-register-benchmark)
[Exiting with an Error](#exiting-with-an-error)
[A Faster KeepRunning Loop](#a-faster-keep-running-loop)
[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling)
<a name="output-formats" />
### Output Formats
The library supports multiple output formats. Use the
`--benchmark_format=<console|json|csv>` flag (or set the
`BENCHMARK_FORMAT=<console|json|csv>` environment variable) to set
the format type. `console` is the default format.
The Console format is intended to be a human readable format. By default
the format generates color output. Context is output on stderr and the
tabular data on stdout. Example tabular output looks like:
```
Benchmark Time(ns) CPU(ns) Iterations
----------------------------------------------------------------------
BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s
BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s
BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s
```
The JSON format outputs human-readable JSON split into two top-level attributes.
The `context` attribute contains information about the run in general, including
information about the CPU and the date.
The `benchmarks` attribute contains a list of every benchmark run. Example json
output looks like:
```json
{
"context": {
"date": "2015/03/17-18:40:25",
"num_cpus": 40,
"mhz_per_cpu": 2801,
"cpu_scaling_enabled": false,
"build_type": "debug"
},
"benchmarks": [
{
"name": "BM_SetInsert/1024/1",
"iterations": 94877,
"real_time": 29275,
"cpu_time": 29836,
"bytes_per_second": 134066,
"items_per_second": 33516
},
{
"name": "BM_SetInsert/1024/8",
"iterations": 21609,
"real_time": 32317,
"cpu_time": 32429,
"bytes_per_second": 986770,
"items_per_second": 246693
},
{
"name": "BM_SetInsert/1024/10",
"iterations": 21393,
"real_time": 32724,
"cpu_time": 33355,
"bytes_per_second": 1199226,
"items_per_second": 299807
}
]
}
```
The CSV format outputs comma-separated values. The `context` is output on stderr
and the CSV itself on stdout. Example CSV output looks like:
```
name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
```
<a name="output-files" />
### Output Files
Write benchmark results to a file with the `--benchmark_out=<filename>` option
(or set `BENCHMARK_OUT`). Specify the output format with
`--benchmark_out_format={json|console|csv}` (or set
`BENCHMARK_OUT_FORMAT={json|console|csv}`). Note that the 'csv' reporter is
deprecated and the saved `.csv` file
[is not parsable](https://github.com/google/benchmark/issues/794) by csv
parsers.
Specifying `--benchmark_out` does not suppress the console output.
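For example, the following writes JSON results to a file while still printing
the console report (binary name illustrative):

```bash
$ ./mybenchmark --benchmark_out=results.json --benchmark_out_format=json
```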
<a name="running-benchmarks" />
### Running Benchmarks
Benchmarks are executed by running the produced binaries. By default, benchmark
binaries accept options that may be specified either through their command
line interface or by setting environment variables before execution. For every
`--option_flag=<value>` CLI switch, a corresponding environment variable
`OPTION_FLAG=<value>` exists and is used as the default if set (the CLI switch
always prevails). A complete list of CLI options is available by running
benchmarks with the `--help` switch.
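For example, both of the following invocations select the JSON reporter; if
both the switch and the variable are set, the switch wins (binary name
illustrative):

```bash
$ BENCHMARK_FORMAT=json ./mybenchmark
$ ./mybenchmark --benchmark_format=json
```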
<a name="running-a-subset-of-benchmarks" />
### Running a Subset of Benchmarks
The `--benchmark_filter=<regex>` option (or `BENCHMARK_FILTER=<regex>`
environment variable) can be used to only run the benchmarks that match
the specified `<regex>`. For example:
```bash
$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
Run on (1 X 2300 MHz CPU )
2016-06-25 19:34:24
Benchmark Time CPU Iterations
----------------------------------------------------
BM_memcpy/32 11 ns 11 ns 79545455
BM_memcpy/32k 2181 ns 2185 ns 324074
BM_memcpy/32 12 ns 12 ns 54687500
BM_memcpy/32k 1834 ns 1837 ns 357143
```
<a name="result-comparison" />
### Result Comparison
It is possible to compare the benchmarking results.
See [Additional Tooling Documentation](docs/tools.md)
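As a hedged sketch, two saved JSON result files can be compared with the
bundled `tools/compare.py` (see the tooling documentation above for
authoritative usage):

```bash
$ tools/compare.py benchmarks baseline.json contender.json
```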
<a name="extra-context" />
### Extra Context
Sometimes it's useful to add extra context to the content printed before the
results. By default this section includes information about the CPU on which
the benchmarks are running. If you do want to add more context, you can use
the `--benchmark_context` command line flag:
```bash
$ ./run_benchmarks --benchmark_context=pwd=`pwd`
Run on (1 x 2300 MHz CPU)
pwd: /home/user/benchmark/
Benchmark Time CPU Iterations
----------------------------------------------------
BM_memcpy/32 11 ns 11 ns 79545455
BM_memcpy/32k 2181 ns 2185 ns 324074
```
You can get the same effect with the API:
```c++
benchmark::AddCustomContext("foo", "bar");
```
Note that attempts to add a second value with the same key will fail with an
error message.
<a name="runtime-and-reporting-considerations" />
### Runtime and Reporting Considerations
When the benchmark binary is executed, each benchmark function is run serially.
The number of iterations to run is determined dynamically by running the
benchmark a few times and measuring the time taken and ensuring that the
ultimate result will be statistically stable. As such, faster benchmark
functions will be run for more iterations than slower benchmark functions, and
the number of iterations is thus reported.
In all cases, the number of iterations for which the benchmark is run is
governed by the amount of time the benchmark takes. Concretely, the benchmark
is run for at least one and at most 1e9 iterations, until the CPU time exceeds
the minimum time, or the wall-clock time exceeds 5x the minimum time. The
minimum time is set per benchmark by calling `MinTime` on the registered
benchmark object.
Average timings are then reported over the iterations run. If multiple
repetitions are requested using the `--benchmark_repetitions` command-line
option, or at registration time, the benchmark function will be run several
times and statistical results across these repetitions will also be reported.
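A minimal sketch of both controls at registration time, reusing the
`BM_StringCopy` benchmark from the basic usage example:

```c++
// Run each measurement for at least 2 seconds instead of the default minimum
// time, and repeat the whole measurement 4 times to gather statistics.
BENCHMARK(BM_StringCopy)->MinTime(2.0)->Repetitions(4);
```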
As well as the per-benchmark entries, a preamble in the report will include
information about the machine on which the benchmarks are run.
<a name="passing-arguments" />
### Passing Arguments
Sometimes a family of benchmarks can be implemented with just one routine that
takes an extra argument to specify which one of the family of benchmarks to
run. For example, the following code defines a family of benchmarks for
measuring the speed of `memcpy()` calls of different lengths:
```c++
static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range(0)];
char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
for (auto _ : state)
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(int64_t(state.iterations()) *
int64_t(state.range(0)));
delete[] src;
delete[] dst;
}
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
```
The preceding code is quite repetitive, and can be replaced with the following
short-hand. The following invocation will pick a few appropriate arguments in
the specified range and will generate a benchmark for each such argument.
```c++
BENCHMARK(BM_memcpy)->Range(8, 8<<10);
```
By default the arguments in the range are generated in multiples of eight and
the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
range multiplier is changed to multiples of two.
```c++
BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
```
Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
The preceding code shows a method of defining a sparse range. The following
example shows a method of defining a dense range. It is then used to benchmark
the performance of `std::vector` initialization for uniformly increasing sizes.
```c++
static void BM_DenseRange(benchmark::State& state) {
for(auto _ : state) {
std::vector<int> v(state.range(0), state.range(0));
benchmark::DoNotOptimize(v.data());
benchmark::ClobberMemory();
}
}
BENCHMARK(BM_DenseRange)->DenseRange(0, 1024, 128);
```
Now arguments generated are [ 0, 128, 256, 384, 512, 640, 768, 896, 1024 ].
You might have a benchmark that depends on two or more inputs. For example, the
following code defines a family of benchmarks for measuring the speed of set
insertion.
```c++
static void BM_SetInsert(benchmark::State& state) {
std::set<int> data;
for (auto _ : state) {
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j)
data.insert(RandomNumber());
}
}
BENCHMARK(BM_SetInsert)
->Args({1<<10, 128})
->Args({2<<10, 128})
->Args({4<<10, 128})
->Args({8<<10, 128})
->Args({1<<10, 512})
->Args({2<<10, 512})
->Args({4<<10, 512})
->Args({8<<10, 512});
```
The preceding code is quite repetitive, and can be replaced with the following
short-hand. The following macro will pick a few appropriate arguments in the
product of the two specified ranges and will generate a benchmark for each such
pair.
```c++
BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
```
Some benchmarks may require specific argument values that cannot be expressed
with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a
benchmark input for each combination in the product of the supplied vectors.
```c++
BENCHMARK(BM_SetInsert)
->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}})
// would generate the same benchmark arguments as
BENCHMARK(BM_SetInsert)
->Args({1<<10, 20})
->Args({3<<10, 20})
->Args({8<<10, 20})
->Args({3<<10, 40})
->Args({8<<10, 40})
->Args({1<<10, 40})
->Args({1<<10, 60})
->Args({3<<10, 60})
->Args({8<<10, 60})
->Args({1<<10, 80})
->Args({3<<10, 80})
->Args({8<<10, 80});
```
For more complex patterns of inputs, passing a custom function to `Apply` allows
programmatic specification of an arbitrary set of arguments on which to run the
benchmark. The following example enumerates a dense range on one parameter,
and a sparse range on the second.
```c++
static void CustomArguments(benchmark::internal::Benchmark* b) {
for (int i = 0; i <= 10; ++i)
for (int j = 32; j <= 1024*1024; j *= 8)
b->Args({i, j});
}
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
```
#### Passing Arbitrary Arguments to a Benchmark
In C++11 it is possible to define a benchmark that takes an arbitrary number
of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
macro creates a benchmark that invokes `func` with the `benchmark::State` as
the first argument followed by the specified `args...`.
The `test_case_name` is appended to the name of the benchmark and
should describe the values passed.
```c++
template <class ...ExtraArgs>
void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
[...]
}
// Registers a benchmark named "BM_takes_args/int_string_test" that passes
// the specified values to `extra_args`.
BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
```
Note that elements of `...args` may refer to global variables. Users should
avoid modifying global state inside of a benchmark.
<a name="asymptotic-complexity" />
### Calculating Asymptotic Complexity (Big O)
Asymptotic complexity might be calculated for a family of benchmarks. The
following code will calculate the coefficient for the high-order term in the
running time and the normalized root-mean square error of string comparison.
```c++
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
for (auto _ : state) {
benchmark::DoNotOptimize(s1.compare(s2));
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_StringCompare)
->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
```
As shown in the following invocation, asymptotic complexity might also be
calculated automatically.
```c++
BENCHMARK(BM_StringCompare)
->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
```
The following code will specify asymptotic complexity with a lambda function
that might be used to customize high-order term calculation.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
->Range(1<<10, 1<<18)->Complexity([](benchmark::IterationCount n)->double{return n; });
```
<a name="custom-benchmark-name" />
### Custom Benchmark Name
You can change the benchmark's name as follows:
```c++
BENCHMARK(BM_memcpy)->Name("memcpy")->RangeMultiplier(2)->Range(8, 8<<10);
```
The invocation will execute the benchmark as before using `BM_memcpy` but changes
the prefix in the report to `memcpy`.
<a name="templated-benchmarks" />
### Templated Benchmarks
This example produces and consumes messages of size `sizeof(v)` `range_x`
times. It also outputs throughput in the absence of multiprogramming.
```c++
template <class Q> void BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
for (auto _ : state) {
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range(0); e--; )
q.Wait(&v);
}
// actually messages, not bytes:
state.SetBytesProcessed(
static_cast<int64_t>(state.iterations())*state.range(0));
}
BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
```
Three macros are provided for adding benchmark templates.
```c++
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
#else // C++ < C++11
#define BENCHMARK_TEMPLATE(func, arg1)
#endif
#define BENCHMARK_TEMPLATE1(func, arg1)
#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
```
<a name="fixtures" />
### Fixtures
Fixture tests are created by first defining a type that derives from
`::benchmark::Fixture` and then creating/registering the tests using the
following macros:
* `BENCHMARK_F(ClassName, Method)`
* `BENCHMARK_DEFINE_F(ClassName, Method)`
* `BENCHMARK_REGISTER_F(ClassName, Method)`
For example:
```c++
class MyFixture : public benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& state) {
}
void TearDown(const ::benchmark::State& state) {
}
};
BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
for (auto _ : st) {
...
}
}
BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
for (auto _ : st) {
...
}
}
/* BarTest is NOT registered */
BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
/* BarTest is now registered */
```
#### Templated Fixtures
You can also create templated fixtures by using the following macros:
* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
For example:
```c++
template<typename T>
class MyFixture : public benchmark::Fixture {};
BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
for (auto _ : st) {
...
}
}
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
for (auto _ : st) {
...
}
}
BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
```
<a name="custom-counters" />
### Custom Counters
You can add your own counters with user-defined names. The example below
will add columns "Foo", "Bar" and "Baz" in its output:
```c++
static void UserCountersExample1(benchmark::State& state) {
  double numFoos = 0, numBars = 0, numBazs = 0;
  for (auto _ : state) {
    // ... count Foo,Bar,Baz events
  }
  state.counters["Foo"] = numFoos;
  state.counters["Bar"] = numBars;
  state.counters["Baz"] = numBazs;
}
```
The `state.counters` object is a `std::map` with `std::string` keys
and `Counter` values. The latter is a `double`-like class, via an implicit
conversion to `double&`. Thus you can use all of the standard arithmetic
assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
In multithreaded benchmarks, each counter is set on the calling thread only.
When the benchmark finishes, the counters from each thread will be summed;
the resulting sum is the value which will be shown for the benchmark.
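For instance, a minimal sketch of this summing behavior (the `BM_ThreadCounters`
name and the "Items" counter are illustrative):
```c++
static void BM_ThreadCounters(benchmark::State& state) {
  double items = 0;
  for (auto _ : state) {
    // ... each thread counts only the items it processed itself
    items += 1;
  }
  // Each thread sets its own counter; the reported "Items" value
  // is the sum over all threads.
  state.counters["Items"] = items;
}
BENCHMARK(BM_ThreadCounters)->Threads(4);
```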
The `Counter` constructor accepts three parameters: the value as a `double`;
a bit flag which allows you to show counters as rates, and/or as per-thread
iterations, and/or as per-thread averages, and/or as iteration invariants,
and/or finally to invert the result; and a flag specifying the 'unit' - i.e.
whether 1k means 1000 (the default, `benchmark::Counter::OneK::kIs1000`) or 1024
(`benchmark::Counter::OneK::kIs1024`).
```c++
  // sets a simple counter
  state.counters["Foo"] = numFoos;

  // Set the counter as a rate. It will be presented divided
  // by the duration of the benchmark.
  // Meaning: per one second, how many 'foo's are processed?
  state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);

  // Set the counter as a rate. It will be presented divided
  // by the duration of the benchmark, and the result inverted.
  // Meaning: how many seconds it takes to process one 'foo'?
  state.counters["FooInvRate"] = Counter(numFoos, benchmark::Counter::kIsRate | benchmark::Counter::kInvert);

  // Set the counter as a thread-average quantity. It will
  // be presented divided by the number of threads.
  state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);

  // There's also a combined flag:
  state.counters["FooAvgRate"] = Counter(numFoos, benchmark::Counter::kAvgThreadsRate);

  // This says that we process with the rate of state.range(0) bytes every iteration:
  state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024);
```
When you're compiling in C++11 mode or later you can use `insert()` with
`std::initializer_list`:
```c++
  // With C++11, this can be done:
  state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
  // ... instead of:
  state.counters["Foo"] = numFoos;
  state.counters["Bar"] = numBars;
  state.counters["Baz"] = numBazs;
```
#### Counter Reporting
When using the console reporter, by default, user counters are printed at
the end after the table, the same way as `bytes_processed` and
`items_processed`. This is best for cases in which there are few counters,
or where there are only a couple of lines per benchmark. Here's an example of
the default output:
```
------------------------------------------------------------------------------
Benchmark                        Time           CPU Iterations UserCounters...
------------------------------------------------------------------------------
BM_UserCounter/threads:8      2248 ns      10277 ns      68808 Bar=16 Bat=40 Baz=24 Foo=8
BM_UserCounter/threads:1      9797 ns       9788 ns      71523 Bar=2 Bat=5 Baz=3 Foo=1024m
BM_UserCounter/threads:2      4924 ns       9842 ns      71036 Bar=4 Bat=10 Baz=6 Foo=2
BM_UserCounter/threads:4      2589 ns      10284 ns      68012 Bar=8 Bat=20 Baz=12 Foo=4
BM_UserCounter/threads:8      2212 ns      10287 ns      68040 Bar=16 Bat=40 Baz=24 Foo=8
BM_UserCounter/threads:16     1782 ns      10278 ns      68144 Bar=32 Bat=80 Baz=48 Foo=16
BM_UserCounter/threads:32     1291 ns      10296 ns      68256 Bar=64 Bat=160 Baz=96 Foo=32
BM_UserCounter/threads:4      2615 ns      10307 ns      68040 Bar=8 Bat=20 Baz=12 Foo=4
BM_Factorial                    26 ns         26 ns   26608979 40320
BM_Factorial/real_time          26 ns         26 ns   26587936 40320
BM_CalculatePiRange/1           16 ns         16 ns   45704255 0
BM_CalculatePiRange/8           73 ns         73 ns    9520927 3.28374
BM_CalculatePiRange/64         609 ns        609 ns    1140647 3.15746
BM_CalculatePiRange/512       4900 ns       4901 ns     142696 3.14355
```
If this doesn't suit you, you can print each counter as a table column by
passing the flag `--benchmark_counters_tabular=true` to the benchmark
application. This is best for cases in which there are a lot of counters, or
a lot of lines per individual benchmark. Note that this will trigger a
reprinting of the table header any time the counter set changes between
individual benchmarks. Here's an example of corresponding output when
`--benchmark_counters_tabular=true` is passed:
```
---------------------------------------------------------------------------------------
Benchmark                        Time           CPU Iterations    Bar   Bat   Baz   Foo
---------------------------------------------------------------------------------------
BM_UserCounter/threads:8      2198 ns       9953 ns      70688     16    40    24     8
BM_UserCounter/threads:1      9504 ns       9504 ns      73787      2     5     3     1
BM_UserCounter/threads:2      4775 ns       9550 ns      72606      4    10     6     2
BM_UserCounter/threads:4      2508 ns       9951 ns      70332      8    20    12     4
BM_UserCounter/threads:8      2055 ns       9933 ns      70344     16    40    24     8
BM_UserCounter/threads:16     1610 ns       9946 ns      70720     32    80    48    16
BM_UserCounter/threads:32     1192 ns       9948 ns      70496     64   160    96    32
BM_UserCounter/threads:4      2506 ns       9949 ns      70332      8    20    12     4
--------------------------------------------------------------
Benchmark                        Time           CPU Iterations
--------------------------------------------------------------
BM_Factorial                    26 ns         26 ns   26392245 40320
BM_Factorial/real_time          26 ns         26 ns   26494107 40320
BM_CalculatePiRange/1           15 ns         15 ns   45571597 0
BM_CalculatePiRange/8           74 ns         74 ns    9450212 3.28374
BM_CalculatePiRange/64         595 ns        595 ns    1173901 3.15746
BM_CalculatePiRange/512       4752 ns       4752 ns     147380 3.14355
BM_CalculatePiRange/4k       37970 ns      37972 ns      18453 3.14184
BM_CalculatePiRange/32k     303733 ns     303744 ns       2305 3.14162
BM_CalculatePiRange/256k   2434095 ns    2434186 ns        288 3.1416
BM_CalculatePiRange/1024k  9721140 ns    9721413 ns         71 3.14159
BM_CalculatePi/threads:8      2255 ns       9943 ns      70936
```
Note above the additional header printed when the benchmark changes from
`BM_UserCounter` to `BM_Factorial`. This is because `BM_Factorial` does
not have the same counter set as `BM_UserCounter`.
<a name="multithreaded-benchmarks"/>
### Multithreaded Benchmarks
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
it is guaranteed that none of the threads will start until all have reached
the start of the benchmark loop, and all will have finished before any thread
exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
API.) As such, any global setup or teardown can be wrapped in a check against the
thread index:
```c++
static void BM_MultiThreaded(benchmark::State& state) {
  if (state.thread_index == 0) {
    // Setup code here.
  }
  for (auto _ : state) {
    // Run the test as normal.
  }
  if (state.thread_index == 0) {
    // Teardown code here.
  }
}
BENCHMARK(BM_MultiThreaded)->Threads(2);
```
If the benchmarked code itself uses threads and you want to compare it to
single-threaded code, you may want to use real-time ("wallclock") measurements
for latency comparisons:
```c++
BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
```
Without `UseRealTime`, CPU time is used by default.
<a name="cpu-timers" />
### CPU Timers
By default, the CPU timer only measures the time spent by the main thread.
If the benchmark itself uses threads internally, this measurement may not
be what you are looking for. Instead, there is a way to measure the total
CPU usage of the process, across all of its threads.
```c++
void callee(int i);

static void MyMain(int size) {
#pragma omp parallel for
  for(int i = 0; i < size; i++)
    callee(i);
}

static void BM_OpenMP(benchmark::State& state) {
  for (auto _ : state)
    MyMain(state.range(0));
}

// Measure the time spent by the main thread, and use it to decide for how long
// to run the benchmark loop. Depending on the internal implementation details,
// this may measure anywhere from near-zero (the overhead spent before/after work
// handoff to the worker thread[s]) to the whole single-thread time.
BENCHMARK(BM_OpenMP)->Range(8, 8<<10);

// Measure the user-visible time, the wall clock (literally, the time that
// has passed on the clock on the wall), and use it to decide for how long to
// run the benchmark loop. This will always be meaningful, and will match the
// time spent by the main thread in the single-threaded case, in general
// decreasing with the number of internal threads doing the work.
BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime();

// Measure the total CPU consumption, and use it to decide for how long to
// run the benchmark loop. This will always measure to no less than the
// time spent by the main thread in the single-threaded case.
BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime();

// A mixture of the last two. Measure the total CPU consumption, but use the
// wall clock to decide for how long to run the benchmark loop.
BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
```
#### Controlling Timers
Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
is measured. But sometimes it is necessary to do some work inside the
loop on every iteration, without counting that time toward the benchmark time.
That is possible, although it is not recommended, since it has high overhead.
```c++
static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
  std::set<int> data;
  for (auto _ : state) {
    state.PauseTiming(); // Stop timers. They will not count until they are resumed.
    data = ConstructRandomSet(state.range(0)); // Do something that should not be measured
    state.ResumeTiming(); // And resume timers. They are now counting again.
    // The rest will be measured.
    for (int j = 0; j < state.range(1); ++j)
      data.insert(RandomNumber());
  }
}
BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
```
<a name="manual-timing" />
### Manual Timing
For benchmarking something for which neither CPU time nor real-time are
correct or accurate enough, completely manual timing is supported using
the `UseManualTime` function.
When `UseManualTime` is used, the benchmarked code must call
`SetIterationTime` once per iteration of the benchmark loop to
report the manually measured time.
An example use case for this is benchmarking GPU execution (e.g. OpenCL
or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
be accurately measured using CPU time or real-time. Instead, they can be
measured accurately using a dedicated API, and these measurement results
can be reported back with `SetIterationTime`.
```c++
static void BM_ManualTiming(benchmark::State& state) {
  int microseconds = state.range(0);
  std::chrono::duration<double, std::micro> sleep_duration {
    static_cast<double>(microseconds)
  };

  for (auto _ : state) {
    auto start = std::chrono::high_resolution_clock::now();
    // Simulate some useful workload with a sleep
    std::this_thread::sleep_for(sleep_duration);
    auto end = std::chrono::high_resolution_clock::now();

    auto elapsed_seconds =
        std::chrono::duration_cast<std::chrono::duration<double>>(
            end - start);

    state.SetIterationTime(elapsed_seconds.count());
  }
}
BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
```
<a name="setting-the-time-unit" />
### Setting the Time Unit
If a benchmark runs for a few milliseconds, it may be hard to visually compare
the measured times, since the output data is given in nanoseconds by default. To
make the output easier to read, you can set the time unit explicitly:
```c++
BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
```
<a name="preventing-optimization" />
### Preventing Optimization
To prevent a value or expression from being optimized away by the compiler
the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
functions can be used.
```c++
static void BM_test(benchmark::State& state) {
  for (auto _ : state) {
    int x = 0;
    for (int i=0; i < 64; ++i) {
      benchmark::DoNotOptimize(x += i);
    }
  }
}
```
`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
memory or a register. For GNU based compilers it acts as a read/write barrier
for global memory. More specifically, it forces the compiler to flush pending
writes to memory and reload any other values as necessary.
Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
in any way. `<expr>` may even be removed entirely when the result is already
known. For example:
```c++
/* Example 1: `<expr>` is removed entirely. */
int foo(int x) { return x + 42; }
while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
/* Example 2: Result of '<expr>' is only reused */
int bar(int) __attribute__((const));
while (...) DoNotOptimize(bar(0)); // Optimized to:
// int __result__ = bar(0);
// while (...) DoNotOptimize(__result__);
```
The second tool for preventing optimizations is `ClobberMemory()`. In essence
`ClobberMemory()` forces the compiler to perform all pending writes to global
memory. Memory managed by block scope objects must be "escaped" using
`DoNotOptimize(...)` before it can be clobbered. In the below example
`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
away.
```c++
static void BM_vector_push_back(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int> v;
    v.reserve(1);
    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
    v.push_back(42);
    benchmark::ClobberMemory(); // Force 42 to be written to memory.
  }
}
```
Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
<a name="reporting-statistics" />
### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
By default, each benchmark is run once, and that single result is reported.
However, benchmarks are often noisy, and a single result may not be representative
of the overall behavior. For this reason, it's possible to repeatedly rerun the
benchmark.
The number of runs of each benchmark is specified globally by the
`--benchmark_repetitions` flag or on a per-benchmark basis by calling
`Repetitions` on the registered benchmark object. When a benchmark is run more
than once, the mean, median and standard deviation of the runs will be reported.
Additionally, the `--benchmark_report_aggregates_only={true|false}` and
`--benchmark_display_aggregates_only={true|false}` flags, or the
`ReportAggregatesOnly(bool)` and `DisplayAggregatesOnly(bool)` functions, can be
used to change how repeated tests are reported. By default, the result of each
repeated run is reported. When the `report aggregates only` option is `true`,
only the aggregates (i.e. mean, median and standard deviation, and possibly
complexity measurements if they were requested) of the runs are reported, to
both reporters: the standard output (console) and the file.
When only the `display aggregates only` option is `true`, however,
only the aggregates are displayed in the standard output, while the file
output still contains everything.
Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
registered benchmark object overrides the value of the appropriate flag for that
benchmark.
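For example, a per-benchmark configuration might look like this (a sketch
reusing the `BM_memcpy` benchmark named earlier):
```c++
// Run the benchmark 10 times; show only mean/median/stddev on the console,
// while the file output still contains every individual repetition.
BENCHMARK(BM_memcpy)->Arg(8<<10)->Repetitions(10)->DisplayAggregatesOnly(true);
```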
<a name="custom-statistics" />
### Custom Statistics
While having the mean, median and standard deviation is nice, this may not be
enough for everyone. For example, you may want to know what the largest
observation is, e.g. because you have some real-time constraints. This is easy.
The following code will specify a custom statistic to be calculated, defined
by a lambda function.
```c++
void BM_spin_empty(benchmark::State& state) {
  for (auto _ : state) {
    for (int x = 0; x < state.range(0); ++x) {
      benchmark::DoNotOptimize(x);
    }
  }
}

BENCHMARK(BM_spin_empty)
  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
    return *(std::max_element(std::begin(v), std::end(v)));
  })
  ->Arg(512);
```
<a name="using-register-benchmark" />
### Using RegisterBenchmark(name, fn, args...)
The `RegisterBenchmark(name, func, args...)` function provides an alternative
way to create and register benchmarks.
`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
pointer to a new benchmark with the specified `name` that invokes
`func(st, args...)` where `st` is a `benchmark::State` object.
Unlike the `BENCHMARK` registration macros, which can only be used at global
scope, `RegisterBenchmark` can be called anywhere. This allows for
benchmark tests to be registered programmatically.
Additionally, `RegisterBenchmark` allows any callable object to be registered
as a benchmark, including capturing lambdas and function objects.
For example:
```c++
auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };

int main(int argc, char** argv) {
  for (auto& test_input : { /* ... */ })
    benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
  benchmark::Shutdown();
}
```
<a name="exiting-with-an-error" />
### Exiting with an Error
When errors caused by external influences, such as file I/O and network
communication, occur within a benchmark, the
`State::SkipWithError(const char* msg)` function can be used to skip that run
of the benchmark and report the error. Note that only future iterations of the
`KeepRunning()` loop are skipped. For the ranged-for version of the benchmark
loop, users must explicitly exit the loop, otherwise all iterations will be
performed. Users may explicitly return to exit the benchmark immediately.
The `SkipWithError(...)` function may be used at any point within the benchmark,
including before and after the benchmark loop. Moreover, if `SkipWithError(...)`
has been used, it is not required to reach the benchmark loop and one may return
from the benchmark function early.
For example:
```c++
static void BM_test(benchmark::State& state) {
  auto resource = GetResource();
  if (!resource.good()) {
    state.SkipWithError("Resource is not good!");
    // KeepRunning() loop will not be entered.
  }
  while (state.KeepRunning()) {
    auto data = resource.read_data();
    if (!resource.good()) {
      state.SkipWithError("Failed to read data!");
      break; // Needed to skip the rest of the iteration.
    }
    do_stuff(data);
  }
}

static void BM_test_ranged_for(benchmark::State & state) {
  auto resource = GetResource();
  if (!resource.good()) {
    state.SkipWithError("Resource is not good!");
    return; // Early return is allowed when SkipWithError() has been used.
  }
  for (auto _ : state) {
    auto data = resource.read_data();
    if (!resource.good()) {
      state.SkipWithError("Failed to read data!");
      break; // REQUIRED to prevent all further iterations.
    }
    do_stuff(data);
  }
}
```
<a name="a-faster-keep-running-loop" />
### A Faster KeepRunning Loop
In C++11 mode, a range-based for loop should be used in preference to
the `KeepRunning` loop for running the benchmarks. For example:
```c++
static void BM_Fast(benchmark::State &state) {
  for (auto _ : state) {
    FastOperation();
  }
}
BENCHMARK(BM_Fast);
```
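For comparison, the equivalent benchmark written with the `KeepRunning` loop
(the `BM_FastKeepRunning` name is illustrative) would be:
```c++
static void BM_FastKeepRunning(benchmark::State &state) {
  while (state.KeepRunning()) {
    FastOperation();
  }
}
BENCHMARK(BM_FastKeepRunning);
```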
The range-based for loop is faster than the `KeepRunning` loop because
`KeepRunning` requires a memory load and store of the iteration count on
every iteration, whereas the range-based variant is able to keep the iteration
count in a register.
For example, an empty inner loop using the range-based for method looks like:
```asm
# Loop Init
  mov rbx, qword ptr [r14 + 104]
  call benchmark::State::StartKeepRunning()
  test rbx, rbx
  je .LoopEnd
.LoopHeader: # =>This Inner Loop Header: Depth=1
  add rbx, -1
  jne .LoopHeader
.LoopEnd:
```
Compared to an empty `KeepRunning` loop, which looks like:
```asm
.LoopHeader: # in Loop: Header=BB0_3 Depth=1
  cmp byte ptr [rbx], 1
  jne .LoopInit
.LoopBody: # =>This Inner Loop Header: Depth=1
  mov rax, qword ptr [rbx + 8]
  lea rcx, [rax + 1]
  mov qword ptr [rbx + 8], rcx
  cmp rax, qword ptr [rbx + 104]
  jb .LoopHeader
  jmp .LoopEnd
.LoopInit:
  mov rdi, rbx
  call benchmark::State::StartKeepRunning()
  jmp .LoopBody
.LoopEnd:
```
Unless C++03 compatibility is required, the range-based for variant of writing
the benchmark loop should be preferred.
<a name="disabling-cpu-frequency-scaling" />
### Disabling CPU Frequency Scaling
If you see this error:
```
***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
```
you might want to disable the CPU frequency scaling while running the benchmark:
```bash
sudo cpupower frequency-set --governor performance
./mybench
sudo cpupower frequency-set --governor powersave
```
07070100000007000081A400000000000000000000000160C0813C00000728000000000000000000000000000000000000001A00000000benchmark-1.5.5/WORKSPACEworkspace(name = "com_github_google_benchmark")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "rules_cc",
strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912",
urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"],
sha256 = "d7dc12c1d5bc1a87474de8e3d17b7731a4dcebcfb8aa3990fe8ac7734ef12f2f",
)
http_archive(
name = "com_google_absl",
sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111",
strip_prefix = "abseil-cpp-20200225.2",
urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"],
)
http_archive(
name = "com_google_googletest",
strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
sha256 = "8f827dd550db8b4fdf73904690df0be9fccc161017c9038a724bc9a0617a1bc8",
)
http_archive(
name = "pybind11",
build_file = "@//bindings/python:pybind11.BUILD",
sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
strip_prefix = "pybind11-2.4.3",
urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"],
)
new_local_repository(
name = "python_headers",
build_file = "@//bindings/python:python_headers.BUILD",
path = "/usr/include/python3.6", # May be overwritten by setup.py.
)
http_archive(
name = "rules_python",
url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz",
sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0",
)
load("@rules_python//python:pip.bzl", pip3_install="pip_install")
pip3_install(
name = "py_deps",
requirements = "//:requirements.txt",
)
07070100000008000081A400000000000000000000000160C0813C0000002B000000000000000000000000000000000000001C00000000benchmark-1.5.5/_config.ymltheme: jekyll-theme-midnight
markdown: GFM
07070100000009000081A400000000000000000000000160C0813C000004F2000000000000000000000000000000000000001D00000000benchmark-1.5.5/appveyor.ymlversion: '{build}'
image: Visual Studio 2017
configuration:
  - Debug
  - Release

environment:
  matrix:
    - compiler: msvc-15-seh
      generator: "Visual Studio 15 2017"

    - compiler: msvc-15-seh
      generator: "Visual Studio 15 2017 Win64"

    - compiler: msvc-14-seh
      generator: "Visual Studio 14 2015"

    - compiler: msvc-14-seh
      generator: "Visual Studio 14 2015 Win64"

    - compiler: gcc-5.3.0-posix
      generator: "MinGW Makefiles"
      cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015

matrix:
  fast_finish: true

install:
  # git bash conflicts with MinGW makefiles
  - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
  - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")

build_script:
  - md _build -Force
  - cd _build
  - echo %configuration%
  - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
  - cmake --build . --config %configuration%

test_script:
  - ctest --build-config %configuration% --timeout 300 --output-on-failure

artifacts:
  - path: '_build/CMakeFiles/*.log'
    name: logs
  - path: '_build/Testing/**/*.xml'
    name: test_results
0707010000000A000041ED00000000000000000000000360C0813C00000000000000000000000000000000000000000000001900000000benchmark-1.5.5/bindings0707010000000B000041ED00000000000000000000000360C0813C00000000000000000000000000000000000000000000002000000000benchmark-1.5.5/bindings/python0707010000000C000081A400000000000000000000000160C0813C00000044000000000000000000000000000000000000002600000000benchmark-1.5.5/bindings/python/BUILDexports_files(glob(["*.BUILD"]))
exports_files(["build_defs.bzl"])
0707010000000D000081A400000000000000000000000160C0813C000002E1000000000000000000000000000000000000002F00000000benchmark-1.5.5/bindings/python/build_defs.bzl_SHARED_LIB_SUFFIX = {
"//conditions:default": ".so",
"//:windows": ".dll",
}
def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []):
for shared_lib_suffix in _SHARED_LIB_SUFFIX.values():
shared_lib_name = name + shared_lib_suffix
native.cc_binary(
name = shared_lib_name,
linkshared = 1,
linkstatic = 1,
srcs = srcs + hdrs,
copts = copts,
features = features,
deps = deps,
)
return native.py_library(
name = name,
data = select({
platform: [name + shared_lib_suffix]
for platform, shared_lib_suffix in _SHARED_LIB_SUFFIX.items()
}),
)
0707010000000E000041ED00000000000000000000000260C0813C00000000000000000000000000000000000000000000003100000000benchmark-1.5.5/bindings/python/google_benchmark0707010000000F000081A400000000000000000000000160C0813C000002D7000000000000000000000000000000000000003700000000benchmark-1.5.5/bindings/python/google_benchmark/BUILDload("//bindings/python:build_defs.bzl", "py_extension")
py_library(
name = "google_benchmark",
srcs = ["__init__.py"],
visibility = ["//visibility:public"],
deps = [
":_benchmark",
# pip; absl:app
],
)
py_extension(
name = "_benchmark",
srcs = ["benchmark.cc"],
copts = [
"-fexceptions",
"-fno-strict-aliasing",
],
features = ["-use_header_modules"],
deps = [
"//:benchmark",
"@pybind11",
"@python_headers",
],
)
py_test(
name = "example",
srcs = ["example.py"],
python_version = "PY3",
srcs_version = "PY3",
visibility = ["//visibility:public"],
deps = [
":google_benchmark",
],
)
07070100000010000081A400000000000000000000000160C0813C00001112000000000000000000000000000000000000003D00000000benchmark-1.5.5/bindings/python/google_benchmark/__init__.py# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python benchmarking utilities.
Example usage:
import google_benchmark as benchmark
@benchmark.register
def my_benchmark(state):
... # Code executed outside `while` loop is not timed.
while state:
... # Code executed within `while` loop is timed.
if __name__ == '__main__':
benchmark.main()
"""
from absl import app
from google_benchmark import _benchmark
from google_benchmark._benchmark import (
Counter,
kNanosecond,
kMicrosecond,
kMillisecond,
kSecond,
oNone,
o1,
oN,
oNSquared,
oNCubed,
oLogN,
oNLogN,
oAuto,
oLambda,
)
__all__ = [
"register",
"main",
"Counter",
"kNanosecond",
"kMicrosecond",
"kMillisecond",
"kSecond",
"oNone",
"o1",
"oN",
"oNSquared",
"oNCubed",
"oLogN",
"oNLogN",
"oAuto",
"oLambda",
]
__version__ = "0.2.0"
class __OptionMaker:
    """A stateless class to collect benchmark options.

    Collect all decorator calls like @option.range(start=0, limit=1<<5).
    """

    class Options:
        """Pure data class to store option calls, along with the benchmarked function."""

        def __init__(self, func):
            self.func = func
            self.builder_calls = []

    @classmethod
    def make(cls, func_or_options):
        """Make Options from Options or the benchmarked function."""
        if isinstance(func_or_options, cls.Options):
            return func_or_options
        return cls.Options(func_or_options)

    def __getattr__(self, builder_name):
        """Append an option call to the Options."""

        # The function that gets returned on @option.range(start=0, limit=1<<5).
        def __builder_method(*args, **kwargs):

            # The decorator that gets called, either with the benchmarked
            # function or the previous Options.
            def __decorator(func_or_options):
                options = self.make(func_or_options)
                options.builder_calls.append((builder_name, args, kwargs))
                # The decorator returns Options so it is not technically a
                # decorator and needs a final call to @register.
                return options

            return __decorator

        return __builder_method


# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use
# __getattr__ on option.range.
option = __OptionMaker()
def register(undefined=None, *, name=None):
    """Register a function for benchmarking."""
    if undefined is None:
        # Decorator is called without parentheses, so we return a decorator.
        return lambda f: register(f, name=name)

    # We have either the function to benchmark (simple case) or an instance
    # of Options (@option._ case).
    options = __OptionMaker.make(undefined)

    if name is None:
        name = options.func.__name__

    # We register the benchmark and reproduce all the @option._ calls onto
    # the benchmark builder pattern.
    benchmark = _benchmark.RegisterBenchmark(name, options.func)
    for name, args, kwargs in options.builder_calls[::-1]:
        getattr(benchmark, name)(*args, **kwargs)

    # Return the benchmarked function because the decorator does not modify it.
    return options.func


def _flags_parser(argv):
    argv = _benchmark.Initialize(argv)
    return app.parse_flags_with_usage(argv)


def _run_benchmarks(argv):
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    return _benchmark.RunSpecifiedBenchmarks()


def main(argv=None):
    return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)


# Methods for use with a custom main function.
initialize = _benchmark.Initialize
run_benchmarks = _benchmark.RunSpecifiedBenchmarks
07070100000011000081A400000000000000000000000160C0813C00001E55000000000000000000000000000000000000003E00000000benchmark-1.5.5/bindings/python/google_benchmark/benchmark.cc// Benchmark for Python.
#include <map>
#include <string>
#include <vector>
#include "pybind11/operators.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
#include "pybind11/stl_bind.h"
#include "benchmark/benchmark.h"
PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
namespace {
namespace py = ::pybind11;
std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
// The `argv` pointers here become invalid when this function returns, but
// benchmark holds the pointer to `argv[0]`. We create a static copy of it
// so it persists, and replace the pointer below.
static std::string executable_name(argv[0]);
std::vector<char*> ptrs;
ptrs.reserve(argv.size());
for (auto& arg : argv) {
ptrs.push_back(const_cast<char*>(arg.c_str()));
}
ptrs[0] = const_cast<char*>(executable_name.c_str());
int argc = static_cast<int>(argv.size());
benchmark::Initialize(&argc, ptrs.data());
std::vector<std::string> remaining_argv;
remaining_argv.reserve(argc);
for (int i = 0; i < argc; ++i) {
remaining_argv.emplace_back(ptrs[i]);
}
return remaining_argv;
}
benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
py::function f) {
return benchmark::RegisterBenchmark(
name, [f](benchmark::State& state) { f(&state); });
}
PYBIND11_MODULE(_benchmark, m) {
using benchmark::TimeUnit;
py::enum_<TimeUnit>(m, "TimeUnit")
.value("kNanosecond", TimeUnit::kNanosecond)
.value("kMicrosecond", TimeUnit::kMicrosecond)
.value("kMillisecond", TimeUnit::kMillisecond)
.value("kSecond", TimeUnit::kSecond)
.export_values();
using benchmark::BigO;
py::enum_<BigO>(m, "BigO")
.value("oNone", BigO::oNone)
.value("o1", BigO::o1)
.value("oN", BigO::oN)
.value("oNSquared", BigO::oNSquared)
.value("oNCubed", BigO::oNCubed)
.value("oLogN", BigO::oLogN)
.value("oNLogN", BigO::oLogN)
.value("oAuto", BigO::oAuto)
.value("oLambda", BigO::oLambda)
.export_values();
using benchmark::internal::Benchmark;
py::class_<Benchmark>(m, "Benchmark")
// For methods returning a pointer to the current object, reference
// return policy is used to ask pybind not to take ownership of the
// returned object and avoid calling delete on it.
// https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
//
// For methods taking a const std::vector<...>&, a copy is created
// because it is bound to a Python list.
// https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
.def("unit", &Benchmark::Unit, py::return_value_policy::reference)
.def("arg", &Benchmark::Arg, py::return_value_policy::reference)
.def("args", &Benchmark::Args, py::return_value_policy::reference)
.def("range", &Benchmark::Range, py::return_value_policy::reference,
py::arg("start"), py::arg("limit"))
.def("dense_range", &Benchmark::DenseRange,
py::return_value_policy::reference, py::arg("start"),
py::arg("limit"), py::arg("step") = 1)
.def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
.def("args_product", &Benchmark::ArgsProduct,
py::return_value_policy::reference)
.def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
.def("arg_names", &Benchmark::ArgNames,
py::return_value_policy::reference)
.def("range_pair", &Benchmark::RangePair,
py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
py::arg("lo2"), py::arg("hi2"))
.def("range_multiplier", &Benchmark::RangeMultiplier,
py::return_value_policy::reference)
.def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
.def("iterations", &Benchmark::Iterations,
py::return_value_policy::reference)
.def("repetitions", &Benchmark::Repetitions,
py::return_value_policy::reference)
.def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
py::return_value_policy::reference, py::arg("value") = true)
.def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
py::return_value_policy::reference, py::arg("value") = true)
.def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
py::return_value_policy::reference)
.def("use_real_time", &Benchmark::UseRealTime,
py::return_value_policy::reference)
.def("use_manual_time", &Benchmark::UseManualTime,
py::return_value_policy::reference)
.def(
"complexity",
(Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
py::return_value_policy::reference,
py::arg("complexity") = benchmark::oAuto);
using benchmark::Counter;
py::class_<Counter> py_counter(m, "Counter");
py::enum_<Counter::Flags>(py_counter, "Flags")
.value("kDefaults", Counter::Flags::kDefaults)
.value("kIsRate", Counter::Flags::kIsRate)
.value("kAvgThreads", Counter::Flags::kAvgThreads)
.value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate)
.value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant)
.value("kIsIterationInvariantRate",
Counter::Flags::kIsIterationInvariantRate)
.value("kAvgIterations", Counter::Flags::kAvgIterations)
.value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate)
.value("kInvert", Counter::Flags::kInvert)
.export_values()
.def(py::self | py::self);
py::enum_<Counter::OneK>(py_counter, "OneK")
.value("kIs1000", Counter::OneK::kIs1000)
.value("kIs1024", Counter::OneK::kIs1024)
.export_values();
py_counter
.def(py::init<double, Counter::Flags, Counter::OneK>(),
py::arg("value") = 0., py::arg("flags") = Counter::kDefaults,
py::arg("k") = Counter::kIs1000)
.def(py::init([](double value) { return Counter(value); }))
.def_readwrite("value", &Counter::value)
.def_readwrite("flags", &Counter::flags)
.def_readwrite("oneK", &Counter::oneK);
py::implicitly_convertible<py::float_, Counter>();
py::implicitly_convertible<py::int_, Counter>();
py::bind_map<benchmark::UserCounters>(m, "UserCounters");
using benchmark::State;
py::class_<State>(m, "State")
.def("__bool__", &State::KeepRunning)
.def_property_readonly("keep_running", &State::KeepRunning)
.def("pause_timing", &State::PauseTiming)
.def("resume_timing", &State::ResumeTiming)
.def("skip_with_error", &State::SkipWithError)
.def_property_readonly("error_occurred", &State::error_occurred)
.def("set_iteration_time", &State::SetIterationTime)
.def_property("bytes_processed", &State::bytes_processed,
&State::SetBytesProcessed)
.def_property("complexity_n", &State::complexity_length_n,
&State::SetComplexityN)
.def_property("items_processed", &State::items_processed,
&State::SetItemsProcessed)
.def("set_label", (void (State::*)(const char*)) & State::SetLabel)
.def("range", &State::range, py::arg("pos") = 0)
.def_property_readonly("iterations", &State::iterations)
.def_readwrite("counters", &State::counters)
.def_readonly("thread_index", &State::thread_index)
.def_readonly("threads", &State::threads);
m.def("Initialize", Initialize);
m.def("RegisterBenchmark", RegisterBenchmark,
py::return_value_policy::reference);
m.def("RunSpecifiedBenchmarks",
[]() { benchmark::RunSpecifiedBenchmarks(); });
};
} // namespace
07070100000012000081A400000000000000000000000160C0813C00000F10000000000000000000000000000000000000003C00000000benchmark-1.5.5/bindings/python/google_benchmark/example.py# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Python using C++ benchmark framework.
To run this example, you must first install the `google_benchmark` Python package.
To install using `setup.py`, download and extract the `google_benchmark` source.
In the extracted directory, execute:
python setup.py install
"""
import random
import time
import google_benchmark as benchmark
from google_benchmark import Counter
@benchmark.register
def empty(state):
    while state:
        pass


@benchmark.register
def sum_million(state):
    while state:
        sum(range(1_000_000))


@benchmark.register
def pause_timing(state):
    """Pause timing every iteration."""
    while state:
        # Construct a list of random ints every iteration without timing it
        state.pause_timing()
        random_list = [random.randint(0, 100) for _ in range(100)]
        state.resume_timing()
        # Time the in-place sorting algorithm
        random_list.sort()


@benchmark.register
def skipped(state):
    if True:  # Test some predicate here.
        state.skip_with_error("some error")
        return  # NOTE: You must explicitly return, or benchmark will continue.
    ...  # Benchmark code would be here.


@benchmark.register
def manual_timing(state):
    while state:
        # Manually count Python CPU time
        start = time.perf_counter()  # perf_counter_ns() in Python 3.7+
        # Something to benchmark
        time.sleep(0.01)
        end = time.perf_counter()
        state.set_iteration_time(end - start)


@benchmark.register
def custom_counters(state):
    """Collect custom metric using benchmark.Counter."""
    num_foo = 0.0
    while state:
        # Benchmark some code here
        pass
        # Collect some custom metric named foo
        num_foo += 0.13

    # Automatic Counter from numbers.
    state.counters["foo"] = num_foo
    # Set a counter as a rate.
    state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
    # Set a counter as an inverse of rate.
    state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
    # Set a counter as a thread-average quantity.
    state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
    # There's also a combined flag:
    state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)


@benchmark.register
@benchmark.option.measure_process_cpu_time()
@benchmark.option.use_real_time()
def with_options(state):
    while state:
        sum(range(1_000_000))


@benchmark.register(name="sum_million_microseconds")
@benchmark.option.unit(benchmark.kMicrosecond)
def with_options(state):
    while state:
        sum(range(1_000_000))


@benchmark.register
@benchmark.option.arg(100)
@benchmark.option.arg(1000)
def passing_argument(state):
    while state:
        sum(range(state.range(0)))


@benchmark.register
@benchmark.option.range(8, limit=8 << 10)
def using_range(state):
    while state:
        sum(range(state.range(0)))


@benchmark.register
@benchmark.option.range_multiplier(2)
@benchmark.option.range(1 << 10, 1 << 18)
@benchmark.option.complexity(benchmark.oN)
def computing_complexity(state):
    while state:
        sum(range(state.range(0)))
    state.complexity_n = state.range(0)


if __name__ == "__main__":
    benchmark.main()
07070100000013000081A400000000000000000000000160C0813C000001DA000000000000000000000000000000000000002F00000000benchmark-1.5.5/bindings/python/pybind11.BUILDcc_library(
name = "pybind11",
hdrs = glob(
include = [
"include/pybind11/*.h",
"include/pybind11/detail/*.h",
],
exclude = [
"include/pybind11/common.h",
"include/pybind11/eigen.h",
],
),
copts = [
"-fexceptions",
"-Wno-undefined-inline",
"-Wno-pragma-once-outside-header",
],
includes = ["include"],
visibility = ["//visibility:public"],
)
07070100000014000081A400000000000000000000000160C0813C00000088000000000000000000000000000000000000003500000000benchmark-1.5.5/bindings/python/python_headers.BUILDcc_library(
name = "python_headers",
hdrs = glob(["**/*.h"]),
includes = ["."],
visibility = ["//visibility:public"],
)
07070100000015000081A400000000000000000000000160C0813C00000010000000000000000000000000000000000000003100000000benchmark-1.5.5/bindings/python/requirements.txtabsl-py>=0.7.1
07070100000016000041ED00000000000000000000000360C0813C00000000000000000000000000000000000000000000001600000000benchmark-1.5.5/cmake07070100000017000081A400000000000000000000000160C0813C00000B99000000000000000000000000000000000000002F00000000benchmark-1.5.5/cmake/AddCXXCompilerFlag.cmake# - Adds a compiler flag if it is supported by the compiler
#
# This function checks that the supplied compiler flag is supported and then
# adds it to the corresponding compiler flags
#
# add_cxx_compiler_flag(<FLAG> [<VARIANT>])
#
# - Example
#
# include(AddCXXCompilerFlag)
# add_cxx_compiler_flag(-Wall)
# add_cxx_compiler_flag(-no-strict-aliasing RELEASE)
# Requires CMake 2.6+
if(__add_cxx_compiler_flag)
return()
endif()
set(__add_cxx_compiler_flag INCLUDED)
include(CheckCXXCompilerFlag)
function(mangle_compiler_flag FLAG OUTPUT)
string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG)
string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG})
string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE)
endfunction(mangle_compiler_flag)
function(add_cxx_compiler_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
if(ARGC GREATER 1)
set(VARIANT ${ARGV1})
string(TOUPPER "_${VARIANT}" VARIANT)
else()
set(VARIANT "")
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
endif()
endfunction()
function(add_required_cxx_compiler_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
if(ARGC GREATER 1)
set(VARIANT ${ARGV1})
string(TOUPPER "_${VARIANT}" VARIANT)
else()
set(VARIANT "")
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE)
else()
message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
endif()
endfunction()
function(check_cxx_warning_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
# Add -Werror to ensure the compiler generates an error if the warning flag
# doesn't exist.
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
endfunction()
07070100000018000081A400000000000000000000000160C0813C00000874000000000000000000000000000000000000002C00000000benchmark-1.5.5/cmake/CXXFeatureCheck.cmake# - Compile and run code to check for C++ features
#
# This functions compiles a source file under the `cmake` folder
# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake
# environment
#
# cxx_feature_check(<FLAG> [<VARIANT>])
#
# - Example
#
# include(CXXFeatureCheck)
# cxx_feature_check(STD_REGEX)
# Requires CMake 2.8.12+
if(__cxx_feature_check)
return()
endif()
set(__cxx_feature_check INCLUDED)
function(cxx_feature_check FILE)
string(TOLOWER ${FILE} FILE)
string(TOUPPER ${FILE} VAR)
string(TOUPPER "HAVE_${VAR}" FEATURE)
if (DEFINED HAVE_${VAR})
set(HAVE_${VAR} 1 PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
return()
endif()
if (ARGC GREATER 1)
message(STATUS "Enabling additional flags: ${ARGV1}")
list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1})
endif()
if (NOT DEFINED COMPILE_${FEATURE})
message(STATUS "Performing Test ${FEATURE}")
if(CMAKE_CROSSCOMPILING)
try_compile(COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
if(COMPILE_${FEATURE})
message(WARNING
"If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
set(RUN_${FEATURE} 0 CACHE INTERNAL "")
else()
set(RUN_${FEATURE} 1 CACHE INTERNAL "")
endif()
else()
message(STATUS "Performing Test ${FEATURE}")
try_run(RUN_${FEATURE} COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
endif()
endif()
if(RUN_${FEATURE} EQUAL 0)
message(STATUS "Performing Test ${FEATURE} -- success")
set(HAVE_${VAR} 1 PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
else()
if(NOT COMPILE_${FEATURE})
message(STATUS "Performing Test ${FEATURE} -- failed to compile")
else()
message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run")
endif()
endif()
endfunction()
07070100000019000081A400000000000000000000000160C0813C00000041000000000000000000000000000000000000002600000000benchmark-1.5.5/cmake/Config.cmake.ininclude("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")
0707010000001A000081A400000000000000000000000160C0813C00000749000000000000000000000000000000000000002A00000000benchmark-1.5.5/cmake/GetGitVersion.cmake# - Returns a version string from Git tags
#
# This function inspects the annotated git tags for the project and returns a string
# into a CMake variable
#
# get_git_version(<var>)
#
# - Example
#
# include(GetGitVersion)
# get_git_version(GIT_VERSION)
#
# Requires CMake 2.8.11+
find_package(Git)
if(__get_git_version)
return()
endif()
set(__get_git_version INCLUDED)
function(get_git_version var)
if(GIT_EXECUTABLE)
execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
RESULT_VARIABLE status
OUTPUT_VARIABLE GIT_DESCRIBE_VERSION
ERROR_QUIET)
if(status)
set(GIT_DESCRIBE_VERSION "v0.0.0")
endif()
string(STRIP ${GIT_DESCRIBE_VERSION} GIT_DESCRIBE_VERSION)
if(GIT_DESCRIBE_VERSION MATCHES v[^-]*-)
string(REGEX REPLACE "v([^-]*)-([0-9]+)-.*" "\\1.\\2" GIT_VERSION ${GIT_DESCRIBE_VERSION})
else()
string(REGEX REPLACE "v(.*)" "\\1" GIT_VERSION ${GIT_DESCRIBE_VERSION})
endif()
# Work out if the repository is dirty
execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_QUIET
ERROR_QUIET)
execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_DIFF_INDEX
ERROR_QUIET)
string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
if (${GIT_DIRTY})
set(GIT_DESCRIBE_VERSION "${GIT_DESCRIBE_VERSION}-dirty")
endif()
message(STATUS "git version: ${GIT_DESCRIBE_VERSION} normalized to ${GIT_VERSION}")
else()
set(GIT_VERSION "0.0.0")
endif()
set(${var} ${GIT_VERSION} PARENT_SCOPE)
endfunction()
0707010000001B000081A400000000000000000000000160C0813C00000718000000000000000000000000000000000000002700000000benchmark-1.5.5/cmake/GoogleTest.cmake# Download and unpack googletest at configure time
set(GOOGLETEST_PREFIX "${benchmark_BINARY_DIR}/third_party/googletest")
configure_file(${benchmark_SOURCE_DIR}/cmake/GoogleTest.cmake.in ${GOOGLETEST_PREFIX}/CMakeLists.txt @ONLY)
set(GOOGLETEST_PATH "${CMAKE_CURRENT_SOURCE_DIR}/googletest" CACHE PATH "") # Mind the quotes
execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}"
-DALLOW_DOWNLOADING_GOOGLETEST=${BENCHMARK_DOWNLOAD_DEPENDENCIES} -DGOOGLETEST_PATH:PATH=${GOOGLETEST_PATH} .
RESULT_VARIABLE result
WORKING_DIRECTORY ${GOOGLETEST_PREFIX}
)
if(result)
message(FATAL_ERROR "CMake step for googletest failed: ${result}")
endif()
execute_process(
COMMAND ${CMAKE_COMMAND} --build .
RESULT_VARIABLE result
WORKING_DIRECTORY ${GOOGLETEST_PREFIX}
)
if(result)
message(FATAL_ERROR "Build step for googletest failed: ${result}")
endif()
# Prevent overriding the parent project's compiler/linker
# settings on Windows
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
include(${GOOGLETEST_PREFIX}/googletest-paths.cmake)
# Add googletest directly to our build. This defines
# the gtest and gtest_main targets.
add_subdirectory(${GOOGLETEST_SOURCE_DIR}
${GOOGLETEST_BINARY_DIR}
EXCLUDE_FROM_ALL)
set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest_main,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock_main,INTERFACE_INCLUDE_DIRECTORIES>)
0707010000001C000081A400000000000000000000000160C0813C00000A26000000000000000000000000000000000000002A00000000benchmark-1.5.5/cmake/GoogleTest.cmake.incmake_minimum_required(VERSION 2.8.12)
project(googletest-download NONE)
# Enable ExternalProject CMake module
include(ExternalProject)
option(ALLOW_DOWNLOADING_GOOGLETEST "If googletest src tree is not found in location specified by GOOGLETEST_PATH, do fetch the archive from internet" OFF)
set(GOOGLETEST_PATH "/usr/src/googletest" CACHE PATH
"Path to the googletest root tree. Should contain googletest and googlemock subdirs. And CMakeLists.txt in root, and in both of these subdirs")
# Download and install GoogleTest
message(STATUS "Looking for Google Test sources")
message(STATUS "Looking for Google Test sources in ${GOOGLETEST_PATH}")
if(EXISTS "${GOOGLETEST_PATH}" AND IS_DIRECTORY "${GOOGLETEST_PATH}" AND EXISTS "${GOOGLETEST_PATH}/CMakeLists.txt" AND
EXISTS "${GOOGLETEST_PATH}/googletest" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googletest" AND EXISTS "${GOOGLETEST_PATH}/googletest/CMakeLists.txt" AND
EXISTS "${GOOGLETEST_PATH}/googlemock" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googlemock" AND EXISTS "${GOOGLETEST_PATH}/googlemock/CMakeLists.txt")
message(STATUS "Found Google Test in ${GOOGLETEST_PATH}")
ExternalProject_Add(
googletest
PREFIX "${CMAKE_BINARY_DIR}"
DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download"
SOURCE_DIR "${GOOGLETEST_PATH}" # use existing src dir.
BINARY_DIR "${CMAKE_BINARY_DIR}/build"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
else()
if(NOT ALLOW_DOWNLOADING_GOOGLETEST)
message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
else()
message(WARNING "Did not find Google Test sources! Fetching from web...")
ExternalProject_Add(
googletest
GIT_REPOSITORY https://github.com/google/googletest.git
GIT_TAG master
PREFIX "${CMAKE_BINARY_DIR}"
STAMP_DIR "${CMAKE_BINARY_DIR}/stamp"
DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download"
SOURCE_DIR "${CMAKE_BINARY_DIR}/src"
BINARY_DIR "${CMAKE_BINARY_DIR}/build"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
endif()
endif()
ExternalProject_Get_Property(googletest SOURCE_DIR BINARY_DIR)
file(WRITE googletest-paths.cmake
"set(GOOGLETEST_SOURCE_DIR \"${SOURCE_DIR}\")
set(GOOGLETEST_BINARY_DIR \"${BINARY_DIR}\")
")
0707010000001D000041ED00000000000000000000000260C0813C00000000000000000000000000000000000000000000001E00000000benchmark-1.5.5/cmake/Modules0707010000001E000081A400000000000000000000000160C0813C0000017B000000000000000000000000000000000000002F00000000benchmark-1.5.5/cmake/Modules/FindLLVMAr.cmakeinclude(FeatureSummary)
find_program(LLVMAR_EXECUTABLE
NAMES llvm-ar
DOC "The llvm-ar executable"
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LLVMAr
DEFAULT_MSG
LLVMAR_EXECUTABLE)
SET_PACKAGE_PROPERTIES(LLVMAr PROPERTIES
URL https://llvm.org/docs/CommandGuide/llvm-ar.html
DESCRIPTION "create, modify, and extract from archives"
)
0707010000001F000081A400000000000000000000000160C0813C00000184000000000000000000000000000000000000002F00000000benchmark-1.5.5/cmake/Modules/FindLLVMNm.cmakeinclude(FeatureSummary)
find_program(LLVMNM_EXECUTABLE
NAMES llvm-nm
DOC "The llvm-nm executable"
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LLVMNm
DEFAULT_MSG
LLVMNM_EXECUTABLE)
SET_PACKAGE_PROPERTIES(LLVMNm PROPERTIES
URL https://llvm.org/docs/CommandGuide/llvm-nm.html
DESCRIPTION "list LLVM bitcode and object file’s symbol table"
)
07070100000020000081A400000000000000000000000160C0813C00000153000000000000000000000000000000000000003300000000benchmark-1.5.5/cmake/Modules/FindLLVMRanLib.cmakeinclude(FeatureSummary)
find_program(LLVMRANLIB_EXECUTABLE
NAMES llvm-ranlib
DOC "The llvm-ranlib executable"
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LLVMRanLib
DEFAULT_MSG
LLVMRANLIB_EXECUTABLE)
SET_PACKAGE_PROPERTIES(LLVMRanLib PROPERTIES
DESCRIPTION "generate index for LLVM archive"
)
07070100000021000081A400000000000000000000000160C0813C00000319000000000000000000000000000000000000002C00000000benchmark-1.5.5/cmake/Modules/FindPFM.cmake# If successful, the following variables will be defined:
# HAVE_LIBPFM.
# Set BENCHMARK_ENABLE_LIBPFM to 0 to disable, regardless of libpfm presence.
include(CheckIncludeFile)
include(CheckLibraryExists)
enable_language(C)
check_library_exists(libpfm.a pfm_initialize "" HAVE_LIBPFM_INITIALIZE)
if(HAVE_LIBPFM_INITIALIZE)
check_include_file(perfmon/perf_event.h HAVE_PERFMON_PERF_EVENT_H)
check_include_file(perfmon/pfmlib.h HAVE_PERFMON_PFMLIB_H)
check_include_file(perfmon/pfmlib_perf_event.h HAVE_PERFMON_PFMLIB_PERF_EVENT_H)
if(HAVE_PERFMON_PERF_EVENT_H AND HAVE_PERFMON_PFMLIB_H AND HAVE_PERFMON_PFMLIB_PERF_EVENT_H)
message("Using Perf Counters.")
set(HAVE_LIBPFM 1)
endif()
else()
message("Perf Counters support requested, but was unable to find libpfm.")
endif()
07070100000022000081A400000000000000000000000160C0813C00000131000000000000000000000000000000000000002600000000benchmark-1.5.5/cmake/benchmark.pc.inprefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: @PROJECT_NAME@
Description: Google microbenchmark framework
Version: @VERSION@
Libs: -L${libdir} -lbenchmark
Libs.private: -lpthread
Cflags: -I${includedir}
07070100000023000081A400000000000000000000000160C0813C0000010B000000000000000000000000000000000000002A00000000benchmark-1.5.5/cmake/gnu_posix_regex.cpp#include <gnuregex.h>
#include <string>
int main() {
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
return ec;
}
return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}
07070100000024000081A400000000000000000000000160C0813C0000011F000000000000000000000000000000000000002B00000000benchmark-1.5.5/cmake/llvm-toolchain.cmakefind_package(LLVMAr REQUIRED)
set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE)
find_package(LLVMNm REQUIRED)
set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE)
find_package(LLVMRanLib REQUIRED)
set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE)
07070100000025000081A400000000000000000000000160C0813C00000129000000000000000000000000000000000000002600000000benchmark-1.5.5/cmake/posix_regex.cpp#include <regex.h>
#include <string>
int main() {
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
return ec;
}
int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
regfree(&re);
return ret;
}
07070100000026000081A400000000000000000000000160C0813C0000005D000000000000000000000000000000000000002700000000benchmark-1.5.5/cmake/split_list.cmakemacro(split_list listname)
string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()
07070100000027000081A400000000000000000000000160C0813C00000103000000000000000000000000000000000000002400000000benchmark-1.5.5/cmake/std_regex.cpp#include <regex>
#include <string>
int main() {
const std::string str = "test0159";
std::regex re;
re = std::regex("^[a-z]+[0-9]+$",
std::regex_constants::extended | std::regex_constants::nosubs);
return std::regex_search(str, re) ? 0 : -1;
}
07070100000028000081A400000000000000000000000160C0813C00000088000000000000000000000000000000000000002700000000benchmark-1.5.5/cmake/steady_clock.cpp#include <chrono>
int main() {
typedef std::chrono::steady_clock Clock;
Clock::time_point tp = Clock::now();
((void)tp);
}
07070100000029000081A400000000000000000000000160C0813C0000004F000000000000000000000000000000000000003300000000benchmark-1.5.5/cmake/thread_safety_attributes.cpp#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"
int main() {}
0707010000002A000081A400000000000000000000000160C0813C00000287000000000000000000000000000000000000002000000000benchmark-1.5.5/dependencies.md# Build tool dependency policy
To ensure the broadest compatibility when building the benchmark library, but
still allow forward progress, we require any build tooling to be available for:
* Debian stable AND
* The last two Ubuntu LTS releases
Currently, this means using build tool versions that are available for Ubuntu
16.04 (Xenial), Ubuntu 18.04 (Bionic), and Debian stretch.
_Note, [travis](.travis.yml) runs under Ubuntu 14.04 (Trusty) for Linux builds._
## cmake
The currently supported version is cmake 3.5.1 as of 2018-06-06.
_Note, this version is also available for Ubuntu 14.04, the previous Ubuntu LTS
release, as `cmake3`._
0707010000002B000041ED00000000000000000000000260C0813C00000000000000000000000000000000000000000000001500000000benchmark-1.5.5/docs0707010000002C000081A400000000000000000000000160C0813C000014A3000000000000000000000000000000000000002600000000benchmark-1.5.5/docs/AssemblyTests.md# Assembly Tests
The Benchmark library provides a number of functions whose primary
purpose is to affect assembly generation, including `DoNotOptimize`
and `ClobberMemory`. In addition there are other functions,
such as `KeepRunning`, for which generating good assembly is paramount.
For these functions it's important to have tests that verify the
correctness and quality of the implementation. This requires testing
the code generated by the compiler.
This document describes how the Benchmark library tests compiler output,
as well as how to properly write new tests.
## Anatomy of a Test
Writing a test has two steps:
* Write the code you want to generate assembly for.
* Add `// CHECK` lines to match against the verified assembly.
Example:
```c++
// CHECK-LABEL: test_add:
extern "C" int test_add() {
extern int ExternInt;
return ExternInt + 1;
// CHECK: movl ExternInt(%rip), %eax
// CHECK: addl %eax
// CHECK: ret
}
```
#### LLVM Filecheck
[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
is used to test the generated assembly against the `// CHECK` lines
specified in the tests source file. Please see the documentation
linked above for information on how to write `CHECK` directives.
#### Tips and Tricks:
* Tests should match the minimal amount of output required to establish
correctness. `CHECK` directives don't have to match on the exact next line
after the previous match, so tests should omit checks for unimportant
bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
can be used to ensure a match occurs exactly after the previous match).
* The tests are compiled with `-O3 -g0`. So we're only testing the
optimized output.
* The assembly output is further cleaned up using `tools/strip_asm.py`.
This removes comments, assembler directives, and unused labels before
the test is run.
* The generated and stripped assembly file for a test is output under
`<build-directory>/test/<test-name>.s`
* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
to specify lines that should only match in certain situations.
The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
are only expected to match Clang or GCC's output respectively. Normal
`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
`CHECK` lines)
* Use `extern "C"` to disable name mangling for specific functions. This
makes them easier to name in the `CHECK` lines (see the sketch below).
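For instance, a small test combining `extern "C"` naming with `CHECK-NEXT`
could look like the following sketch (the exact instruction sequence shown is
an assumption about typical x86-64 output from both compilers):
```c++
// CHECK-LABEL: test_load_extern:
extern "C" int test_load_extern() {
  extern int ExternInt;
  return ExternInt;
  // CHECK: movl ExternInt(%rip), %eax
  // CHECK-NEXT: ret
}
```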
## Problems Writing Portable Tests
Writing tests that check the code generated by a compiler is
inherently non-portable. Different compilers and even different compiler
versions may generate entirely different code. The Benchmark tests
must tolerate this.
LLVM Filecheck provides a number of mechanisms to help write
"more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
#### Capturing Variables
For example, say GCC stores a variable in a register but Clang stores
it in memory. To write a test that tolerates both cases we "capture"
the destination of the store, and then use the captured expression
to write the remainder of the test.
```c++
// CHECK-LABEL: test_div_no_op_into_shr:
extern "C" void test_div_no_op_into_shr(int value) {
int divisor = 2;
benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
return value / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
}
```
#### Using Regular Expressions to Match Differing Output
Often tests require testing assembly lines which may subtly differ
between compilers or compiler versions. A common example of this
is matching stack frame addresses. In this case regular expressions
can be used to match the differing bits of output. For example:
```c++
int ExternInt;
struct Point { int x, y, z; };
// CHECK-LABEL: test_store_point:
extern "C" void test_store_point() {
Point p{ExternInt, ExternInt, ExternInt};
benchmark::DoNotOptimize(p);
// CHECK: movl ExternInt(%rip), %eax
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: ret
}
```
## Current Requirements and Limitations
The tests require Filecheck to be installed along the `PATH` of the
build machine. Otherwise the tests will be disabled.
Additionally, as mentioned in the previous section, codegen tests are
inherently non-portable. Currently the tests are limited to:
* x86_64 targets.
* Compiled with GCC or Clang
Further work could be done, at least on a limited basis, to extend the
tests to other architectures and compilers (using `CHECK` prefixes).
Furthermore, the tests fail for builds which specify additional flags
that modify code generation, including `--coverage` or `-fsanitize=`.
0707010000002D000081A400000000000000000000000160C0813C0000001A000000000000000000000000000000000000002100000000benchmark-1.5.5/docs/_config.ymltheme: jekyll-theme-hacker0707010000002E000081A400000000000000000000000160C0813C000005A2000000000000000000000000000000000000002600000000benchmark-1.5.5/docs/perf_counters.md<a name="perf-counters" />
# User-Requested Performance Counters
When running benchmarks, the user may choose to request collection of
performance counters. This may be useful in investigation scenarios: narrowing
down the cause of a regression, or verifying that the underlying cause of a
performance improvement matches expectations.
This feature is available if:
* The benchmark is run on an architecture featuring a Performance Monitoring
Unit (PMU),
* The benchmark is compiled with support for collecting counters. Currently,
this requires [libpfm](http://perfmon2.sourceforge.net/) to be available at
build time.
The feature does not require modifying benchmark code. Counter collection is
handled at the boundaries where timer collection is also handled.
To opt-in:
* Install `libpfm4-dev`, e.g. `apt-get install libpfm4-dev`.
* Enable the cmake flag `BENCHMARK_ENABLE_LIBPFM`.
To use, pass a comma-separated list of counter names through the
`--benchmark_perf_counters` flag. The names are decoded through libpfm and are
therefore platform specific, but some (e.g. `CYCLES` or `INSTRUCTIONS`) are
mapped by libpfm to platform-specific events; see the libpfm
[documentation](http://perfmon2.sourceforge.net/docs.html) for more details.
The counter values are reported back through the [User Counters](../README.md#custom-counters)
mechanism, which means they are available in all the output formats (e.g. JSON) supported
by User Counters.0707010000002F000081A400000000000000000000000160C0813C00000287000000000000000000000000000000000000002C00000000benchmark-1.5.5/docs/random_interleaving.md<a name="interleaving" />
# Random Interleaving
[Random Interleaving](https://github.com/google/benchmark/issues/1051) is a
technique to lower run-to-run variance. It randomly interleaves repetitions of a
microbenchmark with repetitions from other microbenchmarks in the same benchmark
test. Data shows it is able to lower run-to-run variance by
[40%](https://github.com/google/benchmark/issues/1051) on average.
To use it, set `--benchmark_enable_random_interleaving=true`. Optionally,
specify a non-zero repetition count (e.g. `--benchmark_repetitions=9`) and
decrease the per-repetition time (e.g. `--benchmark_min_time=0.1`).
07070100000030000081A400000000000000000000000160C0813C0000039F000000000000000000000000000000000000002200000000benchmark-1.5.5/docs/releasing.md# How to release
* Make sure you're on main and synced to HEAD
* Ensure the project builds and tests run (sanity check only, obviously)
* `parallel -j0 exec ::: test/*_test` can help ensure everything at least
passes
* Prepare release notes
* `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
commits between the last annotated tag and HEAD
* Pick the most interesting.
* Create one last commit that updates the version saved in `CMakeLists.txt` to the release version you're creating. (This version will be used if benchmark is installed from the archive you'll be creating in the next step.)
```
project (benchmark VERSION 1.5.3 LANGUAGES CXX)
```
* Create a release through github's interface
* Note this will create a lightweight tag.
* Update this to an annotated tag:
* `git pull --tags`
* `git tag -a -f <tag> <tag>`
* `git push --force origin`
07070100000031000081A400000000000000000000000160C0813C000030BB000000000000000000000000000000000000001E00000000benchmark-1.5.5/docs/tools.md# Benchmark Tools
## compare.py
The `compare.py` script can be used to compare the results of benchmarks.
### Dependencies
The utility relies on the [scipy](https://www.scipy.org) package which can be installed using pip:
```bash
pip3 install -r requirements.txt
```
### Displaying aggregates only
The switch `-a` / `--display_aggregates_only` can be used to control the
display of the normal iterations vs. the aggregates. When passed, it is
forwarded to the benchmark binaries being run, and is also accounted for in
the tool itself: only the aggregates will be displayed, not the normal runs.
It only affects the display; the separate runs will still be used to calculate
the U test.
### Modes of operation
There are three modes of operation:
1. Just compare two benchmarks
The program is invoked like:
``` bash
$ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
```
Where `<benchmark_baseline>` and `<benchmark_contender>` each specify either a benchmark executable file or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, be it normal `--benchmark_*` parameters or some custom parameters your binary takes.
Example output:
```
$ ./compare.py benchmarks ./a.out ./a.out
RUNNING: ./a.out --benchmark_out=/tmp/tmprBT5nW
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:16:44
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 19101577 211.669MB/s
BM_memcpy/64 76 ns 76 ns 9412571 800.199MB/s
BM_memcpy/512 84 ns 84 ns 8249070 5.64771GB/s
BM_memcpy/1024 116 ns 116 ns 6181763 8.19505GB/s
BM_memcpy/8192 643 ns 643 ns 1062855 11.8636GB/s
BM_copy/8 222 ns 222 ns 3137987 34.3772MB/s
BM_copy/64 1608 ns 1608 ns 432758 37.9501MB/s
BM_copy/512 12589 ns 12589 ns 54806 38.7867MB/s
BM_copy/1024 25169 ns 25169 ns 27713 38.8003MB/s
BM_copy/8192 201165 ns 201112 ns 3486 38.8466MB/s
RUNNING: ./a.out --benchmark_out=/tmp/tmpt1wwG_
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:16:53
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 19397903 211.255MB/s
BM_memcpy/64 73 ns 73 ns 9691174 839.635MB/s
BM_memcpy/512 85 ns 85 ns 8312329 5.60101GB/s
BM_memcpy/1024 118 ns 118 ns 6438774 8.11608GB/s
BM_memcpy/8192 656 ns 656 ns 1068644 11.6277GB/s
BM_copy/8 223 ns 223 ns 3146977 34.2338MB/s
BM_copy/64 1611 ns 1611 ns 435340 37.8751MB/s
BM_copy/512 12622 ns 12622 ns 54818 38.6844MB/s
BM_copy/1024 25257 ns 25239 ns 27779 38.6927MB/s
BM_copy/8192 205013 ns 205010 ns 3479 38.108MB/s
Comparing ./a.out to ./a.out
Benchmark Time CPU Time Old Time New CPU Old CPU New
------------------------------------------------------------------------------------------------------
BM_memcpy/8 +0.0020 +0.0020 36 36 36 36
BM_memcpy/64 -0.0468 -0.0470 76 73 76 73
BM_memcpy/512 +0.0081 +0.0083 84 85 84 85
BM_memcpy/1024 +0.0098 +0.0097 116 118 116 118
BM_memcpy/8192 +0.0200 +0.0203 643 656 643 656
BM_copy/8 +0.0046 +0.0042 222 223 222 223
BM_copy/64 +0.0020 +0.0020 1608 1611 1608 1611
BM_copy/512 +0.0027 +0.0026 12589 12622 12589 12622
BM_copy/1024 +0.0035 +0.0028 25169 25257 25169 25239
BM_copy/8192 +0.0191 +0.0194 201165 205013 201112 205010
```
For every benchmark from the first run, the tool looks for the benchmark with exactly the same name in the second run, and then compares the results. If no benchmark with the same name is found, the benchmark is omitted from the diff.
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
2. Compare two different filters of one benchmark
The program is invoked like:
``` bash
$ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
```
Where `<benchmark>` specifies either a benchmark executable file or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, be it normal `--benchmark_*` parameters or some custom parameters your binary takes.
Example output:
```
$ ./compare.py filters ./a.out BM_memcpy BM_copy
RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmpBWKk0k
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:37:28
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 17891491 211.215MB/s
BM_memcpy/64 74 ns 74 ns 9400999 825.646MB/s
BM_memcpy/512 87 ns 87 ns 8027453 5.46126GB/s
BM_memcpy/1024 111 ns 111 ns 6116853 8.5648GB/s
BM_memcpy/8192 657 ns 656 ns 1064679 11.6247GB/s
RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpAvWcOM
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:37:33
----------------------------------------------------
Benchmark Time CPU Iterations
----------------------------------------------------
BM_copy/8 227 ns 227 ns 3038700 33.6264MB/s
BM_copy/64 1640 ns 1640 ns 426893 37.2154MB/s
BM_copy/512 12804 ns 12801 ns 55417 38.1444MB/s
BM_copy/1024 25409 ns 25407 ns 27516 38.4365MB/s
BM_copy/8192 202986 ns 202990 ns 3454 38.4871MB/s
Comparing BM_memcpy to BM_copy (from ./a.out)
Benchmark Time CPU Time Old Time New CPU Old CPU New
--------------------------------------------------------------------------------------------------------------------
[BM_memcpy vs. BM_copy]/8 +5.2829 +5.2812 36 227 36 227
[BM_memcpy vs. BM_copy]/64 +21.1719 +21.1856 74 1640 74 1640
[BM_memcpy vs. BM_copy]/512 +145.6487 +145.6097 87 12804 87 12801
[BM_memcpy vs. BM_copy]/1024 +227.1860 +227.1776 111 25409 111 25407
[BM_memcpy vs. BM_copy]/8192 +308.1664 +308.2898 657 202986 656 202990
```
As you can see, the filter is applied to the benchmarks both when running them and before doing the diff; to make the diff work, the matched parts of the names are replaced with a common string. Thus, you can compare two different benchmark families within one benchmark binary.
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
3. Compare filter one from benchmark one to filter two from benchmark two:
The program is invoked like:
``` bash
$ compare.py filters <benchmark_baseline> <filter_baseline> <benchmark_contender> <filter_contender> [benchmark options]...
```
Where `<benchmark_baseline>` and `<benchmark_contender>` each specify either a benchmark executable file or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, be it normal `--benchmark_*` parameters or some custom parameters your binary takes.
Example output:
```
$ ./compare.py benchmarksfiltered ./a.out BM_memcpy ./a.out BM_copy
RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmp_FvbYg
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:38:27
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 37 ns 37 ns 18953482 204.118MB/s
BM_memcpy/64 74 ns 74 ns 9206578 828.245MB/s
BM_memcpy/512 91 ns 91 ns 8086195 5.25476GB/s
BM_memcpy/1024 120 ns 120 ns 5804513 7.95662GB/s
BM_memcpy/8192 664 ns 664 ns 1028363 11.4948GB/s
RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpDfL5iE
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:38:32
----------------------------------------------------
Benchmark Time CPU Iterations
----------------------------------------------------
BM_copy/8 230 ns 230 ns 2985909 33.1161MB/s
BM_copy/64 1654 ns 1653 ns 419408 36.9137MB/s
BM_copy/512 13122 ns 13120 ns 53403 37.2156MB/s
BM_copy/1024 26679 ns 26666 ns 26575 36.6218MB/s
BM_copy/8192 215068 ns 215053 ns 3221 36.3283MB/s
Comparing BM_memcpy (from ./a.out) to BM_copy (from ./a.out)
Benchmark Time CPU Time Old Time New CPU Old CPU New
--------------------------------------------------------------------------------------------------------------------
[BM_memcpy vs. BM_copy]/8 +5.1649 +5.1637 37 230 37 230
[BM_memcpy vs. BM_copy]/64 +21.4352 +21.4374 74 1654 74 1653
[BM_memcpy vs. BM_copy]/512 +143.6022 +143.5865 91 13122 91 13120
[BM_memcpy vs. BM_copy]/1024 +221.5903 +221.4790 120 26679 120 26666
[BM_memcpy vs. BM_copy]/8192 +322.9059 +323.0096 664 215068 664 215053
```
This is a mix of the previous two modes: two (potentially different) benchmark binaries are run, and a different filter is applied to each one.
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
### U test
If there is a sufficient repetition count of the benchmarks, the tool can
perform a [U Test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test) of the
null hypothesis that it is equally likely that a randomly selected value from
one sample will be less than or greater than a randomly selected value from a
second sample.
If the calculated p-value is lower than the significance level alpha, then the
result is said to be statistically significant and the null hypothesis is
rejected; in other words, the two benchmarks are unlikely to be identical.
**WARNING**: a **LARGE** number of repetitions (no less than 9) is required for the test to be
meaningful!
07070100000032000041ED00000000000000000000000360C0813C00000000000000000000000000000000000000000000001800000000benchmark-1.5.5/include07070100000033000041ED00000000000000000000000260C0813C00000000000000000000000000000000000000000000002200000000benchmark-1.5.5/include/benchmark07070100000034000081A400000000000000000000000160C0813C0000E5BF000000000000000000000000000000000000002E00000000benchmark-1.5.5/include/benchmark/benchmark.h// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Support for registering benchmarks for functions.
/* Example usage:
// Define a function that executes the code to be measured a
// specified number of times:
static void BM_StringCreation(benchmark::State& state) {
for (auto _ : state)
std::string empty_string;
}
// Register the function as a benchmark
BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
for (auto _ : state)
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
// Augment the main() program to invoke benchmarks if specified
// via the --benchmark_filter command line flag. E.g.,
// my_unittest --benchmark_filter=all
// my_unittest --benchmark_filter=BM_StringCreation
// my_unittest --benchmark_filter=String
// my_unittest --benchmark_filter='Copy|Creation'
int main(int argc, char** argv) {
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
benchmark::Shutdown();
return 0;
}
// Sometimes a family of microbenchmarks can be implemented with
// just one routine that takes an extra argument to specify which
// one of the family of benchmarks to run. For example, the following
// code defines a family of microbenchmarks for measuring the speed
// of memcpy() calls of different lengths:
static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
for (auto _ : state)
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(state.iterations() * state.range(0));
delete[] src; delete[] dst;
}
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
// The preceding code is quite repetitive, and can be replaced with the
// following short-hand. The following invocation will pick a few
// appropriate arguments in the specified range and will generate a
// microbenchmark for each such argument.
BENCHMARK(BM_memcpy)->Range(8, 8<<10);
// You might have a microbenchmark that depends on two inputs. For
// example, the following code defines a family of microbenchmarks for
// measuring the speed of set insertion.
static void BM_SetInsert(benchmark::State& state) {
set<int> data;
for (auto _ : state) {
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j)
data.insert(RandomNumber());
}
}
BENCHMARK(BM_SetInsert)
->Args({1<<10, 128})
->Args({2<<10, 128})
->Args({4<<10, 128})
->Args({8<<10, 128})
->Args({1<<10, 512})
->Args({2<<10, 512})
->Args({4<<10, 512})
->Args({8<<10, 512});
// The preceding code is quite repetitive, and can be replaced with
// the following short-hand. The following macro will pick a few
// appropriate arguments in the product of the two specified ranges
// and will generate a microbenchmark for each such pair.
BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
// For more complex patterns of inputs, passing a custom function
// to Apply allows programmatic specification of an
// arbitrary set of arguments to run the microbenchmark on.
// The following example enumerates a dense range on
// one parameter, and a sparse range on the second.
static void CustomArguments(benchmark::internal::Benchmark* b) {
for (int i = 0; i <= 10; ++i)
for (int j = 32; j <= 1024*1024; j *= 8)
b->Args({i, j});
}
BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
// Templated microbenchmarks work the same way:
// Produce then consume 'size' messages 'iters' times
// Measures throughput in the absence of multiprogramming.
template <class Q> void BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
for (auto _ : state) {
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range(0); e--; )
q.Wait(&v);
}
// actually messages, not bytes:
state.SetBytesProcessed(state.iterations() * state.range(0));
}
BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
Use `Benchmark::MinTime(double t)` to set the minimum time used to run the
benchmark. This option overrides the `benchmark_min_time` flag.
void BM_test(benchmark::State& state) {
... body ...
}
BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
In a multithreaded test, it is guaranteed that none of the threads will start
until all have reached the loop start, and all will have finished before any
thread exits the loop body. As such, any global setup or teardown you want to
do can be wrapped in a check against the thread index:
static void BM_MultiThreaded(benchmark::State& state) {
if (state.thread_index == 0) {
// Setup code here.
}
for (auto _ : state) {
// Run the test as normal.
}
if (state.thread_index == 0) {
// Teardown code here.
}
}
BENCHMARK(BM_MultiThreaded)->Threads(4);
If a benchmark runs for a few milliseconds it may be hard to visually compare
the measured times, since the output data is given in nanoseconds by default.
To set the time unit manually, specify it as follows:
BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
*/
#ifndef BENCHMARK_BENCHMARK_H_
#define BENCHMARK_BENCHMARK_H_
// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
#define BENCHMARK_HAS_CXX11
#endif
// This _MSC_VER check should detect VS 2017 v15.3 and newer.
#if __cplusplus >= 201703L || \
(defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L)
#define BENCHMARK_HAS_CXX17
#endif
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iosfwd>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>
#if defined(BENCHMARK_HAS_CXX11)
#include <initializer_list>
#include <type_traits>
#include <utility>
#endif
#if defined(_MSC_VER)
#include <intrin.h> // for _ReadWriteBarrier
#endif
#ifndef BENCHMARK_HAS_CXX11
#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
TypeName& operator=(const TypeName&)
#else
#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
TypeName& operator=(const TypeName&) = delete
#endif
#ifdef BENCHMARK_HAS_CXX17
#define BENCHMARK_UNUSED [[maybe_unused]]
#elif defined(__GNUC__) || defined(__clang__)
#define BENCHMARK_UNUSED __attribute__((unused))
#else
#define BENCHMARK_UNUSED
#endif
#if defined(__GNUC__) || defined(__clang__)
#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
#define BENCHMARK_NOEXCEPT noexcept
#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
#elif defined(_MSC_VER) && !defined(__clang__)
#define BENCHMARK_ALWAYS_INLINE __forceinline
#if _MSC_VER >= 1900
#define BENCHMARK_NOEXCEPT noexcept
#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
#else
#define BENCHMARK_NOEXCEPT
#define BENCHMARK_NOEXCEPT_OP(x)
#endif
#define __func__ __FUNCTION__
#else
#define BENCHMARK_ALWAYS_INLINE
#define BENCHMARK_NOEXCEPT
#define BENCHMARK_NOEXCEPT_OP(x)
#endif
#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
#if defined(__GNUC__) || defined(__clang__)
#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
#else
#define BENCHMARK_BUILTIN_EXPECT(x, y) x
#define BENCHMARK_DEPRECATED_MSG(msg)
#define BENCHMARK_WARNING_MSG(msg) \
__pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
__LINE__) ") : warning note: " msg))
#endif
#if defined(__GNUC__) && !defined(__clang__)
#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#define BENCHMARK_UNREACHABLE() __assume(false)
#else
#define BENCHMARK_UNREACHABLE() ((void)0)
#endif
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_OVERRIDE override
#else
#define BENCHMARK_OVERRIDE
#endif
namespace benchmark {
class BenchmarkReporter;
class MemoryManager;
void Initialize(int* argc, char** argv);
void Shutdown();
// Report to stdout all arguments in 'argv' as unrecognized except the first.
// Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1).
bool ReportUnrecognizedArguments(int argc, char** argv);
// Generate a list of benchmarks matching the specified --benchmark_filter flag
// and if --benchmark_list_tests is specified return after printing the name
// of each matching benchmark. Otherwise run each matching benchmark and
// report the results.
//
// The second and third overloads use the specified 'display_reporter' and
// 'file_reporter' respectively. 'file_reporter' will write to the file
// specified by '--benchmark_output'. If '--benchmark_output' is not given,
// the 'file_reporter' is ignored.
//
// RETURNS: The number of matching benchmarks.
size_t RunSpecifiedBenchmarks();
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter);
// Register a MemoryManager instance that will be used to collect and report
// allocation measurements for benchmark runs.
void RegisterMemoryManager(MemoryManager* memory_manager);
// Add a key-value pair to output as part of the context stanza in the report.
void AddCustomContext(const std::string& key, const std::string& value);
namespace internal {
class Benchmark;
class BenchmarkImp;
class BenchmarkFamilies;
void UseCharPointer(char const volatile*);
// Take ownership of the pointer and register the benchmark. Return the
// registered benchmark.
Benchmark* RegisterBenchmarkInternal(Benchmark*);
// Ensure that the standard streams are properly initialized in every TU.
int InitializeStreams();
BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
} // namespace internal
#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
defined(__EMSCRIPTEN__)
#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
#endif
// The DoNotOptimize(...) function can be used to prevent a value or
// expression from being optimized away by the compiler. This function is
// intended to add little to no overhead.
// See: https://youtu.be/nXaxk27zwlk?t=2441
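//
// A minimal usage sketch ('Compute()' is a hypothetical user function, not
// part of this library):
//   for (auto _ : state) {
//     int result = Compute();
//     // Keep 'result' (and thus the call that produced it) alive:
//     benchmark::DoNotOptimize(result);
//   }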
#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
asm volatile("" : : "r,m"(value) : "memory");
}
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
#if defined(__clang__)
asm volatile("" : "+r,m"(value) : : "memory");
#else
asm volatile("" : "+m,r"(value) : : "memory");
#endif
}
// Force the compiler to flush pending writes to global memory. Acts as an
// effective read/write barrier
inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
asm volatile("" : : : "memory");
}
#elif defined(_MSC_VER)
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
_ReadWriteBarrier();
}
inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
#else
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
}
// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
#endif
// This class is used for user-defined counters.
class Counter {
public:
enum Flags {
kDefaults = 0,
// Mark the counter as a rate. It will be presented divided
// by the duration of the benchmark.
kIsRate = 1U << 0U,
// Mark the counter as a thread-average quantity. It will be
// presented divided by the number of threads.
kAvgThreads = 1U << 1U,
// Mark the counter as a thread-average rate. See above.
kAvgThreadsRate = kIsRate | kAvgThreads,
// Mark the counter as a constant value, valid/same for *every* iteration.
// When reporting, it will be *multiplied* by the iteration count.
kIsIterationInvariant = 1U << 2U,
// Mark the counter as a constant rate.
// When reporting, it will be *multiplied* by the iteration count
// and then divided by the duration of the benchmark.
kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
// Mark the counter as an iteration-average quantity.
// It will be presented divided by the number of iterations.
kAvgIterations = 1U << 3U,
// Mark the counter as an iteration-average rate. See above.
kAvgIterationsRate = kIsRate | kAvgIterations,
// In the end, invert the result. This is always done last!
kInvert = 1U << 31U
};
enum OneK {
// 1'000 items per 1k
kIs1000 = 1000,
// 1'024 items per 1k
kIs1024 = 1024
};
double value;
Flags flags;
OneK oneK;
BENCHMARK_ALWAYS_INLINE
Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
: value(v), flags(f), oneK(k) {}
BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
};
// A helper for user code to create unforeseen combinations of Flags, without
// having to do this cast manually each time, or providing this operator.
Counter::Flags inline operator|(const Counter::Flags& LHS,
const Counter::Flags& RHS) {
return static_cast<Counter::Flags>(static_cast<int>(LHS) |
static_cast<int>(RHS));
}
// This is the container for the user-defined counters.
typedef std::map<std::string, Counter> UserCounters;
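// A minimal usage sketch inside a benchmark ('num_items' is a hypothetical
// value computed by user code):
//   state.counters["Items"] =
//       Counter(static_cast<double>(num_items), Counter::kIsRate);
// The counter is then reported as a rate, i.e. items per second.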
// TimeUnit is passed to a benchmark in order to specify the order of magnitude
// for the measured time.
enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond };
// BigO is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark. In case oAuto is selected,
// complexity will be calculated automatically to the best fit.
enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
typedef uint64_t IterationCount;
// BigOFunc is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark.
typedef double(BigOFunc)(IterationCount);
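// For example (an illustrative sketch; 'BM_StringCompare' is a hypothetical
// benchmark), a lambda can be used as the complexity function:
//   BENCHMARK(BM_StringCompare)->Range(1 << 10, 1 << 18)
//       ->Complexity([](benchmark::IterationCount n) -> double { return n; });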
// StatisticsFunc is passed to a benchmark in order to compute some descriptive
// statistics over all the measurements of some type
typedef double(StatisticsFunc)(const std::vector<double>&);
namespace internal {
struct Statistics {
std::string name_;
StatisticsFunc* compute_;
Statistics(const std::string& name, StatisticsFunc* compute)
: name_(name), compute_(compute) {}
};
class BenchmarkInstance;
class ThreadTimer;
class ThreadManager;
class PerfCountersMeasurement;
enum AggregationReportMode
#if defined(BENCHMARK_HAS_CXX11)
: unsigned
#else
#endif
{
// The mode has not been manually specified
ARM_Unspecified = 0,
// The mode is user-specified.
// This may or may not be set when the following bit-flags are set.
ARM_Default = 1U << 0U,
// File reporter should only output aggregates.
ARM_FileReportAggregatesOnly = 1U << 1U,
// Display reporter should only output aggregates
ARM_DisplayReportAggregatesOnly = 1U << 2U,
// Both reporters should only display aggregates.
ARM_ReportAggregatesOnly =
ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
};
} // namespace internal
// State is passed to a running Benchmark and contains state for the
// benchmark to use.
class State {
public:
struct StateIterator;
friend struct StateIterator;
// Returns iterators used to run each iteration of a benchmark using a
// C++11 range-based for loop. These functions should not be called directly.
//
// REQUIRES: The benchmark has not started running yet. Neither begin nor end
// have been called previously.
//
// NOTE: KeepRunning may not be used after calling either of these functions.
BENCHMARK_ALWAYS_INLINE StateIterator begin();
BENCHMARK_ALWAYS_INLINE StateIterator end();
// Returns true if the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has
// returned false.
bool KeepRunning();
// Returns true iff the benchmark should run n more iterations.
// REQUIRES: 'n' > 0.
// NOTE: A benchmark must not return from the test until KeepRunningBatch()
// has returned false.
// NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
//
// Intended usage:
// while (state.KeepRunningBatch(1000)) {
// // process 1000 elements
// }
bool KeepRunningBatch(IterationCount n);
// REQUIRES: timer is running and 'SkipWithError(...)' has not been called
// by the current thread.
// Stop the benchmark timer. If not called, the timer will be
// automatically stopped after the last iteration of the benchmark loop.
//
// For threaded benchmarks the PauseTiming() function only pauses the timing
// for the current thread.
//
// NOTE: The "real time" measurement is per-thread. If different threads
// report different measurements the largest one is reported.
//
// NOTE: PauseTiming()/ResumeTiming() are relatively
// heavyweight, and so their use should generally be avoided
// within each benchmark iteration, if possible.
void PauseTiming();
// REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
// by the current thread.
// Start the benchmark timer. The timer is NOT running on entrance to the
// benchmark function. It begins running after control flow enters the
// benchmark loop.
//
// NOTE: PauseTiming()/ResumeTiming() are relatively
// heavyweight, and so their use should generally be avoided
// within each benchmark iteration, if possible.
void ResumeTiming();
// REQUIRES: 'SkipWithError(...)' has not been called previously by the
// current thread.
// Report the benchmark as resulting in an error with the specified 'msg'.
// After this call the user may explicitly 'return' from the benchmark.
//
// If the range-based for style of benchmark loop is used, the user must explicitly
// break from the loop, otherwise all future iterations will be run.
// If the 'KeepRunning()' loop is used the current thread will automatically
// exit the loop at the end of the current iteration.
//
// For threaded benchmarks only the current thread stops executing and future
// calls to `KeepRunning()` will block until all threads have completed
// the `KeepRunning()` loop. If multiple threads report an error only the
// first error message is used.
//
// NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
// the current scope immediately. If the function is called from within
// the 'KeepRunning()' loop the current iteration will finish. It is the user's
// responsibility to exit the scope as needed.
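//
// A minimal usage sketch ('SetupWorked()' is a hypothetical user helper):
//   static void BM_WithError(benchmark::State& state) {
//     if (!SetupWorked()) {
//       state.SkipWithError("setup failed");
//       return;  // 'return' before the loop, or 'break' from within it
//     }
//     for (auto _ : state) { /* benchmarked code */ }
//   }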
void SkipWithError(const char* msg);
// Returns true if an error has been reported with 'SkipWithError(...)'.
bool error_occurred() const { return error_occurred_; }
// REQUIRES: called exactly once per iteration of the benchmarking loop.
// Set the manually measured time for this benchmark iteration, which
// is used instead of automatically measured time if UseManualTime() was
// specified.
//
// For threaded benchmarks the final value will be set to the largest
// reported value.
void SetIterationTime(double seconds);
// Set the number of bytes processed by the current benchmark
// execution. This routine is typically called once at the end of a
// throughput oriented benchmark.
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetBytesProcessed(int64_t bytes) {
counters["bytes_per_second"] =
Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
}
BENCHMARK_ALWAYS_INLINE
int64_t bytes_processed() const {
if (counters.find("bytes_per_second") != counters.end())
return static_cast<int64_t>(counters.at("bytes_per_second"));
return 0;
}
// If this routine is called with complexity_n > 0 and a complexity report is
// requested for the family benchmark, then the current benchmark will be part
// of the computation and complexity_n will represent the length of N.
BENCHMARK_ALWAYS_INLINE
void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
BENCHMARK_ALWAYS_INLINE
int64_t complexity_length_n() const { return complexity_n_; }
// If this routine is called with items > 0, then an items/s
// label is printed on the benchmark report line for the currently
// executing benchmark. It is typically called at the end of a processing
// benchmark where a processing items/second output is desired.
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetItemsProcessed(int64_t items) {
counters["items_per_second"] =
Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
}
BENCHMARK_ALWAYS_INLINE
int64_t items_processed() const {
if (counters.find("items_per_second") != counters.end())
return static_cast<int64_t>(counters.at("items_per_second"));
return 0;
}
// If this routine is called, the specified label is printed at the
// end of the benchmark report line for the currently executing
// benchmark. Example:
// static void BM_Compress(benchmark::State& state) {
// ...
// double compress = input_size / output_size;
// state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compress));
// }
// Produces output that looks like:
// BM_Compress 50 50 14115038 compress:27.3%
//
// REQUIRES: a benchmark has exited its benchmarking loop.
void SetLabel(const char* label);
void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {
this->SetLabel(str.c_str());
}
// Range arguments for this run. CHECKs if the argument has been set.
BENCHMARK_ALWAYS_INLINE
int64_t range(std::size_t pos = 0) const {
assert(range_.size() > pos);
return range_[pos];
}
BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
int64_t range_x() const { return range(0); }
BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
int64_t range_y() const { return range(1); }
BENCHMARK_ALWAYS_INLINE
IterationCount iterations() const {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
return 0;
}
return max_iterations - total_iterations_ + batch_leftover_;
}
private:  // items we expect on the first cache line (i.e. 64 bytes of the struct)
// When total_iterations_ is 0, KeepRunning() and friends will return false.
// May be larger than max_iterations.
IterationCount total_iterations_;
// When using KeepRunningBatch(), batch_leftover_ holds the number of
// iterations beyond max_iters that were run. Used to track
// completed_iterations_ accurately.
IterationCount batch_leftover_;
public:
const IterationCount max_iterations;
private:
bool started_;
bool finished_;
bool error_occurred_;
private: // items we don't need on the first cache line
std::vector<int64_t> range_;
int64_t complexity_n_;
public:
// Container for user-defined counters.
UserCounters counters;
// Index of the executing thread. Values from [0, threads).
const int thread_index;
// Number of threads concurrently executing the benchmark.
const int threads;
private:
State(IterationCount max_iters, const std::vector<int64_t>& ranges,
int thread_i, int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager,
internal::PerfCountersMeasurement* perf_counters_measurement);
void StartKeepRunning();
// Implementation of KeepRunning() and KeepRunningBatch().
// is_batch must be true unless n is 1.
bool KeepRunningInternal(IterationCount n, bool is_batch);
void FinishKeepRunning();
internal::ThreadTimer* const timer_;
internal::ThreadManager* const manager_;
internal::PerfCountersMeasurement* const perf_counters_measurement_;
friend class internal::BenchmarkInstance;
};
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
return KeepRunningInternal(1, /*is_batch=*/false);
}
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) {
return KeepRunningInternal(n, /*is_batch=*/true);
}
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
bool is_batch) {
// total_iterations_ is set to 0 by the constructor, and always set to a
// nonzero value by StartKeepRunning().
assert(n > 0);
// n must be 1 unless is_batch is true.
assert(is_batch || n == 1);
if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
total_iterations_ -= n;
return true;
}
if (!started_) {
StartKeepRunning();
if (!error_occurred_ && total_iterations_ >= n) {
total_iterations_ -= n;
return true;
}
}
// For non-batch runs, total_iterations_ must be 0 by now.
if (is_batch && total_iterations_ != 0) {
batch_leftover_ = n - total_iterations_;
total_iterations_ = 0;
return true;
}
FinishKeepRunning();
return false;
}
struct State::StateIterator {
struct BENCHMARK_UNUSED Value {};
typedef std::forward_iterator_tag iterator_category;
typedef Value value_type;
typedef Value reference;
typedef Value pointer;
typedef std::ptrdiff_t difference_type;
private:
friend class State;
BENCHMARK_ALWAYS_INLINE
StateIterator() : cached_(0), parent_() {}
BENCHMARK_ALWAYS_INLINE
explicit StateIterator(State* st)
: cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {}
public:
BENCHMARK_ALWAYS_INLINE
Value operator*() const { return Value(); }
BENCHMARK_ALWAYS_INLINE
StateIterator& operator++() {
assert(cached_ > 0);
--cached_;
return *this;
}
BENCHMARK_ALWAYS_INLINE
bool operator!=(StateIterator const&) const {
if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
parent_->FinishKeepRunning();
return false;
}
private:
IterationCount cached_;
State* const parent_;
};
inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() {
return StateIterator(this);
}
inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() {
StartKeepRunning();
return StateIterator();
}
namespace internal {
typedef void(Function)(State&);
// ------------------------------------------------------
// Benchmark registration object. The BENCHMARK() macro expands
// into an internal::Benchmark* object. Various methods can
// be called on this object to change the properties of the benchmark.
// Each method returns "this" so that multiple method calls can
// be chained into one expression.
class Benchmark {
public:
virtual ~Benchmark();
// Note: the following methods all return "this" so that multiple
// method calls can be chained together in one expression.
// Specify the name of the benchmark
Benchmark* Name(const std::string& name);
// Run this benchmark once with "x" as the extra argument passed
// to the function.
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* Arg(int64_t x);
// Run this benchmark with the given time unit for the generated output report
Benchmark* Unit(TimeUnit unit);
// Run this benchmark once for a number of values picked from the
// range [start..limit]. (start and limit are always picked.)
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* Range(int64_t start, int64_t limit);
// Run this benchmark once for all values in the range [start..limit] with
// specific step
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
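// For example (illustrative; 'BM_foo' is a hypothetical benchmark):
//   BENCHMARK(BM_foo)->DenseRange(0, 1024, 128);
// runs BM_foo with arguments 0, 128, 256, ..., 1024.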
// Run this benchmark once with "args" as the extra arguments passed
// to the function.
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Args(const std::vector<int64_t>& args);
// Equivalent to Args({x, y})
// NOTE: This is a legacy C++03 interface provided for compatibility only.
// New code should use 'Args'.
Benchmark* ArgPair(int64_t x, int64_t y) {
std::vector<int64_t> args;
args.push_back(x);
args.push_back(y);
return Args(args);
}
// Run this benchmark once for a number of values picked from the
// ranges [start..limit]. (starts and limits are always picked.)
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
// Run this benchmark once for each combination of values in the (cartesian)
// product of the supplied argument lists.
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* ArgsProduct(const std::vector<std::vector<int64_t> >& arglists);
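// For example (illustrative; 'BM_foo' is a hypothetical benchmark):
//   BENCHMARK(BM_foo)->ArgsProduct({{1, 2}, {8, 16}});
// is equivalent to calling Args() with each of {1, 8}, {1, 16}, {2, 8} and
// {2, 16}.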
// Equivalent to ArgNames({name})
Benchmark* ArgName(const std::string& name);
// Set the argument names to display in the benchmark name. If not called,
// only argument values will be shown.
Benchmark* ArgNames(const std::vector<std::string>& names);
// Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
// NOTE: This is a legacy C++03 interface provided for compatibility only.
// New code should use 'Ranges'.
Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
std::vector<std::pair<int64_t, int64_t> > ranges;
ranges.push_back(std::make_pair(lo1, hi1));
ranges.push_back(std::make_pair(lo2, hi2));
return Ranges(ranges);
}
// Pass this benchmark object to *func, which can customize
// the benchmark by calling various methods like Arg, Args,
// Threads, etc.
Benchmark* Apply(void (*func)(Benchmark* benchmark));
// Set the range multiplier for non-dense range. If not called, the range
// multiplier kRangeMultiplier will be used.
Benchmark* RangeMultiplier(int multiplier);
// Set the minimum amount of time to use when running this benchmark. This
// option overrides the `benchmark_min_time` flag.
// REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark.
Benchmark* MinTime(double t);
// Specify the number of iterations that should be run by this benchmark.
// REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark.
//
// NOTE: This function should only be used when *exact* iteration control is
// needed and never to control or limit how long a benchmark runs, where
// `--benchmark_min_time=N` or `MinTime(...)` should be used instead.
Benchmark* Iterations(IterationCount n);
// Specify the number of times to repeat this benchmark. This option overrides
// the `benchmark_repetitions` flag.
// REQUIRES: `n > 0`
Benchmark* Repetitions(int n);
// Specify if each repetition of the benchmark should be reported separately
// or if only the final statistics should be reported. If the benchmark
// is not repeated then the single result is always reported.
// Applies to *ALL* reporters (display and file).
Benchmark* ReportAggregatesOnly(bool value = true);
// Same as ReportAggregatesOnly(), but applies to display reporter only.
Benchmark* DisplayAggregatesOnly(bool value = true);
// By default, the CPU time is measured only for the main thread, which may
// be unrepresentative if the benchmark uses threads internally. If this
// method is called, the total CPU time spent by all threads will be
// measured instead.
Benchmark* MeasureProcessCPUTime();
// If a particular benchmark should use the Wall clock instead of the CPU time
// (be it either the CPU time of the main thread only (default), or the
// total CPU usage of the benchmark), call this method. If called, the elapsed
// (wall) time will be used to control how many iterations are run, and in the
// printing of items/second or MB/seconds values.
// If not called, the CPU time used by the benchmark will be used.
Benchmark* UseRealTime();
// If a benchmark must measure time manually (e.g. if GPU execution time is
// being measured), call this method. If called, each benchmark iteration
// should call SetIterationTime(seconds) to report the measured time, which
// will be used to control how many iterations are run, and in the printing of
// items/second or MB/second values.
Benchmark* UseManualTime();
// Set the asymptotic computational complexity for the benchmark. If called
// the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigO complexity = benchmark::oAuto);
// Set the asymptotic computational complexity for the benchmark. If called
// the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigOFunc* complexity);
// Add this statistic to be computed over all the values of the benchmark run
Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics);
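// For example (illustrative; 'BM_foo' is a hypothetical benchmark), to report
// the maximum over all repetitions:
//   BENCHMARK(BM_foo)->Repetitions(10)->ComputeStatistics(
//       "max", [](const std::vector<double>& v) -> double {
//         return *std::max_element(v.begin(), v.end());
//       });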
// Support for running multiple copies of the same benchmark concurrently
// in multiple threads. This may be useful when measuring the scaling
// of some piece of code.
// Run one instance of this benchmark concurrently in t threads.
Benchmark* Threads(int t);
// Pick a set of values T from [min_threads,max_threads].
// min_threads and max_threads are always included in T. Run this
// benchmark once for each value in T. The benchmark run for a
// particular value t consists of t threads running the benchmark
// function concurrently. For example, consider:
// BENCHMARK(Foo)->ThreadRange(1,16);
// This will run the following benchmarks:
// Foo in 1 thread
// Foo in 2 threads
// Foo in 4 threads
// Foo in 8 threads
// Foo in 16 threads
Benchmark* ThreadRange(int min_threads, int max_threads);
// For each value n in the range, run this benchmark once using n threads.
// min_threads and max_threads are always included in the range.
// stride specifies the increment. E.g. DenseThreadRange(1, 8, 3) starts
// a benchmark with 1, 4, 7 and 8 threads.
Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1);
// Equivalent to ThreadRange(NumCPUs(), NumCPUs())
Benchmark* ThreadPerCpu();
virtual void Run(State& state) = 0;
protected:
explicit Benchmark(const char* name);
Benchmark(Benchmark const&);
void SetName(const char* name);
int ArgsCnt() const;
private:
friend class BenchmarkFamilies;
friend class BenchmarkInstance;
std::string name_;
AggregationReportMode aggregation_report_mode_;
std::vector<std::string> arg_names_; // Args for all benchmark runs
std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
TimeUnit time_unit_;
int range_multiplier_;
double min_time_;
IterationCount iterations_;
int repetitions_;
bool measure_process_cpu_time_;
bool use_real_time_;
bool use_manual_time_;
BigO complexity_;
BigOFunc* complexity_lambda_;
std::vector<Statistics> statistics_;
std::vector<int> thread_counts_;
Benchmark& operator=(Benchmark const&);
};
} // namespace internal
// Create and register a benchmark with the specified 'name' that invokes
// the specified functor 'fn'.
//
// RETURNS: A pointer to the registered benchmark.
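//
// A minimal usage sketch ('BM_foo' is a hypothetical benchmark function of
// type void(benchmark::State&)):
//   auto* bench = benchmark::RegisterBenchmark("BM_foo", &BM_foo);
//   bench->Arg(8)->Arg(64);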
internal::Benchmark* RegisterBenchmark(const char* name,
internal::Function* fn);
#if defined(BENCHMARK_HAS_CXX11)
template <class Lambda>
internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn);
#endif
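// For example (an illustrative sketch; BM_empty is a placeholder):
//
//   auto BM_empty = [](benchmark::State& st) {
//     for (auto _ : st) {
//       benchmark::DoNotOptimize(st.iterations());
//     }
//   };
//   benchmark::RegisterBenchmark("BM_empty", BM_empty);
//   benchmark::RegisterBenchmark("BM_empty/threaded", BM_empty)->ThreadPerCpu();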
// Remove all registered benchmarks. All pointers to previously registered
// benchmarks are invalidated.
void ClearRegisteredBenchmarks();
namespace internal {
// The class used to hold all benchmarks created from static functions
// (i.e. those created using the BENCHMARK(...) macros).
class FunctionBenchmark : public Benchmark {
public:
FunctionBenchmark(const char* name, Function* func)
: Benchmark(name), func_(func) {}
virtual void Run(State& st) BENCHMARK_OVERRIDE;
private:
Function* func_;
};
#ifdef BENCHMARK_HAS_CXX11
template <class Lambda>
class LambdaBenchmark : public Benchmark {
public:
virtual void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); }
private:
template <class OLambda>
LambdaBenchmark(const char* name, OLambda&& lam)
: Benchmark(name), lambda_(std::forward<OLambda>(lam)) {}
LambdaBenchmark(LambdaBenchmark const&) = delete;
private:
template <class Lam>
friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&);
Lambda lambda_;
};
#endif
} // namespace internal
inline internal::Benchmark* RegisterBenchmark(const char* name,
internal::Function* fn) {
return internal::RegisterBenchmarkInternal(
::new internal::FunctionBenchmark(name, fn));
}
#ifdef BENCHMARK_HAS_CXX11
template <class Lambda>
internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) {
using BenchType =
internal::LambdaBenchmark<typename std::decay<Lambda>::type>;
return internal::RegisterBenchmarkInternal(
::new BenchType(name, std::forward<Lambda>(fn)));
}
#endif
#if defined(BENCHMARK_HAS_CXX11) && \
(!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409)
template <class Lambda, class... Args>
internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn,
Args&&... args) {
return benchmark::RegisterBenchmark(
name, [=](benchmark::State& st) { fn(st, args...); });
}
#else
#define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
#endif
// The base class for all fixture tests.
class Fixture : public internal::Benchmark {
public:
Fixture() : internal::Benchmark("") {}
virtual void Run(State& st) BENCHMARK_OVERRIDE {
this->SetUp(st);
this->BenchmarkCase(st);
this->TearDown(st);
}
// These will be deprecated ...
virtual void SetUp(const State&) {}
virtual void TearDown(const State&) {}
// ... In favor of these.
virtual void SetUp(State& st) { SetUp(const_cast<const State&>(st)); }
virtual void TearDown(State& st) { TearDown(const_cast<const State&>(st)); }
protected:
virtual void BenchmarkCase(State&) = 0;
};
} // namespace benchmark
// ------------------------------------------------------
// Macro to register benchmarks
// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1
// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be
// empty. If X is empty the expression becomes (+1 == +0).
#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0)
#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__
#else
#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__
#endif
// Helpers for generating unique variable names
#define BENCHMARK_PRIVATE_NAME(n) \
BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c)
#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c
// Helper for concatenation with macro name expansion
#define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \
BaseClass##_##Method##_Benchmark
#define BENCHMARK_PRIVATE_DECLARE(n) \
static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
BENCHMARK_UNUSED
#define BENCHMARK(n) \
BENCHMARK_PRIVATE_DECLARE(n) = \
(::benchmark::internal::RegisterBenchmarkInternal( \
new ::benchmark::internal::FunctionBenchmark(#n, n)))
// Old-style macros
#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a))
#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)})
#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t))
#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi))
#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}})
#ifdef BENCHMARK_HAS_CXX11
// Register a benchmark which invokes the function specified by `func`
// with the additional arguments specified by `...`.
//
// For example:
//
// template <class ...ExtraArgs>
// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
//   [...]
// }
// /* Registers a benchmark named "BM_takes_args/int_string_test" */
// BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
#define BENCHMARK_CAPTURE(func, test_case_name, ...) \
BENCHMARK_PRIVATE_DECLARE(func) = \
(::benchmark::internal::RegisterBenchmarkInternal( \
new ::benchmark::internal::FunctionBenchmark( \
#func "/" #test_case_name, \
[](::benchmark::State& st) { func(st, __VA_ARGS__); })))
#endif // BENCHMARK_HAS_CXX11
// This will register a benchmark for a templatized function. For example:
//
// template<int arg>
// void BM_Foo(benchmark::State& state);
//
// BENCHMARK_TEMPLATE(BM_Foo, 1);
//
// will register BM_Foo<1> as a benchmark.
#define BENCHMARK_TEMPLATE1(n, a) \
BENCHMARK_PRIVATE_DECLARE(n) = \
(::benchmark::internal::RegisterBenchmarkInternal( \
new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n<a>)))
#define BENCHMARK_TEMPLATE2(n, a, b) \
BENCHMARK_PRIVATE_DECLARE(n) = \
(::benchmark::internal::RegisterBenchmarkInternal( \
new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \
n<a, b>)))
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE(n, ...) \
BENCHMARK_PRIVATE_DECLARE(n) = \
(::benchmark::internal::RegisterBenchmarkInternal( \
new ::benchmark::internal::FunctionBenchmark( \
#n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>)))
#else
#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a)
#endif
#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
class BaseClass##_##Method##_Benchmark : public BaseClass { \
public: \
BaseClass##_##Method##_Benchmark() : BaseClass() { \
this->SetName(#BaseClass "/" #Method); \
} \
\
protected: \
virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
};
#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
public: \
BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
this->SetName(#BaseClass "<" #a ">/" #Method); \
} \
\
protected: \
virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
};
#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
public: \
BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
} \
\
protected: \
virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
};
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \
class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
public: \
BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \
this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \
} \
\
protected: \
virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
};
#else
#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
#endif
#define BENCHMARK_DEFINE_F(BaseClass, Method) \
BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \
BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
#endif
#define BENCHMARK_REGISTER_F(BaseClass, Method) \
BENCHMARK_PRIVATE_REGISTER_F(BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method))
#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \
BENCHMARK_PRIVATE_DECLARE(TestName) = \
(::benchmark::internal::RegisterBenchmarkInternal(new TestName()))
// This macro will define and register a benchmark within a fixture class.
#define BENCHMARK_F(BaseClass, Method) \
BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
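//
// For example (an illustrative sketch; MyFixture is a placeholder):
//
//   class MyFixture : public benchmark::Fixture {};
//
//   BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
//     for (auto _ : st) {
//       // Run the test as normal.
//     }
//   }
//
// Alternatively, BENCHMARK_DEFINE_F and BENCHMARK_REGISTER_F separate the
// definition of the benchmark body from its registration.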
#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \
BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
#endif
// Helper macro to create a main routine in a test that runs the benchmarks
#define BENCHMARK_MAIN() \
int main(int argc, char** argv) { \
::benchmark::Initialize(&argc, argv); \
if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
::benchmark::RunSpecifiedBenchmarks(); \
::benchmark::Shutdown(); \
return 0; \
} \
int main(int, char**)
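//
// For example, a minimal benchmark binary (illustrative sketch):
//
//   #include <benchmark/benchmark.h>
//
//   static void BM_StringCreation(benchmark::State& state) {
//     for (auto _ : state) {
//       std::string empty_string;
//     }
//   }
//   BENCHMARK(BM_StringCreation);
//
//   BENCHMARK_MAIN();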
// ------------------------------------------------------
// Benchmark Reporters
namespace benchmark {
struct CPUInfo {
struct CacheInfo {
std::string type;
int level;
int size;
int num_sharing;
};
enum Scaling {
UNKNOWN,
ENABLED,
DISABLED
};
int num_cpus;
Scaling scaling;
double cycles_per_second;
std::vector<CacheInfo> caches;
std::vector<double> load_avg;
static const CPUInfo& Get();
private:
CPUInfo();
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
};
// Struct holding information about the system the benchmarks run on.
struct SystemInfo {
std::string name;
static const SystemInfo& Get();
private:
SystemInfo();
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
};
// BenchmarkName contains the components of the Benchmark's name
// which allows individual fields to be modified or cleared before
// building the final name using 'str()'.
struct BenchmarkName {
std::string function_name;
std::string args;
std::string min_time;
std::string iterations;
std::string repetitions;
std::string time_type;
std::string threads;
// Return the full name of the benchmark with each non-empty
// field separated by a '/'
std::string str() const;
};
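// As an illustration (hypothetical values), a fully-populated name might
// render via str() as:
//   "BM_SetInsert/1024/min_time:2.000/repeats:4/manual_time/threads:2"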
// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However, an application
// can control the destination of the reports by calling
// RunSpecifiedBenchmarks and passing it a custom reporter object.
// The reporter object must implement the following interface.
class BenchmarkReporter {
public:
struct Context {
CPUInfo const& cpu_info;
SystemInfo const& sys_info;
// The number of chars in the longest benchmark name.
size_t name_field_width;
static const char* executable_name;
Context();
};
struct Run {
static const int64_t no_repetition_index = -1;
enum RunType { RT_Iteration, RT_Aggregate };
Run()
: run_type(RT_Iteration),
error_occurred(false),
iterations(1),
threads(1),
time_unit(kNanosecond),
real_accumulated_time(0),
cpu_accumulated_time(0),
max_heapbytes_used(0),
complexity(oNone),
complexity_lambda(),
complexity_n(0),
report_big_o(false),
report_rms(false),
counters(),
has_memory_result(false),
allocs_per_iter(0.0),
max_bytes_used(0) {}
std::string benchmark_name() const;
BenchmarkName run_name;
int64_t family_index;
int64_t per_family_instance_index;
RunType run_type;
std::string aggregate_name;
std::string report_label; // Empty if not set by benchmark.
bool error_occurred;
std::string error_message;
IterationCount iterations;
int64_t threads;
int64_t repetition_index;
int64_t repetitions;
TimeUnit time_unit;
double real_accumulated_time;
double cpu_accumulated_time;
// Return a value representing the real time per iteration in the unit
// specified by 'time_unit'.
// NOTE: If 'iterations' is zero the returned value represents the
// accumulated time.
double GetAdjustedRealTime() const;
// Return a value representing the cpu time per iteration in the unit
// specified by 'time_unit'.
// NOTE: If 'iterations' is zero the returned value represents the
// accumulated time.
double GetAdjustedCPUTime() const;
// This is set to 0.0 if memory tracing is not enabled.
double max_heapbytes_used;
// Keep track of arguments to compute asymptotic complexity
BigO complexity;
BigOFunc* complexity_lambda;
int64_t complexity_n;
// What statistics to compute from the measurements.
const std::vector<internal::Statistics>* statistics;
// Inform print function whether the current run is a complexity report
bool report_big_o;
bool report_rms;
UserCounters counters;
// Memory metrics.
bool has_memory_result;
double allocs_per_iter;
int64_t max_bytes_used;
};
struct PerFamilyRunReports {
PerFamilyRunReports() : num_runs_total(0), num_runs_done(0) {}
// How many runs will all instances of this benchmark perform?
int num_runs_total;
// How many runs have happened already?
int num_runs_done;
// The reports about (non-erroneous!) runs of this family.
std::vector<BenchmarkReporter::Run> Runs;
};
// Construct a BenchmarkReporter with the output stream set to 'std::cout'
// and the error stream set to 'std::cerr'
BenchmarkReporter();
// Called once for every suite of benchmarks run.
// The parameter "context" contains information that the
// reporter may wish to use when generating its report, for example the
// platform under which the benchmarks are running. The benchmark run is
// never started if this function returns false, allowing the reporter
// to skip runs based on the context information.
virtual bool ReportContext(const Context& context) = 0;
// Called once for each group of benchmark runs; gives information about
// the cpu-time and heap memory usage during the benchmark run. If the group
// of runs contained more than two entries then 'report' contains additional
// elements representing the mean and standard deviation of those runs.
// Additionally, if this group of runs was the last in a family of benchmarks,
// 'report' contains additional entries representing the asymptotic
// complexity and RMS of that benchmark family.
virtual void ReportRuns(const std::vector<Run>& report) = 0;
// Called once and only once after every group of benchmarks is run and
// reported.
virtual void Finalize() {}
// REQUIRES: The object referenced by 'out' is valid for the lifetime
// of the reporter.
void SetOutputStream(std::ostream* out) {
assert(out);
output_stream_ = out;
}
// REQUIRES: The object referenced by 'err' is valid for the lifetime
// of the reporter.
void SetErrorStream(std::ostream* err) {
assert(err);
error_stream_ = err;
}
std::ostream& GetOutputStream() const { return *output_stream_; }
std::ostream& GetErrorStream() const { return *error_stream_; }
virtual ~BenchmarkReporter();
// Write a human readable string to 'out' representing the specified
// 'context'.
// REQUIRES: 'out' is non-null.
static void PrintBasicContext(std::ostream* out, Context const& context);
private:
std::ostream* output_stream_;
std::ostream* error_stream_;
};
// Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks().
class ConsoleReporter : public BenchmarkReporter {
public:
enum OutputOptions {
OO_None = 0,
OO_Color = 1,
OO_Tabular = 2,
OO_ColorTabular = OO_Color | OO_Tabular,
OO_Defaults = OO_ColorTabular
};
explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
: output_options_(opts_),
name_field_width_(0),
prev_counters_(),
printed_header_(false) {}
virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
protected:
virtual void PrintRunData(const Run& report);
virtual void PrintHeader(const Run& report);
OutputOptions output_options_;
size_t name_field_width_;
UserCounters prev_counters_;
bool printed_header_;
};
class JSONReporter : public BenchmarkReporter {
public:
JSONReporter() : first_report_(true) {}
virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
virtual void Finalize() BENCHMARK_OVERRIDE;
private:
void PrintRunData(const Run& report);
bool first_report_;
};
class BENCHMARK_DEPRECATED_MSG(
"The CSV Reporter will be removed in a future release") CSVReporter
: public BenchmarkReporter {
public:
CSVReporter() : printed_header_(false) {}
virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
private:
void PrintRunData(const Run& report);
bool printed_header_;
std::set<std::string> user_counter_names_;
};
// If a MemoryManager is registered, it can be used to collect and report
// allocation metrics for a run of the benchmark.
class MemoryManager {
public:
struct Result {
Result() : num_allocs(0), max_bytes_used(0) {}
// The number of allocations made in total between Start and Stop.
int64_t num_allocs;
// The peak memory use between Start and Stop.
int64_t max_bytes_used;
};
virtual ~MemoryManager() {}
// Implement this to start recording allocation information.
virtual void Start() = 0;
// Implement this to stop recording and fill out the given Result structure.
virtual void Stop(Result* result) = 0;
};
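// A minimal sketch of a custom manager (illustrative; the bookkeeping that
// actually intercepts allocations, e.g. a hooked operator new, is omitted,
// and SimpleMemoryManager is a placeholder name):
//
//   class SimpleMemoryManager : public benchmark::MemoryManager {
//    public:
//     void Start() BENCHMARK_OVERRIDE { num_allocs_ = 0; peak_bytes_ = 0; }
//     void Stop(Result* result) BENCHMARK_OVERRIDE {
//       result->num_allocs = num_allocs_;
//       result->max_bytes_used = peak_bytes_;
//     }
//     int64_t num_allocs_ = 0;
//     int64_t peak_bytes_ = 0;
//   };
//
//   static SimpleMemoryManager manager;
//   benchmark::RegisterMemoryManager(&manager);  // before RunSpecifiedBenchmarks()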
inline const char* GetTimeUnitString(TimeUnit unit) {
switch (unit) {
case kSecond:
return "s";
case kMillisecond:
return "ms";
case kMicrosecond:
return "us";
case kNanosecond:
return "ns";
}
BENCHMARK_UNREACHABLE();
}
inline double GetTimeUnitMultiplier(TimeUnit unit) {
switch (unit) {
case kSecond:
return 1;
case kMillisecond:
return 1e3;
case kMicrosecond:
return 1e6;
case kNanosecond:
return 1e9;
}
BENCHMARK_UNREACHABLE();
}
} // namespace benchmark
#endif // BENCHMARK_BENCHMARK_H_
07070100000035000081A400000000000000000000000160C0813C0000001F000000000000000000000000000000000000002100000000benchmark-1.5.5/requirements.txtnumpy == 1.19.4
scipy == 1.5.4
07070100000036000081A400000000000000000000000160C0813C00001186000000000000000000000000000000000000001900000000benchmark-1.5.5/setup.pyimport os
import posixpath
import re
import shutil
import sys
from distutils import sysconfig
import setuptools
from setuptools.command import build_ext
HERE = os.path.dirname(os.path.abspath(__file__))
IS_WINDOWS = sys.platform.startswith("win")
def _get_version():
"""Parse the version string from __init__.py."""
with open(
os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py")
) as init_file:
try:
version_line = next(
line for line in init_file if line.startswith("__version__")
)
except StopIteration:
raise ValueError("__version__ not defined in __init__.py")
else:
namespace = {}
exec(version_line, namespace) # pylint: disable=exec-used
return namespace["__version__"]
def _parse_requirements(path):
with open(os.path.join(HERE, path)) as requirements:
return [
line.rstrip()
for line in requirements
if not (line.isspace() or line.startswith("#"))
]
class BazelExtension(setuptools.Extension):
"""A C/C++ extension that is defined as a Bazel BUILD target."""
def __init__(self, name, bazel_target):
self.bazel_target = bazel_target
self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split(
":"
)
setuptools.Extension.__init__(self, name, sources=[])
class BuildBazelExtension(build_ext.build_ext):
"""A command that runs Bazel to build a C/C++ extension."""
def run(self):
for ext in self.extensions:
self.bazel_build(ext)
build_ext.build_ext.run(self)
def bazel_build(self, ext):
"""Runs the bazel build to create the package."""
with open("WORKSPACE", "r") as workspace:
workspace_contents = workspace.read()
with open("WORKSPACE", "w") as workspace:
workspace.write(
re.sub(
r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)',
sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
workspace_contents,
)
)
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
bazel_argv = [
"bazel",
"build",
ext.bazel_target,
"--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"),
"--compilation_mode=" + ("dbg" if self.debug else "opt"),
]
if IS_WINDOWS:
# Link with python*.lib.
for library_dir in self.library_dirs:
bazel_argv.append("--linkopt=/LIBPATH:" + library_dir)
self.spawn(bazel_argv)
shared_lib_suffix = '.dll' if IS_WINDOWS else '.so'
ext_bazel_bin_path = os.path.join(
self.build_temp, 'bazel-bin',
ext.relpath, ext.target_name + shared_lib_suffix)
ext_dest_path = self.get_ext_fullpath(ext.name)
ext_dest_dir = os.path.dirname(ext_dest_path)
if not os.path.exists(ext_dest_dir):
os.makedirs(ext_dest_dir)
shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
setuptools.setup(
name="google_benchmark",
version=_get_version(),
url="https://github.com/google/benchmark",
description="A library to benchmark code snippets.",
author="Google",
author_email="benchmark-py@google.com",
# Contained modules and scripts.
package_dir={"": "bindings/python"},
packages=setuptools.find_packages("bindings/python"),
install_requires=_parse_requirements("bindings/python/requirements.txt"),
cmdclass=dict(build_ext=BuildBazelExtension),
ext_modules=[
BazelExtension(
"google_benchmark._benchmark",
"//bindings/python/google_benchmark:_benchmark",
)
],
zip_safe=False,
# PyPI package information.
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Testing",
"Topic :: System :: Benchmark",
],
license="Apache 2.0",
keywords="benchmark",
)
07070100000037000041ED00000000000000000000000260C0813C00000000000000000000000000000000000000000000001400000000benchmark-1.5.5/src07070100000038000081A400000000000000000000000160C0813C00000FE8000000000000000000000000000000000000002300000000benchmark-1.5.5/src/CMakeLists.txt# Allow the source files to find headers in src/
include(GNUInstallDirs)
include_directories(${PROJECT_SOURCE_DIR}/src)
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
endif()
file(GLOB
SOURCE_FILES
*.cc
${PROJECT_SOURCE_DIR}/include/benchmark/*.h
${CMAKE_CURRENT_SOURCE_DIR}/*.h)
file(GLOB BENCHMARK_MAIN "benchmark_main.cc")
foreach(item ${BENCHMARK_MAIN})
list(REMOVE_ITEM SOURCE_FILES "${item}")
endforeach()
add_library(benchmark ${SOURCE_FILES})
add_library(benchmark::benchmark ALIAS benchmark)
set_target_properties(benchmark PROPERTIES
OUTPUT_NAME "benchmark"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
# libpfm, if available
if (HAVE_LIBPFM)
target_link_libraries(benchmark libpfm.a)
add_definitions(-DHAVE_LIBPFM)
endif()
# Link threads.
target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
find_library(LIBRT rt)
if(LIBRT)
target_link_libraries(benchmark ${LIBRT})
endif()
if(CMAKE_BUILD_TYPE)
string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER)
endif()
if(NOT CMAKE_THREAD_LIBS_INIT AND "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}" MATCHES ".*-fsanitize=[^ ]*address.*")
message(WARNING "CMake's FindThreads.cmake did not fail, but CMAKE_THREAD_LIBS_INIT ended up being empty. This was fixed in https://github.com/Kitware/CMake/commit/d53317130e84898c5328c237186dbd995aaf1c12 Let's guess that -pthread is sufficient.")
target_link_libraries(benchmark -pthread)
endif()
# We need extra libraries on Windows
if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
target_link_libraries(benchmark shlwapi)
endif()
# We need extra libraries on Solaris
if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
target_link_libraries(benchmark kstat)
endif()
# Benchmark main library
add_library(benchmark_main "benchmark_main.cc")
add_library(benchmark::benchmark_main ALIAS benchmark_main)
set_target_properties(benchmark_main PROPERTIES
OUTPUT_NAME "benchmark_main"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark_main PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
target_link_libraries(benchmark_main benchmark::benchmark)
set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc")
set(targets_export_name "${PROJECT_NAME}Targets")
set(namespace "${PROJECT_NAME}::")
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion
)
configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY)
if (BENCHMARK_ENABLE_INSTALL)
# Install target (installs the library to the location given by the
# CMAKE_INSTALL_PREFIX variable)
install(
TARGETS benchmark benchmark_main
EXPORT ${targets_export_name}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
install(
DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
FILES_MATCHING PATTERN "*.*h")
install(
FILES "${project_config}" "${version_config}"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
install(
FILES "${pkg_config}"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
install(
EXPORT "${targets_export_name}"
NAMESPACE "${namespace}"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
endif()
07070100000039000081A400000000000000000000000160C0813C00000454000000000000000000000000000000000000002000000000benchmark-1.5.5/src/arraysize.h#ifndef BENCHMARK_ARRAYSIZE_H_
#define BENCHMARK_ARRAYSIZE_H_
#include "internal_macros.h"
namespace benchmark {
namespace internal {
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
//
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
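// For example (illustrative):
//   static const int kPrimes[] = {2, 3, 5, 7, 11};
//   static_assert(arraysize(kPrimes) == 5, "unexpected array size");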
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_ARRAYSIZE_H_
0707010000003A000081A400000000000000000000000160C0813C00005609000000000000000000000000000000000000002100000000benchmark-1.5.5/src/benchmark.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <utility>
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"
// Print a list of benchmarks. This option overrides all other options.
DEFINE_bool(benchmark_list_tests, false);
// A regular expression that specifies the set of benchmarks to execute. If
// this flag is empty, or if this flag is the string "all", all benchmarks
// linked into the binary are run.
DEFINE_string(benchmark_filter, ".");
// Minimum number of seconds we should run benchmark before results are
// considered significant. For cpu-time based tests, this is the lower bound
// on the total cpu time used by all threads that make up the test. For
// real-time based tests, this is the lower bound on the elapsed time of the
// benchmark execution, regardless of number of threads.
DEFINE_double(benchmark_min_time, 0.5);
// The number of runs of each benchmark. If greater than 1, the mean and
// standard deviation of the runs will be reported.
DEFINE_int32(benchmark_repetitions, 1);
// If set, enable random interleaving of repetitions of all benchmarks.
// See http://github.com/google/benchmark/issues/1051 for details.
DEFINE_bool(benchmark_enable_random_interleaving, false);
// Report the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are reported for
// repeated benchmarks. Affects all reporters.
DEFINE_bool(benchmark_report_aggregates_only, false);
// Display the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are displayed for
// repeated benchmarks. Unlike benchmark_report_aggregates_only, this only
// affects the display reporter, but *NOT* the file reporter, which will
// still contain all the output.
DEFINE_bool(benchmark_display_aggregates_only, false);
// The format to use for console output.
// Valid values are 'console', 'json', or 'csv'.
DEFINE_string(benchmark_format, "console");
// The format to use for file output.
// Valid values are 'console', 'json', or 'csv'.
DEFINE_string(benchmark_out_format, "json");
// The file to write additional output to.
DEFINE_string(benchmark_out, "");
// Whether to use colors in the output. Valid values:
// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
// the output is being sent to a terminal and the TERM environment variable is
// set to a terminal type that supports colors.
DEFINE_string(benchmark_color, "auto");
// Whether to use tabular format when printing user counters to the console.
// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
DEFINE_bool(benchmark_counters_tabular, false);
// The level of verbose logging to output
DEFINE_int32(v, 0);
// List of additional perf counters to collect, in libpfm format. For more
// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
DEFINE_string(benchmark_perf_counters, "");
namespace benchmark {
namespace internal {
// Extra context to include in the output formatted as comma-separated key-value
// pairs. Kept internal as it's only used for parsing from env/command line.
DEFINE_kvpairs(benchmark_context, {});
std::map<std::string, std::string>* global_context = nullptr;
// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}
} // namespace internal
State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
int thread_i, int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager,
internal::PerfCountersMeasurement* perf_counters_measurement)
: total_iterations_(0),
batch_leftover_(0),
max_iterations(max_iters),
started_(false),
finished_(false),
error_occurred_(false),
range_(ranges),
complexity_n_(0),
counters(),
thread_index(thread_i),
threads(n_threads),
timer_(timer),
manager_(manager),
perf_counters_measurement_(perf_counters_measurement) {
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
// Note: The use of offsetof below is technically undefined until C++17
// because State is not a standard layout type. However, all compilers
// currently provide well-defined behavior as an extension (which is
// demonstrated by the fact that constexpr evaluation must diagnose all
// undefined behavior). GCC and Clang nevertheless warn about this use of
// offsetof, so the warning must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(offsetof(State, error_occurred_) <=
(cache_line_size - sizeof(error_occurred_)),
"");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
void State::PauseTiming() {
// Add in time accumulated so far
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StopTimer();
if (perf_counters_measurement_) {
auto measurements = perf_counters_measurement_->StopAndGetMeasurements();
for (const auto& name_and_measurement : measurements) {
auto name = name_and_measurement.first;
auto measurement = name_and_measurement.second;
CHECK_EQ(counters[name], 0.0);
counters[name] = Counter(measurement, Counter::kAvgIterations);
}
}
}
void State::ResumeTiming() {
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StartTimer();
if (perf_counters_measurement_) {
perf_counters_measurement_->Start();
}
}
void State::SkipWithError(const char* msg) {
CHECK(msg);
error_occurred_ = true;
{
MutexLock l(manager_->GetBenchmarkMutex());
if (manager_->results.has_error_ == false) {
manager_->results.error_message_ = msg;
manager_->results.has_error_ = true;
}
}
total_iterations_ = 0;
if (timer_->running()) timer_->StopTimer();
}
void State::SetIterationTime(double seconds) {
timer_->SetIterationTime(seconds);
}
void State::SetLabel(const char* label) {
MutexLock l(manager_->GetBenchmarkMutex());
manager_->results.report_label_ = label;
}
void State::StartKeepRunning() {
CHECK(!started_ && !finished_);
started_ = true;
total_iterations_ = error_occurred_ ? 0 : max_iterations;
manager_->StartStopBarrier();
if (!error_occurred_) ResumeTiming();
}
void State::FinishKeepRunning() {
CHECK(started_ && (!finished_ || error_occurred_));
if (!error_occurred_) {
PauseTiming();
}
// Total iterations has now wrapped around past 0. Fix this.
total_iterations_ = 0;
finished_ = true;
manager_->StartStopBarrier();
}
namespace internal {
namespace {
// Flushes streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
void FlushStreams(BenchmarkReporter* reporter) {
if (!reporter) return;
std::flush(reporter->GetOutputStream());
std::flush(reporter->GetErrorStream());
}
// Reports in both display and file reporters.
void Report(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter, const RunResults& run_results) {
auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only,
const RunResults& results) {
assert(reporter);
// If there are no aggregates, do output non-aggregates.
aggregates_only &= !results.aggregates_only.empty();
if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
if (!results.aggregates_only.empty())
reporter->ReportRuns(results.aggregates_only);
};
report_one(display_reporter, run_results.display_report_aggregates_only,
run_results);
if (file_reporter)
report_one(file_reporter, run_results.file_report_aggregates_only,
run_results);
FlushStreams(display_reporter);
FlushStreams(file_reporter);
}
void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
// Note the file_reporter can be null.
CHECK(display_reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
size_t stat_field_width = 0;
for (const BenchmarkInstance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name().str().size());
might_have_aggregates |= benchmark.repetitions() > 1;
for (const auto& Stat : benchmark.statistics())
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
if (might_have_aggregates) name_field_width += 1 + stat_field_width;
// Print header here
BenchmarkReporter::Context context;
context.name_field_width = name_field_width;
// Keep track of running times of all instances of each benchmark family.
std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports>
per_family_reports;
if (display_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
FlushStreams(display_reporter);
FlushStreams(file_reporter);
size_t num_repetitions_total = 0;
std::vector<internal::BenchmarkRunner> runners;
runners.reserve(benchmarks.size());
for (const BenchmarkInstance& benchmark : benchmarks) {
BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
if (benchmark.complexity() != oNone)
reports_for_family = &per_family_reports[benchmark.family_index()];
runners.emplace_back(benchmark, reports_for_family);
int num_repeats_of_this_instance = runners.back().GetNumRepeats();
num_repetitions_total += num_repeats_of_this_instance;
if (reports_for_family)
reports_for_family->num_runs_total += num_repeats_of_this_instance;
}
assert(runners.size() == benchmarks.size() && "Unexpected runner count.");
std::vector<int> repetition_indices;
repetition_indices.reserve(num_repetitions_total);
for (size_t runner_index = 0, num_runners = runners.size();
runner_index != num_runners; ++runner_index) {
const internal::BenchmarkRunner& runner = runners[runner_index];
std::fill_n(std::back_inserter(repetition_indices),
runner.GetNumRepeats(), runner_index);
}
assert(repetition_indices.size() == num_repetitions_total &&
"Unexpected number of repetition indexes.");
if (FLAGS_benchmark_enable_random_interleaving) {
std::random_device rd;
std::mt19937 g(rd());
std::shuffle(repetition_indices.begin(), repetition_indices.end(), g);
}
for (size_t repetition_index : repetition_indices) {
internal::BenchmarkRunner& runner = runners[repetition_index];
runner.DoOneRepetition();
if (runner.HasRepeatsRemaining()) continue;
// FIXME: report each repetition separately, not all of them in bulk.
RunResults run_results = runner.GetResults();
// Maybe calculate complexity report
if (const auto* reports_for_family = runner.GetReportsForFamily()) {
if (reports_for_family->num_runs_done ==
reports_for_family->num_runs_total) {
auto additional_run_stats = ComputeBigO(reports_for_family->Runs);
run_results.aggregates_only.insert(run_results.aggregates_only.end(),
additional_run_stats.begin(),
additional_run_stats.end());
per_family_reports.erase(
(int)reports_for_family->Runs.front().family_index);
}
}
Report(display_reporter, file_reporter, run_results);
}
}
display_reporter->Finalize();
if (file_reporter) file_reporter->Finalize();
FlushStreams(display_reporter);
FlushStreams(file_reporter);
}
// Disable deprecation warnings temporarily because we need to reference
// CSVReporter but don't want to trigger -Werror=deprecated-declarations.
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
std::unique_ptr<BenchmarkReporter> CreateReporter(
std::string const& name, ConsoleReporter::OutputOptions output_opts) {
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (name == "console") {
return PtrType(new ConsoleReporter(output_opts));
} else if (name == "json") {
return PtrType(new JSONReporter);
} else if (name == "csv") {
return PtrType(new CSVReporter);
} else {
std::cerr << "Unexpected format: '" << name << "'\n";
std::exit(1);
}
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
} // end namespace
bool IsZero(double n) {
return std::abs(n) < std::numeric_limits<double>::epsilon();
}
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
int output_opts = ConsoleReporter::OO_Defaults;
auto is_benchmark_color = [force_no_color]() -> bool {
if (force_no_color) {
return false;
}
if (FLAGS_benchmark_color == "auto") {
return IsColorTerminal();
}
return IsTruthyFlagValue(FLAGS_benchmark_color);
};
if (is_benchmark_color()) {
output_opts |= ConsoleReporter::OO_Color;
} else {
output_opts &= ~ConsoleReporter::OO_Color;
}
if (FLAGS_benchmark_counters_tabular) {
output_opts |= ConsoleReporter::OO_Tabular;
} else {
output_opts &= ~ConsoleReporter::OO_Tabular;
}
return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}
} // end namespace internal
size_t RunSpecifiedBenchmarks() {
return RunSpecifiedBenchmarks(nullptr, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
return RunSpecifiedBenchmarks(display_reporter, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
spec = "."; // Regexp that matches all benchmarks
// Setup the reporters
std::ofstream output_file;
std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!display_reporter) {
default_display_reporter = internal::CreateReporter(
FLAGS_benchmark_format, internal::GetOutputOptions());
display_reporter = default_display_reporter.get();
}
auto& Out = display_reporter->GetOutputStream();
auto& Err = display_reporter->GetErrorStream();
std::string const& fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter) {
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified."
<< std::endl;
std::exit(1);
}
if (!fname.empty()) {
output_file.open(fname);
if (!output_file.is_open()) {
Err << "invalid file name: '" << fname << "'" << std::endl;
std::exit(1);
}
if (!file_reporter) {
default_file_reporter = internal::CreateReporter(
FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
file_reporter = default_file_reporter.get();
}
file_reporter->SetOutputStream(&output_file);
file_reporter->SetErrorStream(&output_file);
}
std::vector<internal::BenchmarkInstance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
if (benchmarks.empty()) {
Err << "Failed to match any benchmarks against regex: " << spec << "\n";
return 0;
}
if (FLAGS_benchmark_list_tests) {
for (auto const& benchmark : benchmarks)
Out << benchmark.name().str() << "\n";
} else {
internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
return benchmarks.size();
}
void RegisterMemoryManager(MemoryManager* manager) {
internal::memory_manager = manager;
}
void AddCustomContext(const std::string& key, const std::string& value) {
if (internal::global_context == nullptr) {
internal::global_context = new std::map<std::string, std::string>();
}
if (!internal::global_context->emplace(key, value).second) {
std::cerr << "Failed to add custom context \"" << key << "\" as it already "
<< "exists with value \"" << value << "\"\n";
}
}
namespace internal {
void PrintUsageAndExit() {
fprintf(stdout,
"benchmark"
" [--benchmark_list_tests={true|false}]\n"
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
" [--benchmark_enable_random_interleaving={true|false}]\n"
" [--benchmark_report_aggregates_only={true|false}]\n"
" [--benchmark_display_aggregates_only={true|false}]\n"
" [--benchmark_format=<console|json|csv>]\n"
" [--benchmark_out=<filename>]\n"
" [--benchmark_out_format=<json|console|csv>]\n"
" [--benchmark_color={auto|true|false}]\n"
" [--benchmark_counters_tabular={true|false}]\n"
" [--benchmark_context=<key>=<value>,...]\n"
" [--v=<verbosity>]\n");
exit(0);
}
void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
BenchmarkReporter::Context::executable_name =
(argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; argc && i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
ParseDoubleFlag(argv[i], "benchmark_min_time",
&FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions",
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
&FLAGS_benchmark_enable_random_interleaving) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
&FLAGS_benchmark_report_aggregates_only) ||
ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
&FLAGS_benchmark_display_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format",
&FLAGS_benchmark_out_format) ||
ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
// "color_print" is the deprecated name for "benchmark_color".
// TODO: Remove this.
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular",
&FLAGS_benchmark_counters_tabular) ||
ParseStringFlag(argv[i], "benchmark_perf_counters",
&FLAGS_benchmark_perf_counters) ||
ParseKeyValueFlag(argv[i], "benchmark_context",
&FLAGS_benchmark_context) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
--(*argc);
--i;
} else if (IsFlag(argv[i], "help")) {
PrintUsageAndExit();
}
}
for (auto const* flag :
{&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) {
if (*flag != "console" && *flag != "json" && *flag != "csv") {
PrintUsageAndExit();
}
}
if (FLAGS_benchmark_color.empty()) {
PrintUsageAndExit();
}
for (const auto& kv : FLAGS_benchmark_context) {
AddCustomContext(kv.first, kv.second);
}
}
int InitializeStreams() {
static std::ios_base::Init init;
return 0;
}
} // end namespace internal
void Initialize(int* argc, char** argv) {
internal::ParseCommandLineFlags(argc, argv);
internal::LogLevel() = FLAGS_v;
}
void Shutdown() {
delete internal::global_context;
}
bool ReportUnrecognizedArguments(int argc, char** argv) {
for (int i = 1; i < argc; ++i) {
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
argv[i]);
}
return argc > 1;
}
} // end namespace benchmark
0707010000003B000081A400000000000000000000000160C0813C00000B1B000000000000000000000000000000000000002E00000000benchmark-1.5.5/src/benchmark_api_internal.cc#include "benchmark_api_internal.h"
#include <cinttypes>
#include "string_util.h"
namespace benchmark {
namespace internal {
BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx,
int per_family_instance_idx,
const std::vector<int64_t>& args,
int thread_count)
: benchmark_(*benchmark),
family_index_(family_idx),
per_family_instance_index_(per_family_instance_idx),
aggregation_report_mode_(benchmark_.aggregation_report_mode_),
args_(args),
time_unit_(benchmark_.time_unit_),
measure_process_cpu_time_(benchmark_.measure_process_cpu_time_),
use_real_time_(benchmark_.use_real_time_),
use_manual_time_(benchmark_.use_manual_time_),
complexity_(benchmark_.complexity_),
complexity_lambda_(benchmark_.complexity_lambda_),
statistics_(benchmark_.statistics_),
repetitions_(benchmark_.repetitions_),
min_time_(benchmark_.min_time_),
iterations_(benchmark_.iterations_),
threads_(thread_count) {
name_.function_name = benchmark_.name_;
size_t arg_i = 0;
for (const auto& arg : args) {
if (!name_.args.empty()) {
name_.args += '/';
}
if (arg_i < benchmark->arg_names_.size()) {
const auto& arg_name = benchmark_.arg_names_[arg_i];
if (!arg_name.empty()) {
name_.args += StrFormat("%s:", arg_name.c_str());
}
}
name_.args += StrFormat("%" PRId64, arg);
++arg_i;
}
if (!IsZero(benchmark->min_time_)) {
name_.min_time = StrFormat("min_time:%0.3f", benchmark_.min_time_);
}
if (benchmark_.iterations_ != 0) {
name_.iterations = StrFormat(
"iterations:%lu", static_cast<unsigned long>(benchmark_.iterations_));
}
if (benchmark_.repetitions_ != 0) {
name_.repetitions = StrFormat("repeats:%d", benchmark_.repetitions_);
}
if (benchmark_.measure_process_cpu_time_) {
name_.time_type = "process_time";
}
if (benchmark_.use_manual_time_) {
if (!name_.time_type.empty()) {
name_.time_type += '/';
}
name_.time_type += "manual_time";
} else if (benchmark_.use_real_time_) {
if (!name_.time_type.empty()) {
name_.time_type += '/';
}
name_.time_type += "real_time";
}
if (!benchmark_.thread_counts_.empty()) {
name_.threads = StrFormat("threads:%d", threads_);
}
}
State BenchmarkInstance::Run(
IterationCount iters, int thread_id, internal::ThreadTimer* timer,
internal::ThreadManager* manager,
internal::PerfCountersMeasurement* perf_counters_measurement) const {
State st(iters, args_, thread_id, threads_, timer, manager,
perf_counters_measurement);
benchmark_.Run(st);
return st;
}
} // namespace internal
} // namespace benchmark
0707010000003C000081A400000000000000000000000160C0813C00000A45000000000000000000000000000000000000002D00000000benchmark-1.5.5/src/benchmark_api_internal.h#ifndef BENCHMARK_API_INTERNAL_H
#define BENCHMARK_API_INTERNAL_H
#include <cmath>
#include <iosfwd>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "benchmark/benchmark.h"
#include "commandlineflags.h"
namespace benchmark {
namespace internal {
// Information kept per benchmark we may want to run
class BenchmarkInstance {
public:
BenchmarkInstance(Benchmark* benchmark, int family_index,
int per_family_instance_index,
const std::vector<int64_t>& args, int threads);
const BenchmarkName& name() const { return name_; }
int family_index() const { return family_index_; }
int per_family_instance_index() const { return per_family_instance_index_; }
AggregationReportMode aggregation_report_mode() const {
return aggregation_report_mode_;
}
TimeUnit time_unit() const { return time_unit_; }
bool measure_process_cpu_time() const { return measure_process_cpu_time_; }
bool use_real_time() const { return use_real_time_; }
bool use_manual_time() const { return use_manual_time_; }
BigO complexity() const { return complexity_; }
BigOFunc& complexity_lambda() const { return *complexity_lambda_; }
const std::vector<Statistics>& statistics() const { return statistics_; }
int repetitions() const { return repetitions_; }
double min_time() const { return min_time_; }
IterationCount iterations() const { return iterations_; }
int threads() const { return threads_; }
State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer,
internal::ThreadManager* manager,
internal::PerfCountersMeasurement* perf_counters_measurement) const;
private:
BenchmarkName name_;
Benchmark& benchmark_;
const int family_index_;
const int per_family_instance_index_;
AggregationReportMode aggregation_report_mode_;
const std::vector<int64_t>& args_;
TimeUnit time_unit_;
bool measure_process_cpu_time_;
bool use_real_time_;
bool use_manual_time_;
BigO complexity_;
BigOFunc* complexity_lambda_;
UserCounters counters_;
const std::vector<Statistics>& statistics_;
int repetitions_;
double min_time_;
IterationCount iterations_;
int threads_; // Number of concurrent threads to use
};
bool FindBenchmarksInternal(const std::string& re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
bool IsZero(double n);
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_API_INTERNAL_H
0707010000003D000081A400000000000000000000000160C0813C00000296000000000000000000000000000000000000002600000000benchmark-1.5.5/src/benchmark_main.cc// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
BENCHMARK_MAIN();
0707010000003E000081A400000000000000000000000160C0813C00000691000000000000000000000000000000000000002600000000benchmark-1.5.5/src/benchmark_name.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <benchmark/benchmark.h>
namespace benchmark {
namespace {
// Compute the total size of a pack of std::strings
size_t size_impl() { return 0; }
template <typename Head, typename... Tail>
size_t size_impl(const Head& head, const Tail&... tail) {
return head.size() + size_impl(tail...);
}
// Join a pack of std::strings using a delimiter
// TODO: use absl::StrJoin
void join_impl(std::string&, char) {}
template <typename Head, typename... Tail>
void join_impl(std::string& s, const char delimiter, const Head& head,
const Tail&... tail) {
if (!s.empty() && !head.empty()) {
s += delimiter;
}
s += head;
join_impl(s, delimiter, tail...);
}
template <typename... Ts>
std::string join(char delimiter, const Ts&... ts) {
std::string s;
s.reserve(sizeof...(Ts) + size_impl(ts...));
join_impl(s, delimiter, ts...);
return s;
}
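// For example (illustrative): join('/', std::string("a"), std::string(),
// std::string("b")) yields "a/b"; empty components do not introduce extra
// delimiters.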
} // namespace
std::string BenchmarkName::str() const {
return join('/', function_name, args, min_time, iterations, repetitions,
time_type, threads);
}
} // namespace benchmark
0707010000003F000081A400000000000000000000000160C0813C00003492000000000000000000000000000000000000002A00000000benchmark-1.5.5/src/benchmark_register.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark_register.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <cinttypes>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <numeric>
#include <sstream>
#include <thread>
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "check.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace {
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
static const int kRangeMultiplier = 8;
// The size of a benchmark family is the number of inputs the benchmark is
// repeated on. If this is "large", warn the user during configuration.
static const size_t kMaxFamilySize = 100;
} // end namespace
namespace internal {
//=============================================================================//
// BenchmarkFamilies
//=============================================================================//
// Class for managing registered benchmarks. Note that each registered
// benchmark identifies a family of related benchmarks to run.
class BenchmarkFamilies {
public:
static BenchmarkFamilies* GetInstance();
// Registers a benchmark family and returns the index assigned to it.
size_t AddBenchmark(std::unique_ptr<Benchmark> family);
// Clear all registered benchmark families.
void ClearBenchmarks();
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
private:
BenchmarkFamilies() {}
std::vector<std::unique_ptr<Benchmark>> families_;
Mutex mutex_;
};
BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
static BenchmarkFamilies instance;
return &instance;
}
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
MutexLock l(mutex_);
size_t index = families_.size();
families_.push_back(std::move(family));
return index;
}
void BenchmarkFamilies::ClearBenchmarks() {
MutexLock l(mutex_);
families_.clear();
families_.shrink_to_fit();
}
bool BenchmarkFamilies::FindBenchmarks(
std::string spec, std::vector<BenchmarkInstance>* benchmarks,
std::ostream* ErrStream) {
CHECK(ErrStream);
auto& Err = *ErrStream;
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
bool isNegativeFilter = false;
if (spec[0] == '-') {
spec.replace(0, 1, "");
isNegativeFilter = true;
}
if (!re.Init(spec, &error_msg)) {
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
}
// Special list of thread counts to use when none are specified
const std::vector<int> one_thread = {1};
int next_family_index = 0;
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark>& family : families_) {
int family_index = next_family_index;
int per_family_instance_index = 0;
// Family was deleted or benchmark doesn't match
if (!family) continue;
if (family->ArgsCnt() == -1) {
family->Args({});
}
const std::vector<int>* thread_counts =
(family->thread_counts_.empty()
? &one_thread
: &static_cast<const std::vector<int>&>(family->thread_counts_));
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run with at least 'family_size' different inputs.
// If 'family_size' is very large, warn the user.
if (family_size > kMaxFamilySize) {
Err << "The number of inputs is very large. " << family->name_
<< " will be repeated at least " << family_size << " times.\n";
}
// Reserve in the special case of the regex ".", since then we know the
// final family size.
if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size);
for (auto const& args : family->args_) {
for (int num_threads : *thread_counts) {
BenchmarkInstance instance(family.get(), family_index,
per_family_instance_index, args,
num_threads);
const auto full_name = instance.name().str();
if ((re.Match(full_name) && !isNegativeFilter) ||
(!re.Match(full_name) && isNegativeFilter)) {
benchmarks->push_back(std::move(instance));
++per_family_instance_index;
// Only bump the next family index once we've established that
// at least one instance of this family will be run.
if (next_family_index == family_index) ++next_family_index;
}
}
}
}
return true;
}
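// Filter semantics sketch (illustrative spec values): FindBenchmarks
// ("BM_Copy.*", ...) keeps only instances whose full name matches the
// regex, while a leading '-' ("-BM_Copy.*") inverts the filter and keeps
// every instance that does *not* match.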
Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
std::unique_ptr<Benchmark> bench_ptr(bench);
BenchmarkFamilies* families = BenchmarkFamilies::GetInstance();
families->AddBenchmark(std::move(bench_ptr));
return bench;
}
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string& re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err) {
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
//=============================================================================//
// Benchmark
//=============================================================================//
Benchmark::Benchmark(const char* name)
: name_(name),
aggregation_report_mode_(ARM_Unspecified),
time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier),
min_time_(0),
iterations_(0),
repetitions_(0),
measure_process_cpu_time_(false),
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
complexity_lambda_(nullptr) {
ComputeStatistics("mean", StatisticsMean);
ComputeStatistics("median", StatisticsMedian);
ComputeStatistics("stddev", StatisticsStdDev);
}
Benchmark::~Benchmark() {}
Benchmark* Benchmark::Name(const std::string& name) {
SetName(name.c_str());
return this;
}
Benchmark* Benchmark::Arg(int64_t x) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
}
Benchmark* Benchmark::Unit(TimeUnit unit) {
time_unit_ = unit;
return this;
}
Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<int64_t> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
for (int64_t i : arglist) {
args_.push_back({i});
}
return this;
}
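// Worked example (derivable from AddRange): with the default multiplier of
// 8, Range(8, 1024) registers the arguments 8, 64, 512 and 1024, i.e. the
// endpoints plus the powers of 8 strictly between them.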
Benchmark* Benchmark::Ranges(
const std::vector<std::pair<int64_t, int64_t>>& ranges) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
range_multiplier_);
}
ArgsProduct(arglists);
return this;
}
Benchmark* Benchmark::ArgsProduct(
const std::vector<std::vector<int64_t>>& arglists) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
std::vector<std::size_t> indices(arglists.size());
const std::size_t total = std::accumulate(
std::begin(arglists), std::end(arglists), std::size_t{1},
[](const std::size_t res, const std::vector<int64_t>& arglist) {
return res * arglist.size();
});
std::vector<int64_t> args;
args.reserve(arglists.size());
for (std::size_t i = 0; i < total; i++) {
for (std::size_t arg = 0; arg < arglists.size(); arg++) {
args.push_back(arglists[arg][indices[arg]]);
}
args_.push_back(args);
args.clear();
std::size_t arg = 0;
do {
indices[arg] = (indices[arg] + 1) % arglists[arg].size();
} while (indices[arg++] == 0 && arg < arglists.size());
}
return this;
}
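// Worked example (values chosen for illustration): ArgsProduct({{1, 2},
// {10, 20}}) registers the tuples {1,10}, {2,10}, {1,20} and {2,20}; the
// 'indices' vector above acts as a mixed-radix odometer whose first digit
// varies fastest.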
Benchmark* Benchmark::ArgName(const std::string& name) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
arg_names_ = {name};
return this;
}
Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
arg_names_ = names;
return this;
}
Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
CHECK_LE(start, limit);
for (int64_t arg = start; arg <= limit; arg += step) {
args_.push_back({arg});
}
return this;
}
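// Example: DenseRange(0, 10, 2) registers the single arguments
// 0, 2, 4, 6, 8 and 10.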
Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
args_.push_back(args);
return this;
}
Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
custom_arguments(this);
return this;
}
Benchmark* Benchmark::RangeMultiplier(int multiplier) {
CHECK(multiplier > 1);
range_multiplier_ = multiplier;
return this;
}
Benchmark* Benchmark::MinTime(double t) {
CHECK(t > 0.0);
CHECK(iterations_ == 0);
min_time_ = t;
return this;
}
Benchmark* Benchmark::Iterations(IterationCount n) {
CHECK(n > 0);
CHECK(IsZero(min_time_));
iterations_ = n;
return this;
}
Benchmark* Benchmark::Repetitions(int n) {
CHECK(n > 0);
repetitions_ = n;
return this;
}
Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
return this;
}
Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
// If we were called, the report mode is no longer 'unspecified', in any case.
aggregation_report_mode_ = static_cast<AggregationReportMode>(
aggregation_report_mode_ | ARM_Default);
if (value) {
aggregation_report_mode_ = static_cast<AggregationReportMode>(
aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
} else {
aggregation_report_mode_ = static_cast<AggregationReportMode>(
aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
}
return this;
}
Benchmark* Benchmark::MeasureProcessCPUTime() {
// Can be used together with UseRealTime() / UseManualTime().
measure_process_cpu_time_ = true;
return this;
}
Benchmark* Benchmark::UseRealTime() {
CHECK(!use_manual_time_)
<< "Cannot set UseRealTime and UseManualTime simultaneously.";
use_real_time_ = true;
return this;
}
Benchmark* Benchmark::UseManualTime() {
CHECK(!use_real_time_)
<< "Cannot set UseRealTime and UseManualTime simultaneously.";
use_manual_time_ = true;
return this;
}
Benchmark* Benchmark::Complexity(BigO complexity) {
complexity_ = complexity;
return this;
}
Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
complexity_lambda_ = complexity;
complexity_ = oLambda;
return this;
}
Benchmark* Benchmark::ComputeStatistics(std::string name,
StatisticsFunc* statistics) {
statistics_.emplace_back(name, statistics);
return this;
}
Benchmark* Benchmark::Threads(int t) {
CHECK_GT(t, 0);
thread_counts_.push_back(t);
return this;
}
Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
AddRange(&thread_counts_, min_threads, max_threads, 2);
return this;
}
Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
int stride) {
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
CHECK_GE(stride, 1);
for (auto i = min_threads; i < max_threads; i += stride) {
thread_counts_.push_back(i);
}
thread_counts_.push_back(max_threads);
return this;
}
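// Example: DenseThreadRange(1, 8, 3) runs with 1, 4, 7 and 8 threads; the
// maximum is always appended even when the stride steps over it.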
Benchmark* Benchmark::ThreadPerCpu() {
thread_counts_.push_back(CPUInfo::Get().num_cpus);
return this;
}
void Benchmark::SetName(const char* name) { name_ = name; }
int Benchmark::ArgsCnt() const {
if (args_.empty()) {
if (arg_names_.empty()) return -1;
return static_cast<int>(arg_names_.size());
}
return static_cast<int>(args_.front().size());
}
//=============================================================================//
// FunctionBenchmark
//=============================================================================//
void FunctionBenchmark::Run(State& st) { func_(st); }
} // end namespace internal
void ClearRegisteredBenchmarks() {
internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
}
} // end namespace benchmark
07070100000040000081A400000000000000000000000160C0813C00000B40000000000000000000000000000000000000002900000000benchmark-1.5.5/src/benchmark_register.h#ifndef BENCHMARK_REGISTER_H
#define BENCHMARK_REGISTER_H
#include <limits>
#include <vector>
#include "check.h"
namespace benchmark {
namespace internal {
// Append the powers of 'mult' in the closed interval [lo, hi].
// Returns iterator to the start of the inserted range.
template <typename T>
typename std::vector<T>::iterator
AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
const size_t start_offset = dst->size();
static const T kmax = std::numeric_limits<T>::max();
// Space out the values in multiples of "mult"
for (T i = static_cast<T>(1); i <= hi; i *= mult) {
if (i >= lo) {
dst->push_back(i);
}
// Break the loop here since multiplying by
// 'mult' would move outside of the range of T
if (i > kmax / mult) break;
}
return dst->begin() + start_offset;
}
template <typename T>
void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
// We negate lo and hi so we require that they cannot be equal to 'min'.
CHECK_GT(lo, std::numeric_limits<T>::min());
CHECK_GT(hi, std::numeric_limits<T>::min());
CHECK_GE(hi, lo);
CHECK_LE(hi, 0);
// Add positive powers, then negate and reverse.
// Casts necessary since small integers get promoted
// to 'int' when negating.
const auto lo_complement = static_cast<T>(-lo);
const auto hi_complement = static_cast<T>(-hi);
const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
std::for_each(it, dst->end(), [](T& t) { t *= -1; });
std::reverse(it, dst->end());
}
template <typename T>
void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
"Args type must be a signed integer");
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
// Add "lo"
dst->push_back(lo);
// Handle lo == hi as a special case, so we then know
// lo < hi and so it is safe to add 1 to lo and subtract 1
// from hi without falling outside of the range of T.
if (lo == hi) return;
// Ensure that lo_inner <= hi_inner below.
if (lo + 1 == hi) {
dst->push_back(hi);
return;
}
// Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
const auto lo_inner = static_cast<T>(lo + 1);
const auto hi_inner = static_cast<T>(hi - 1);
// Insert negative values
if (lo_inner < 0) {
AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
}
// Treat 0 as a special case (see discussion on #762).
if (lo < 0 && hi >= 0) {
dst->push_back(0);
}
// Insert positive values
if (hi_inner > 0) {
AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
}
// Add "hi" (if different from last value).
if (hi != dst->back()) {
dst->push_back(hi);
}
}
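// Worked example (derivable from the code above): AddRange(&v, -8, 8, 2)
// appends -8, -4, -2, -1, 0, 1, 2, 4, 8: the endpoints, the negated powers
// of 2 inside (-8, 0), the special-cased 0, and the powers of 2 inside
// (0, 8).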
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_REGISTER_H
07070100000041000081A400000000000000000000000160C0813C00003435000000000000000000000000000000000000002800000000benchmark-1.5.5/src/benchmark_runner.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark_runner.h"
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"
namespace benchmark {
namespace internal {
MemoryManager* memory_manager = nullptr;
namespace {
static constexpr IterationCount kMaxIterations = 1000000000;
BenchmarkReporter::Run CreateRunReport(
const benchmark::internal::BenchmarkInstance& b,
const internal::ThreadManager::Result& results,
IterationCount memory_iterations,
const MemoryManager::Result& memory_result, double seconds,
int64_t repetition_index, int64_t repeats) {
// Create report about this benchmark run.
BenchmarkReporter::Run report;
report.run_name = b.name();
report.family_index = b.family_index();
report.per_family_instance_index = b.per_family_instance_index();
report.error_occurred = results.has_error_;
report.error_message = results.error_message_;
report.report_label = results.report_label_;
// This is the total iterations across all threads.
report.iterations = results.iterations;
report.time_unit = b.time_unit();
report.threads = b.threads();
report.repetition_index = repetition_index;
report.repetitions = repeats;
if (!report.error_occurred) {
if (b.use_manual_time()) {
report.real_accumulated_time = results.manual_time_used;
} else {
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity();
report.complexity_lambda = b.complexity_lambda();
report.statistics = &b.statistics();
report.counters = results.counters;
if (memory_iterations > 0) {
report.has_memory_result = true;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result.num_allocs) /
memory_iterations
: 0;
report.max_bytes_used = memory_result.max_bytes_used;
}
internal::Finish(&report.counters, results.iterations, seconds,
b.threads());
}
return report;
}
// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results.
void RunInThread(const BenchmarkInstance* b, IterationCount iters,
int thread_id, ThreadManager* manager,
PerfCountersMeasurement* perf_counters_measurement) {
internal::ThreadTimer timer(
b->measure_process_cpu_time()
? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create());
State st =
b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
internal::ThreadManager::Result& results = manager->results;
results.iterations += st.iterations();
results.cpu_time_used += timer.cpu_time_used();
results.real_time_used += timer.real_time_used();
results.manual_time_used += timer.manual_time_used();
results.complexity_n += st.complexity_length_n();
internal::Increment(&results.counters, st.counters);
}
manager->NotifyThreadComplete();
}
} // end namespace
BenchmarkRunner::BenchmarkRunner(
const benchmark::internal::BenchmarkInstance& b_,
BenchmarkReporter::PerFamilyRunReports* reports_for_family_)
: b(b_),
reports_for_family(reports_for_family_),
min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time),
repeats(b.repetitions() != 0 ? b.repetitions()
: FLAGS_benchmark_repetitions),
has_explicit_iteration_count(b.iterations() != 0),
pool(b.threads() - 1),
iters(has_explicit_iteration_count ? b.iterations() : 1),
perf_counters_measurement(
PerfCounters::Create(StrSplit(FLAGS_benchmark_perf_counters, ','))),
perf_counters_measurement_ptr(perf_counters_measurement.IsValid()
? &perf_counters_measurement
: nullptr) {
run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only ||
FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only =
FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
run_results.display_report_aggregates_only =
(b.aggregation_report_mode() &
internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only =
(b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
}
// Validate the perf-counter setup unconditionally, not only when an
// aggregation report mode was specified.
CHECK(FLAGS_benchmark_perf_counters.empty() ||
perf_counters_measurement.IsValid())
<< "Perf counters were requested but could not be set up.";
}
BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(b.threads()));
// Run all but one thread in separate threads
for (std::size_t ti = 0; ti < pool.size(); ++ti) {
pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
manager.get(), perf_counters_measurement_ptr);
}
// And run one thread here directly.
// (If we were asked to run just one thread, we don't create new threads.)
// Yes, we need to do this here *after* we start the separate threads.
RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr);
// The main thread has finished. Now let's wait for the other threads.
manager->WaitForAllThreads();
for (std::thread& thread : pool) thread.join();
IterationResults i;
// Acquire the measurements/counters from the manager, UNDER THE LOCK!
{
MutexLock l(manager->GetBenchmarkMutex());
i.results = manager->results;
}
// And get rid of the manager.
manager.reset();
// Adjust real/manual time stats since they were reported per thread.
i.results.real_time_used /= b.threads();
i.results.manual_time_used /= b.threads();
// If we were measuring whole-process CPU usage, adjust the CPU time too.
if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads();
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
<< i.results.real_time_used << "\n";
// By using KeepRunningBatch a benchmark can iterate more times than
// requested, so take the iteration count from i.results.
i.iters = i.results.iterations / b.threads();
// Base decisions off of real time if requested by this benchmark.
i.seconds = i.results.cpu_time_used;
if (b.use_manual_time()) {
i.seconds = i.results.manual_time_used;
} else if (b.use_real_time()) {
i.seconds = i.results.real_time_used;
}
return i;
}
IterationCount BenchmarkRunner::PredictNumItersNeeded(
const IterationResults& i) const {
// See by how much the iteration count should be increased.
// Note: Avoid division by zero with max(seconds, 1ns).
double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
// If our last run was at least 10% of FLAGS_benchmark_min_time then we
// use the multiplier directly.
// Otherwise we use at most 10 times expansion.
// NOTE: When the last run was at least 10% of the min time the max
// expansion should be 14x.
bool is_significant = (i.seconds / min_time) > 0.1;
multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
if (multiplier <= 1.0) multiplier = 2.0;
// So what seems to be the sufficiently-large iteration count? Round up.
const IterationCount max_next_iters = static_cast<IterationCount>(
std::lround(std::max(multiplier * static_cast<double>(i.iters),
static_cast<double>(i.iters) + 1.0)));
// But we do have *some* sanity limits, though.
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
return next_iters;
}
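// Worked example (illustrative numbers): with min_time = 0.5s and a run of
// i.iters = 1000 iterations that took i.seconds = 0.1s, multiplier =
// 0.5 * 1.4 / 0.1 = 7.0; since 0.1 / 0.5 = 0.2 > 0.1 the run counts as
// significant and the multiplier is used as-is, predicting
// lround(7.0 * 1000) = 7000 iterations for the next attempt.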
bool BenchmarkRunner::ShouldReportIterationResults(
const IterationResults& i) const {
// Determine if this run should be reported:
// either it has run for a sufficient amount of time,
// or an error was reported.
return i.results.has_error_ ||
i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >= min_time || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds
// the minimum time.
// Note that user-provided timers are exempt from this sanity check.
((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time());
}
void BenchmarkRunner::DoOneRepetition() {
assert(HasRepeatsRemaining() && "Already done all repetitions?");
const bool is_the_first_repetition = num_repetitions_done == 0;
IterationResults i;
// We *may* be gradually increasing the length (iteration count)
// of the benchmark until we decide the results are significant.
// And once we do, we report those last results and exit.
// Please do note that if there are repetitions, the iteration count
// is *only* calculated for the *first* repetition, and other repetitions
// simply use that precomputed iteration count.
for (;;) {
i = DoNIterations();
// Do we consider the results to be significant?
// If we are doing repetitions, and the first repetition was already done,
// it has calculated the correct iteration count, so we have just run that
// very iteration count. No need to calculate anything. Just report.
// Else, the normal rules apply.
const bool results_are_significant = !is_the_first_repetition ||
has_explicit_iteration_count ||
ShouldReportIterationResults(i);
if (results_are_significant) break; // Good, let's report them!
// Nope, bad iteration. Let's re-estimate the hopefully-sufficient
// iteration count, and run the benchmark again...
iters = PredictNumItersNeeded(i);
assert(iters > i.iters &&
"if we did more iterations than we want to do the next time, "
"then we should have accepted the current iteration run.");
}
// Oh, one last thing: we also need to produce the 'memory measurements'.
MemoryManager::Result memory_result;
IterationCount memory_iterations = 0;
if (memory_manager != nullptr) {
// Only run a few iterations to reduce the impact of one-time
// allocations in benchmarks that are not properly managed.
memory_iterations = std::min<IterationCount>(16, iters);
memory_manager->Start();
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(1));
RunInThread(&b, memory_iterations, 0, manager.get(),
perf_counters_measurement_ptr);
manager->WaitForAllThreads();
manager.reset();
memory_manager->Stop(&memory_result);
}
// Ok, now actually report.
BenchmarkReporter::Run report =
CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds,
num_repetitions_done, repeats);
if (reports_for_family) {
++reports_for_family->num_runs_done;
if (!report.error_occurred) reports_for_family->Runs.push_back(report);
}
run_results.non_aggregates.push_back(report);
++num_repetitions_done;
}
RunResults&& BenchmarkRunner::GetResults() {
assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?");
// Calculate additional statistics over the repetitions of this instance.
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
return std::move(run_results);
}
} // end namespace internal
} // end namespace benchmark
07070100000042000081A400000000000000000000000160C0813C00000B2B000000000000000000000000000000000000002700000000benchmark-1.5.5/src/benchmark_runner.h// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_RUNNER_H_
#define BENCHMARK_RUNNER_H_
#include <thread>
#include <vector>
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#include "perf_counters.h"
#include "thread_manager.h"
DECLARE_double(benchmark_min_time);
DECLARE_int32(benchmark_repetitions);
DECLARE_bool(benchmark_report_aggregates_only);
DECLARE_bool(benchmark_display_aggregates_only);
DECLARE_string(benchmark_perf_counters);
namespace benchmark {
namespace internal {
extern MemoryManager* memory_manager;
struct RunResults {
std::vector<BenchmarkReporter::Run> non_aggregates;
std::vector<BenchmarkReporter::Run> aggregates_only;
bool display_report_aggregates_only = false;
bool file_report_aggregates_only = false;
};
class BenchmarkRunner {
public:
BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
BenchmarkReporter::PerFamilyRunReports* reports_for_family);
int GetNumRepeats() const { return repeats; }
bool HasRepeatsRemaining() const {
return GetNumRepeats() != num_repetitions_done;
}
void DoOneRepetition();
RunResults&& GetResults();
BenchmarkReporter::PerFamilyRunReports* GetReportsForFamily() const {
return reports_for_family;
};
private:
RunResults run_results;
const benchmark::internal::BenchmarkInstance& b;
BenchmarkReporter::PerFamilyRunReports* reports_for_family;
const double min_time;
const int repeats;
const bool has_explicit_iteration_count;
int num_repetitions_done = 0;
std::vector<std::thread> pool;
IterationCount iters; // preserved between repetitions!
// So only the first repetition has to find/calculate it,
// the other repetitions will just use that precomputed iteration count.
PerfCountersMeasurement perf_counters_measurement;
PerfCountersMeasurement* const perf_counters_measurement_ptr;
struct IterationResults {
internal::ThreadManager::Result results;
IterationCount iters;
double seconds;
};
IterationResults DoNIterations();
IterationCount PredictNumItersNeeded(const IterationResults& i) const;
bool ShouldReportIterationResults(const IterationResults& i) const;
};
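// Usage sketch (how the library is expected to drive this class, inferred
// from the interface above):
//
//   BenchmarkRunner runner(instance, reports_for_family);
//   while (runner.HasRepeatsRemaining()) runner.DoOneRepetition();
//   RunResults results = runner.GetResults();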
} // namespace internal
} // end namespace benchmark
#endif // BENCHMARK_RUNNER_H_
07070100000043000081A400000000000000000000000160C0813C0000097D000000000000000000000000000000000000001C00000000benchmark-1.5.5/src/check.h#ifndef CHECK_H_
#define CHECK_H_
#include <cmath>
#include <cstdlib>
#include <ostream>
#include "internal_macros.h"
#include "log.h"
namespace benchmark {
namespace internal {
typedef void(AbortHandlerT)();
inline AbortHandlerT*& GetAbortHandler() {
static AbortHandlerT* handler = &std::abort;
return handler;
}
BENCHMARK_NORETURN inline void CallAbortHandler() {
GetAbortHandler()();
std::abort(); // fallback to enforce noreturn
}
// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed.
class CheckHandler {
public:
CheckHandler(const char* check, const char* file, const char* func, int line)
: log_(GetErrorLogInstance()) {
log_ << file << ":" << line << ": " << func << ": Check `" << check
<< "' failed. ";
}
LogType& GetLog() { return log_; }
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
log_ << std::endl;
CallAbortHandler();
}
CheckHandler& operator=(const CheckHandler&) = delete;
CheckHandler(const CheckHandler&) = delete;
CheckHandler() = delete;
private:
LogType& log_;
};
} // end namespace internal
} // end namespace benchmark
// The CHECK macro returns a std::ostream object that can have extra information
// written to it.
#ifndef NDEBUG
#define CHECK(b) \
(b ? ::benchmark::internal::GetNullLogInstance() \
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
.GetLog())
#else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif
// clang-format off
// preserve whitespacing between operators for alignment
#define CHECK_EQ(a, b) CHECK((a) == (b))
#define CHECK_NE(a, b) CHECK((a) != (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
#define CHECK_LE(a, b) CHECK((a) <= (b))
#define CHECK_GT(a, b) CHECK((a) > (b))
#define CHECK_LT(a, b) CHECK((a) < (b))
#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps))
#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
// clang-format on
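// Usage sketch: a failing CHECK evaluates to a stream, so extra context can
// be appended, e.g.
//   CHECK_GE(size, 0) << "size must be non-negative, got " << size;
// In NDEBUG builds the condition itself is never evaluated.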
#endif // CHECK_H_
07070100000044000081A400000000000000000000000160C0813C00001498000000000000000000000000000000000000002200000000benchmark-1.5.5/src/colorprint.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "colorprint.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include "check.h"
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <windows.h>
#include <io.h>
#else
#include <unistd.h>
#endif // BENCHMARK_OS_WINDOWS
namespace benchmark {
namespace {
#ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode;
#else
typedef const char* PlatformColorCode;
#endif
PlatformColorCode GetPlatformColorCode(LogColor color) {
#ifdef BENCHMARK_OS_WINDOWS
switch (color) {
case COLOR_RED:
return FOREGROUND_RED;
case COLOR_GREEN:
return FOREGROUND_GREEN;
case COLOR_YELLOW:
return FOREGROUND_RED | FOREGROUND_GREEN;
case COLOR_BLUE:
return FOREGROUND_BLUE;
case COLOR_MAGENTA:
return FOREGROUND_BLUE | FOREGROUND_RED;
case COLOR_CYAN:
return FOREGROUND_BLUE | FOREGROUND_GREEN;
case COLOR_WHITE: // fall through to default
default:
return 0;
}
#else
switch (color) {
case COLOR_RED:
return "1";
case COLOR_GREEN:
return "2";
case COLOR_YELLOW:
return "3";
case COLOR_BLUE:
return "4";
case COLOR_MAGENTA:
return "5";
case COLOR_CYAN:
return "6";
case COLOR_WHITE:
return "7";
default:
return nullptr;
};
#endif
}
} // end namespace
std::string FormatString(const char* msg, va_list args) {
// We might need a second shot at this, so pre-emptively make a copy.
va_list args_cp;
va_copy(args_cp, args);
std::size_t size = 256;
char local_buff[256];
auto ret = vsnprintf(local_buff, size, msg, args_cp);
va_end(args_cp);
// Currently there is no error handling for failure, so this is a hack.
CHECK(ret >= 0);
if (ret == 0) // handle empty expansion
return {};
else if (static_cast<size_t>(ret) < size)
return local_buff;
else {
// we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]);
ret = vsnprintf(buff.get(), size, msg, args);
CHECK(ret > 0 && ((size_t)ret) < size);
return buff.get();
}
}
std::string FormatString(const char* msg, ...) {
va_list args;
va_start(args, msg);
auto tmp = FormatString(msg, args);
va_end(args);
return tmp;
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
va_list args) {
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
// Gets the current text color.
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
// We need to flush the stream buffers into the console before each
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
fflush(stdout);
SetConsoleTextAttribute(stdout_handle,
GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
fflush(stdout);
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
const char* color_code = GetPlatformColorCode(color);
if (color_code) out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m";
#endif
}
bool IsColorTerminal() {
#if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
return 0 != _isatty(_fileno(stdout));
#else
// On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char* const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color",
"screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
"linux", "cygwin",
};
const char* const term = getenv("TERM");
bool term_supports_color = false;
for (const char* candidate : SUPPORTED_TERM_VALUES) {
if (term && 0 == strcmp(term, candidate)) {
term_supports_color = true;
break;
}
}
return 0 != isatty(fileno(stdout)) && term_supports_color;
#endif // BENCHMARK_OS_WINDOWS
}
} // end namespace benchmark
07070100000045000081A400000000000000000000000160C0813C000002F8000000000000000000000000000000000000002100000000benchmark-1.5.5/src/colorprint.h#ifndef BENCHMARK_COLORPRINT_H_
#define BENCHMARK_COLORPRINT_H_
#include <cstdarg>
#include <iostream>
#include <string>
namespace benchmark {
enum LogColor {
COLOR_DEFAULT,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
COLOR_WHITE
};
std::string FormatString(const char* msg, va_list args);
std::string FormatString(const char* msg, ...);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
va_list args);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
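// Usage sketch (illustrative call): ColorPrintf(std::cout, COLOR_GREEN,
// "%d benchmarks\n", n) wraps the formatted text in the platform's green
// color code; callers typically consult IsColorTerminal() (below) first.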
// Returns true if stdout appears to be a terminal that supports colored
// output, false otherwise.
bool IsColorTerminal();
} // end namespace benchmark
#endif // BENCHMARK_COLORPRINT_H_
07070100000046000081A400000000000000000000000160C0813C0000258B000000000000000000000000000000000000002800000000benchmark-1.5.5/src/commandlineflags.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "commandlineflags.h"
#include <algorithm>
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>
#include <map>
#include <utility>
#include "../src/string_util.h"
namespace benchmark {
namespace {
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// Parses the environment variable as a decimal integer.
char* end = nullptr;
const long long_value = strtol(str, &end, 10); // NOLINT
// Has strtol() consumed all characters in the string?
if (*end != '\0') {
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
// Is the parsed value in the range of an Int32?
const int32_t result = static_cast<int32_t>(long_value);
if (long_value == std::numeric_limits<long>::max() ||
long_value == std::numeric_limits<long>::min() ||
// The parsed value overflows as a long. (strtol() returns
// LONG_MAX or LONG_MIN when the input overflows.)
result != long_value
// The parsed value overflows as an Int32.
) {
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\", "
<< "which overflows.\n";
return false;
}
*value = result;
return true;
}
// Parses 'str' for a double. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string& src_text, const char* str, double* value) {
// Parses the string as a double.
char* end = nullptr;
const double double_value = strtod(str, &end); // NOLINT
// Has strtod() consumed all characters in the string?
if (*end != '\0') {
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a double, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
*value = double_value;
return true;
}
// Parses 'str' into KV pairs. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseKvPairs(const std::string& src_text, const char* str,
std::map<std::string, std::string>* value) {
std::map<std::string, std::string> kvs;
for (const auto& kvpair : StrSplit(str, ',')) {
const auto kv = StrSplit(kvpair, '=');
if (kv.size() != 2) {
std::cerr << src_text << " is expected to be a comma-separated list of "
<< "<key>=<value> strings, but actually has value \"" << str
<< "\".\n";
return false;
}
if (!kvs.emplace(kv[0], kv[1]).second) {
std::cerr << src_text << " is expected to contain unique keys but key \""
<< kv[0] << "\" was repeated.\n";
return false;
}
}
*value = kvs;
return true;
}
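// Examples (illustrative inputs): "a=1,b=2" parses to {{"a", "1"},
// {"b", "2"}}; "a=1,a=2" fails because the key "a" repeats; "a" alone
// fails because it lacks the '=' separator.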
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("benchmark_filter") will return
// "BENCHMARK_FILTER".
static std::string FlagToEnvVar(const char* flag) {
const std::string flag_str(flag);
std::string env_var;
for (size_t i = 0; i != flag_str.length(); ++i)
env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
return env_var;
}
} // namespace
bool BoolFromEnv(const char* flag, bool default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
}
int32_t Int32FromEnv(const char* flag, int32_t default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
int32_t value = default_val;
if (value_str == nullptr ||
!ParseInt32(std::string("Environment variable ") + env_var, value_str,
&value)) {
return default_val;
}
return value;
}
double DoubleFromEnv(const char* flag, double default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
double value = default_val;
if (value_str == nullptr ||
!ParseDouble(std::string("Environment variable ") + env_var, value_str,
&value)) {
return default_val;
}
return value;
}
const char* StringFromEnv(const char* flag, const char* default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value = getenv(env_var.c_str());
return value == nullptr ? default_val : value;
}
std::map<std::string, std::string> KvPairsFromEnv(
const char* flag, std::map<std::string, std::string> default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
if (value_str == nullptr) return default_val;
std::map<std::string, std::string> value;
if (!ParseKvPairs("Environment variable " + env_var, value_str, &value)) {
return default_val;
}
return value;
}
// Parses a string as a command line flag. The string should have
// the format "--flag=value". When def_optional is true, the "=value"
// part can be omitted.
//
// Returns the value of the flag, or nullptr if the parsing failed.
const char* ParseFlagValue(const char* str, const char* flag,
bool def_optional) {
// str and flag must not be nullptr.
if (str == nullptr || flag == nullptr) return nullptr;
// The flag must start with "--".
const std::string flag_str = std::string("--") + std::string(flag);
const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
// Skips the flag name.
const char* flag_end = str + flag_len;
// When def_optional is true, it's OK to not have a "=value" part.
if (def_optional && (flag_end[0] == '\0')) return flag_end;
// If def_optional is true and there are more characters after the
// flag name, or if def_optional is false, there must be a '=' after
// the flag name.
if (flag_end[0] != '=') return nullptr;
// Returns the string after "=".
return flag_end + 1;
}
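// Examples (derivable from the logic above):
//   ParseFlagValue("--foo=bar", "foo", false) -> "bar"
//   ParseFlagValue("--foo",     "foo", true)  -> ""      (value omitted)
//   ParseFlagValue("--foo",     "foo", false) -> nullptr (missing "=value")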
bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, true);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Converts the string value to a bool.
*value = IsTruthyFlagValue(value_str);
return true;
}
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Sets *value to the value of the flag.
return ParseInt32(std::string("The value of flag --") + flag, value_str,
value);
}
bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Sets *value to the value of the flag.
return ParseDouble(std::string("The value of flag --") + flag, value_str,
value);
}
bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
*value = value_str;
return true;
}
bool ParseKeyValueFlag(
const char* str, const char* flag,
std::map<std::string, std::string>* value) {
const char* const value_str = ParseFlagValue(str, flag, false);
if (value_str == nullptr) return false;
for (const auto& kvpair : StrSplit(value_str, ',')) {
const auto kv = StrSplit(kvpair, '=');
if (kv.size() != 2) return false;
value->emplace(kv[0], kv[1]);
}
return true;
}
bool IsFlag(const char* str, const char* flag) {
return (ParseFlagValue(str, flag, true) != nullptr);
}
bool IsTruthyFlagValue(const std::string& value) {
if (value.size() == 1) {
char v = value[0];
return isalnum(v) &&
!(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
} else if (!value.empty()) {
std::string value_lower(value);
std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
[](char c) { return static_cast<char>(::tolower(c)); });
return !(value_lower == "false" || value_lower == "no" ||
value_lower == "off");
} else
return true;
}
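// Examples: "1", "true", "yes" and the empty string are truthy; "0", "f",
// "no", "off" and "FALSE" are not.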
} // end namespace benchmark
07070100000047000081A400000000000000000000000160C0813C000011D3000000000000000000000000000000000000002700000000benchmark-1.5.5/src/commandlineflags.h#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
#define BENCHMARK_COMMANDLINEFLAGS_H_
#include <cstdint>
#include <map>
#include <string>
// Macro for referencing flags.
#define FLAG(name) FLAGS_##name
// Macros for declaring flags.
#define DECLARE_bool(name) extern bool FLAG(name)
#define DECLARE_int32(name) extern int32_t FLAG(name)
#define DECLARE_double(name) extern double FLAG(name)
#define DECLARE_string(name) extern std::string FLAG(name)
#define DECLARE_kvpairs(name) \
extern std::map<std::string, std::string> FLAG(name)
// Macros for defining flags.
#define DEFINE_bool(name, default_val) \
bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) \
int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) \
double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) \
std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
#define DEFINE_kvpairs(name, default_val) \
std::map<std::string, std::string> FLAG(name) = \
benchmark::KvPairsFromEnv(#name, default_val)
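// Usage sketch (illustrative flag name): DEFINE_string(benchmark_filter, ".")
// defines FLAGS_benchmark_filter, initialized from the BENCHMARK_FILTER
// environment variable when that variable is set.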
namespace benchmark {
// Parses a bool from the environment variable corresponding to the given flag.
//
// If the variable exists, returns IsTruthyFlagValue() value; if not,
// returns the given default value.
bool BoolFromEnv(const char* flag, bool default_val);
// Parses an Int32 from the environment variable corresponding to the given
// flag.
//
// If the variable exists, returns ParseInt32() value; if not, returns
// the given default value.
int32_t Int32FromEnv(const char* flag, int32_t default_val);
// Parses a double from the environment variable corresponding to the given
// flag.
//
// If the variable exists, returns ParseDouble(); if not, returns
// the given default value.
double DoubleFromEnv(const char* flag, double default_val);
// Parses a string from the environment variable corresponding to the given
// flag.
//
// If the variable exists, returns its value; if not, returns
// the given default value.
const char* StringFromEnv(const char* flag, const char* default_val);
// Parses a set of kvpairs from the environment variable corresponding to the
// given flag.
//
// If the variable exists, returns its value; if not, returns
// the given default value.
std::map<std::string, std::string> KvPairsFromEnv(
const char* flag, std::map<std::string, std::string> default_val);
// Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag".
//
// In the former case, the value is taken as true if it passes IsTruthyFlagValue().
//
// In the latter case, the value is taken as true.
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseBoolFlag(const char* str, const char* flag, bool* value);
// Parses a string for an Int32 flag, in the form of "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
// Parses a string for a Double flag, in the form of "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char* str, const char* flag, double* value);
// Parses a string for a string flag, in the form of "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseStringFlag(const char* str, const char* flag, std::string* value);
// Parses a string for a kvpairs flag in the form "--flag=key=value,key=value"
//
// On success, stores the value of the flag in *value and returns true. On
// failure returns false, though *value may have been mutated.
bool ParseKeyValueFlag(const char* str, const char* flag,
std::map<std::string, std::string>* value);
// Returns true if the string matches the flag.
bool IsFlag(const char* str, const char* flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. Also returns false if the value matches
// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
// returns true if value is the empty string.
bool IsTruthyFlagValue(const std::string& value);
} // end namespace benchmark
#endif // BENCHMARK_COMMANDLINEFLAGS_H_
07070100000048000081A400000000000000000000000160C0813C00002091000000000000000000000000000000000000002200000000benchmark-1.5.5/src/complexity.cc// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark
#include "benchmark/benchmark.h"
#include <algorithm>
#include <cmath>
#include "check.h"
#include "complexity.h"
namespace benchmark {
// Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) {
static const double kLog2E = 1.44269504088896340736;
switch (complexity) {
case oN:
return [](IterationCount n) -> double { return static_cast<double>(n); };
case oNSquared:
return [](IterationCount n) -> double { return std::pow(n, 2); };
case oNCubed:
return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return
[](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) {
return kLog2E * n * log(static_cast<double>(n));
};
case o1:
default:
return [](IterationCount) { return 1.0; };
}
}
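// Note for the oLogN/oNLogN cases above: kLog2E * log(n) == log2(n), since
// log2(n) = ln(n) / ln(2) = ln(n) * log2(e); the multiplication stands in
// for the log2() that Android's GNU STL lacks.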
// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity) {
switch (complexity) {
case oN:
return "N";
case oNSquared:
return "N^2";
case oNCubed:
return "N^3";
case oLogN:
return "lgN";
case oNLogN:
return "NlgN";
case o1:
return "(1)";
default:
return "f(N)";
}
}
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squared errors, for the fitting curve
// given by the lambda expression.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
// For a deeper explanation on the algorithm logic, please refer to
// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time,
BigOFunc* fitting_curve) {
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
double sigma_time_gn = 0.0;
// Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i) {
double gn_i = fitting_curve(n[i]);
sigma_gn_squared += gn_i * gn_i;
sigma_time += time[i];
sigma_time_gn += time[i] * gn_i;
}
LeastSq result;
result.complexity = oLambda;
// Calculate complexity.
result.coef = sigma_time_gn / sigma_gn_squared;
// Calculate RMS
double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i) {
double fit = result.coef * fitting_curve(n[i]);
rms += pow((time[i] - fit), 2);
}
// Normalized RMS by the mean of the observed values
double mean = sigma_time / n.size();
result.rms = sqrt(rms / n.size()) / mean;
return result;
}
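// Derivation sketch for the closed form above: the coefficient c minimizing
// F(c) = sum_i (time_i - c * gn_i)^2 satisfies
// F'(c) = -2 * sum_i gn_i * (time_i - c * gn_i) = 0, hence
// c = sum_i(time_i * gn_i) / sum_i(gn_i^2), i.e. sigma_time_gn /
// sigma_gn_squared as computed in the loop.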
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squared errors.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
// - complexity : If different from oAuto, the fitting curve will stick to
// this one. If it is oAuto, the best fitting curve will be
// calculated.
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time, const BigO complexity) {
CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2);  // Do not compute the fitting curve if fewer than
// two benchmark runs are given
CHECK_NE(complexity, oNone);
LeastSq best_fit;
if (complexity == oAuto) {
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve
best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one
for (const auto& fit : fit_curves) {
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms) {
best_fit = current_fit;
best_fit.complexity = fit;
}
}
} else {
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
return best_fit;
}
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
if (reports.size() < 2) return results;
// Accumulators.
std::vector<int64_t> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
// Populate the accumulators.
for (const Run& run : reports) {
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations);
cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
}
LeastSq result_cpu;
LeastSq result_real;
if (reports[0].complexity == oLambda) {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
} else {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
// Drop the 'args' when reporting complexity.
auto run_name = reports[0].run_name;
run_name.args.clear();
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
big_o.run_name = run_name;
big_o.family_index = reports[0].family_index;
big_o.per_family_instance_index = reports[0].per_family_instance_index;
big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
big_o.repetitions = reports[0].repetitions;
big_o.repetition_index = Run::no_repetition_index;
big_o.threads = reports[0].threads;
big_o.aggregate_name = "BigO";
big_o.report_label = reports[0].report_label;
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
big_o.report_big_o = true;
big_o.complexity = result_cpu.complexity;
// All the time results are reported after being multiplied by the
// time unit multiplier. But since RMS is a relative quantity it
// should not be multiplied at all. So, here, we _divide_ it by the
// multiplier so that when it is multiplied later the result is the
// correct one.
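// For example, with kMicrosecond the multiplier is 1e6, so an RMS of
// 0.05 is stored as 0.05 / 1e6 and the reporter's later * 1e6 restores
// the original 0.05.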
double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
// Populate the RMS aggregate run.
Run rms;
rms.run_name = run_name;
rms.family_index = reports[0].family_index;
rms.per_family_instance_index = reports[0].per_family_instance_index;
rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
rms.repetition_index = Run::no_repetition_index;
rms.repetitions = reports[0].repetitions;
rms.threads = reports[0].threads;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
rms.complexity = result_cpu.complexity;
// don't forget to keep the time unit, or we won't be able to
// recover the correct value.
rms.time_unit = reports[0].time_unit;
results.push_back(big_o);
results.push_back(rms);
return results;
}
} // end namespace benchmark
07070100000049000081A400000000000000000000000160C0813C000007BB000000000000000000000000000000000000002100000000benchmark-1.5.5/src/complexity.h// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark
#ifndef COMPLEXITY_H_
#define COMPLEXITY_H_
#include <string>
#include <vector>
#include "benchmark/benchmark.h"
namespace benchmark {
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq
// - coef : Estimated coefficient for the high-order term as
// interpolated from data.
// - rms : Normalized Root Mean Squared Error.
// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability
// form has been provided to MinimalLeastSq this will return
// the same value. In case BigO::oAuto has been selected, this
// parameter will return the best fitting curve detected.
struct LeastSq {
LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
double coef;
double rms;
BigO complexity;
};
// Function to return a string for the calculated complexity.
std::string GetBigOString(BigO complexity);
} // end namespace benchmark
#endif // COMPLEXITY_H_
0707010000004A000081A400000000000000000000000160C0813C000016D9000000000000000000000000000000000000002800000000benchmark-1.5.5/src/console_reporter.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cstdarg>  // for va_list/va_start used by IgnoreColorPrint below
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include "benchmark/benchmark.h"
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "string_util.h"
#include "timers.h"
namespace benchmark {
bool ConsoleReporter::ReportContext(const Context& context) {
name_field_width_ = context.name_field_width;
printed_header_ = false;
prev_counters_.clear();
PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
GetErrorStream()
<< "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color);
}
#endif
return true;
}
void ConsoleReporter::PrintHeader(const Run& run) {
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
"Benchmark", "Time", "CPU", "Iterations");
if(!run.counters.empty()) {
if(output_options_ & OO_Tabular) {
for(auto const& c : run.counters) {
str += FormatString(" %10s", c.first.c_str());
}
} else {
str += " UserCounters...";
}
}
std::string line = std::string(str.length(), '-');
GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
}
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
for (const auto& run : reports) {
// print the header:
// --- if none was printed yet
bool print_header = !printed_header_;
// --- or if the format is tabular and this run
// has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) &&
(!internal::SameNames(run.counters, prev_counters_));
if (print_header) {
printed_header_ = true;
prev_counters_ = run.counters;
PrintHeader(run);
}
// As an alternative to printing the headers like this, we could sort
// the benchmarks by header and then print. But this would require
// waiting for the full results before printing, or printing twice.
PrintRunData(run);
}
}
static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
...) {
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
va_end(args);
}
static std::string FormatTime(double time) {
// Align decimal places...
if (time < 1.0) {
return FormatString("%10.3f", time);
}
if (time < 10.0) {
return FormatString("%10.2f", time);
}
if (time < 100.0) {
return FormatString("%10.1f", time);
}
return FormatString("%10.0f", time);
}
void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();
PrinterFn* printer = (output_options_ & OO_Color) ?
(PrinterFn*)ColorPrintf : IgnoreColorPrint;
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name().c_str());
if (result.error_occurred) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
cpu_time, big_o.c_str());
} else if (result.report_rms) {
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
cpu_time * 100, "%");
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
cpu_time_str.c_str(), timeLabel);
}
if (!result.report_big_o && !result.report_rms) {
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto& c : result.counters) {
const std::size_t cNameLen = std::max(std::string::size_type(10),
c.first.length());
auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
const char* unit = "";
if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular) {
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
unit);
} else {
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
}
}
if (!result.report_label.empty()) {
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
printer(Out, COLOR_DEFAULT, "\n");
}
} // end namespace benchmark
0707010000004B000081A400000000000000000000000160C0813C00000851000000000000000000000000000000000000001F00000000benchmark-1.5.5/src/counter.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "counter.h"
namespace benchmark {
namespace internal {
double Finish(Counter const& c, IterationCount iterations, double cpu_time,
double num_threads) {
double v = c.value;
if (c.flags & Counter::kIsRate) {
v /= cpu_time;
}
if (c.flags & Counter::kAvgThreads) {
v /= num_threads;
}
if (c.flags & Counter::kIsIterationInvariant) {
v *= iterations;
}
if (c.flags & Counter::kAvgIterations) {
v /= iterations;
}
if (c.flags & Counter::kInvert) { // Invert is *always* last.
v = 1.0 / v;
}
return v;
}
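// For example (hypothetical values): a counter of 100 with flags
// kIsRate | kInvert and cpu_time == 2.0 first becomes 50 (the rate),
// and then 1.0 / 50 == 0.02, i.e. seconds per item rather than items
// per second, because inversion is applied last.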
void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
double num_threads) {
for (auto& c : *l) {
c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
}
void Increment(UserCounters* l, UserCounters const& r) {
// add counters present in both or just in *l
for (auto& c : *l) {
auto it = r.find(c.first);
if (it != r.end()) {
c.second.value = c.second + it->second;
}
}
// add counters present in r, but not in *l
for (auto const& tc : r) {
auto it = l->find(tc.first);
if (it == l->end()) {
(*l)[tc.first] = tc.second;
}
}
}
bool SameNames(UserCounters const& l, UserCounters const& r) {
if (&l == &r) return true;
if (l.size() != r.size()) {
return false;
}
for (auto const& c : l) {
if (r.find(c.first) == r.end()) {
return false;
}
}
return true;
}
} // end namespace internal
} // end namespace benchmark
0707010000004C000081A400000000000000000000000160C0813C00000469000000000000000000000000000000000000001E00000000benchmark-1.5.5/src/counter.h// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_COUNTER_H_
#define BENCHMARK_COUNTER_H_
#include "benchmark/benchmark.h"
namespace benchmark {
// These counter-related functions are hidden to reduce the API surface.
namespace internal {
void Finish(UserCounters* l, IterationCount iterations, double time,
double num_threads);
void Increment(UserCounters* l, UserCounters const& r);
bool SameNames(UserCounters const& l, UserCounters const& r);
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_COUNTER_H_
0707010000004D000081A400000000000000000000000160C0813C00001114000000000000000000000000000000000000002400000000benchmark-1.5.5/src/csv_reporter.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "complexity.h"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include "check.h"
#include "string_util.h"
#include "timers.h"
// File format reference: http://edoceo.com/utilitas/csv-file-format.
namespace benchmark {
namespace {
std::vector<std::string> elements = {
"name", "iterations", "real_time", "cpu_time",
"time_unit", "bytes_per_second", "items_per_second", "label",
"error_occurred", "error_message"};
} // namespace
std::string CsvEscape(const std::string & s) {
std::string tmp;
tmp.reserve(s.size() + 2);
for (char c : s) {
switch (c) {
case '"' : tmp += "\"\""; break;
default : tmp += c; break;
}
}
return '"' + tmp + '"';
}
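// For example, the field  say "hi"  is escaped to  "say ""hi"""  : the
// whole field is wrapped in quotes and any embedded quotes are doubled.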
bool CSVReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);
return true;
}
void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
std::ostream& Out = GetOutputStream();
if (!printed_header_) {
// save the names of all the user counters
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
user_counter_names_.insert(cnt.first);
}
}
// print the header
for (auto B = elements.begin(); B != elements.end();) {
Out << *B++;
if (B != elements.end()) Out << ",";
}
for (auto B = user_counter_names_.begin();
B != user_counter_names_.end();) {
Out << ",\"" << *B++ << "\"";
}
Out << "\n";
printed_header_ = true;
} else {
// check that all the current counters are saved in the name set
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. "
<< "Counter named \"" << cnt.first
<< "\" was not in a run after being added to the header";
}
}
}
// print results for each run
for (const auto& run : reports) {
PrintRunData(run);
}
}
void CSVReporter::PrintRunData(const Run& run) {
std::ostream& Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ",";
if (run.error_occurred) {
Out << std::string(elements.size() - 3, ',');
Out << "true,";
Out << CsvEscape(run.error_message) << "\n";
return;
}
// Do not print iterations on bigO and RMS reports
if (!run.report_big_o && !run.report_rms) {
Out << run.iterations;
}
Out << ",";
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
// Do not print timeLabel on bigO and RMS report
if (run.report_big_o) {
Out << GetBigOString(run.complexity);
} else if (!run.report_rms) {
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
if (run.counters.find("bytes_per_second") != run.counters.end()) {
Out << run.counters.at("bytes_per_second");
}
Out << ",";
if (run.counters.find("items_per_second") != run.counters.end()) {
Out << run.counters.at("items_per_second");
}
Out << ",";
if (!run.report_label.empty()) {
Out << CsvEscape(run.report_label);
}
Out << ",,"; // for error_occurred and error_message
// Print user counters
for (const auto& ucn : user_counter_names_) {
auto it = run.counters.find(ucn);
if (it == run.counters.end()) {
Out << ",";
} else {
Out << "," << it->second;
}
}
Out << '\n';
}
} // end namespace benchmark
0707010000004E000081A400000000000000000000000160C0813C000023B9000000000000000000000000000000000000002100000000benchmark-1.5.5/src/cycleclock.h// ----------------------------------------------------------------------
// CycleClock
// A CycleClock tells you the current time in Cycles. The "time"
// is actually time since power-on. This is like time() but doesn't
// involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface which provides millisecond
// granularity and is implemented as a memory read. A memory read is generally
// cheaper than the CycleClock for many architectures.
//
// Also, in some out of order CPU implementations, the CycleClock is not
// serializing. So if you're trying to count at cycles granularity, your
// data might be inaccurate due to out of order instruction execution.
// ----------------------------------------------------------------------
#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_
#include <cstdint>
#include "benchmark/benchmark.h"
#include "internal_macros.h"
#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif
// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif
#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif
namespace benchmark {
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
// This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#if defined(BENCHMARK_OS_MACOSX)
// this goes at the top because we need ALL Macs, regardless of
// architecture, to return the number of "mach time units" that
// have passed since startup. See sysinfo.cc where
// InitializeSystemInfo() sets the supposed cpu clock frequency of
// macs to the number of mach time units per second, not actual
// CPU clock frequency (which can change in the face of CPU
// frequency scaling). Also note that when the Mac sleeps, this
// counter pauses; it does not continue counting, nor does it
// reset to zero.
return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// this goes above x86-specific code because old versions of Emscripten
// define __x86_64__, although they have nothing to do with it.
return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
return ret;
#elif defined(__x86_64__) || defined(__amd64__)
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
int64_t tb;
asm volatile("mfspr %0, 268" : "=r"(tb));
return tb;
#else
uint32_t tbl, tbu0, tbu1;
asm volatile(
"mftbu %0\n"
"mftb %1\n"
"mftbu %2"
: "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
tbl &= -static_cast<int32_t>(tbu0 == tbu1);
// high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
return (static_cast<uint64_t>(tbu1) << 32) | tbl;
#endif
#elif defined(__sparc__)
int64_t tick;
asm(".byte 0x83, 0x41, 0x00, 0x00");
asm("mov %%g1, %0" : "=r"(tick));
return tick;
#elif defined(__ia64__)
int64_t itc;
asm("mov %0 = ar.itc" : "=r"(itc));
return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
// Older MSVC compilers (like 7.x) don't seem to support the
// __rdtsc intrinsic properly, so I prefer to use _asm instead
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
#elif defined(COMPILER_MSVC) && defined(_M_ARM64)
// See https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019
// and https://reviews.llvm.org/D53115
int64_t virtual_timer_value;
virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
return virtual_timer_value;
#elif defined(COMPILER_MSVC)
return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
// Native Client validator on x86/x86-64 allows RDTSC instructions,
// and this case is handled above. Native Client validator on ARM
// rejects MRC instructions (used in the ARM-specific sequence below),
// so we handle it here. Portable Native Client compiles to
// architecture-agnostic bytecode, which doesn't provide any
// cycle counter access mnemonics.
// Native Client does not provide any API to access cycle counter.
// Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
// because it provides nanosecond resolution (which is noticeable at
// least for PNaCl modules running on x86 Mac & Linux).
// Initialize to always return 0 if clock_gettime fails.
struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
// System timer of ARMv8 runs at a different frequency than the CPU's.
// The frequency is fixed, typically in the range 1-50MHz. It can be
// read at CNTFRQ special register. We assume the OS has set up
// the virtual timer properly.
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
#elif defined(__ARM_ARCH)
// V6 is the earliest arch that has a standard cyclecount
// Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
uint32_t pmccntr;
uint32_t pmuseren;
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
}
}
#endif
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__) || defined(__m68k__)
// mips apparently only allows reading the cycle counter for superusers, so we fall
// back to gettimeofday. It's possible clock_gettime would be better.
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__loongarch__)
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL)
// z/OS XL compiler HLASM syntax.
asm(" stck %0" : "=m"(tsc) : : "cc");
#else
asm("stck %0" : "=Q"(tsc) : : "cc");
#endif
return tsc;
#elif defined(__riscv) // RISC-V
// Use RDCYCLE (and RDCYCLEH on riscv32)
#if __riscv_xlen == 32
uint32_t cycles_lo, cycles_hi0, cycles_hi1;
// This asm also includes the PowerPC overflow handling strategy, as above.
// Implemented in assembly because Clang insisted on branching.
asm volatile(
"rdcycleh %0\n"
"rdcycle %1\n"
"rdcycleh %2\n"
"sub %0, %0, %2\n"
"seqz %0, %0\n"
"sub %0, zero, %0\n"
"and %1, %1, %0\n"
: "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
uint64_t cycles;
asm volatile("rdcycle %0" : "=r"(cycles));
return cycles;
#endif
#elif defined(__e2k__) || defined(__elbrus__)
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to make an attempt to create
// a fast implementation and to use the generic version only if nothing better
// is available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
} // end namespace cycleclock
} // end namespace benchmark
#endif // BENCHMARK_CYCLECLOCK_H_
0707010000004F000081A400000000000000000000000160C0813C000009B4000000000000000000000000000000000000002600000000benchmark-1.5.5/src/internal_macros.h#ifndef BENCHMARK_INTERNAL_MACROS_H_
#define BENCHMARK_INTERNAL_MACROS_H_
#include "benchmark/benchmark.h"
/* Needed to detect STL */
#include <cstdlib>
// clang-format off
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#if defined(__clang__)
#if defined(__ibmxl__)
#if !defined(COMPILER_IBMXL)
#define COMPILER_IBMXL
#endif
#elif !defined(COMPILER_CLANG)
#define COMPILER_CLANG
#endif
#elif defined(_MSC_VER)
#if !defined(COMPILER_MSVC)
#define COMPILER_MSVC
#endif
#elif defined(__GNUC__)
#if !defined(COMPILER_GCC)
#define COMPILER_GCC
#endif
#endif
#if __has_feature(cxx_attributes)
#define BENCHMARK_NORETURN [[noreturn]]
#elif defined(__GNUC__)
#define BENCHMARK_NORETURN __attribute__((noreturn))
#elif defined(COMPILER_MSVC)
#define BENCHMARK_NORETURN __declspec(noreturn)
#else
#define BENCHMARK_NORETURN
#endif
#if defined(__CYGWIN__)
#define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
#if defined(__MINGW32__)
#define BENCHMARK_OS_MINGW 1
#endif
#elif defined(__APPLE__)
#define BENCHMARK_OS_APPLE 1
#include "TargetConditionals.h"
#if defined(TARGET_OS_MAC)
#define BENCHMARK_OS_MACOSX 1
#if defined(TARGET_OS_IPHONE)
#define BENCHMARK_OS_IOS 1
#endif
#endif
#elif defined(__FreeBSD__)
#define BENCHMARK_OS_FREEBSD 1
#elif defined(__NetBSD__)
#define BENCHMARK_OS_NETBSD 1
#elif defined(__OpenBSD__)
#define BENCHMARK_OS_OPENBSD 1
#elif defined(__DragonFly__)
#define BENCHMARK_OS_DRAGONFLY 1
#elif defined(__linux__)
#define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
#define BENCHMARK_OS_NACL 1
#elif defined(__EMSCRIPTEN__)
#define BENCHMARK_OS_EMSCRIPTEN 1
#elif defined(__rtems__)
#define BENCHMARK_OS_RTEMS 1
#elif defined(__Fuchsia__)
#define BENCHMARK_OS_FUCHSIA 1
#elif defined (__SVR4) && defined (__sun)
#define BENCHMARK_OS_SOLARIS 1
#elif defined(__QNX__)
#define BENCHMARK_OS_QNX 1
#elif defined(__MVS__)
#define BENCHMARK_OS_ZOS 1
#endif
#if defined(__ANDROID__) && defined(__GLIBCXX__)
#define BENCHMARK_STL_ANDROID_GNUSTL 1
#endif
#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
&& !defined(__EXCEPTIONS)
#define BENCHMARK_HAS_NO_EXCEPTIONS
#endif
#if defined(COMPILER_CLANG) || defined(COMPILER_GCC)
#define BENCHMARK_MAYBE_UNUSED __attribute__((unused))
#else
#define BENCHMARK_MAYBE_UNUSED
#endif
// clang-format on
#endif // BENCHMARK_INTERNAL_MACROS_H_
07070100000050000081A400000000000000000000000160C0813C000021AE000000000000000000000000000000000000002500000000benchmark-1.5.5/src/json_reporter.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "complexity.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iomanip> // for setprecision
#include <iostream>
#include <limits>
#include <map>  // for the global_context declaration below
#include <string>
#include <tuple>
#include <vector>
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace internal {
extern std::map<std::string, std::string>* global_context;
}
namespace {
std::string StrEscape(const std::string & s) {
std::string tmp;
tmp.reserve(s.size());
for (char c : s) {
switch (c) {
case '\b': tmp += "\\b"; break;
case '\f': tmp += "\\f"; break;
case '\n': tmp += "\\n"; break;
case '\r': tmp += "\\r"; break;
case '\t': tmp += "\\t"; break;
case '\\': tmp += "\\\\"; break;
case '"' : tmp += "\\\""; break;
default : tmp += c; break;
}
}
return tmp;
}
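// For example, a label containing a tab and a quote, such as  a<TAB>"b ,
// is emitted as  a\t\"b , keeping the JSON output well-formed.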
std::string FormatKV(std::string const& key, std::string const& value) {
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, const char* value) {
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, bool value) {
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const& key, int64_t value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, IterationCount value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, double value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
if (std::isnan(value))
ss << (value < 0 ? "-" : "") << "NaN";
else if (std::isinf(value))
ss << (value < 0 ? "-" : "") << "Infinity";
else {
const auto max_digits10 =
std::numeric_limits<decltype(value)>::max_digits10;
const auto max_fractional_digits10 = max_digits10 - 1;
ss << std::scientific << std::setprecision(max_fractional_digits10)
<< value;
}
return ss.str();
}
int64_t RoundDouble(double v) { return std::lround(v); }
} // end namespace
bool JSONReporter::ReportContext(const Context& context) {
std::ostream& out = GetOutputStream();
out << "{\n";
std::string inner_indent(2, ' ');
// Open context block and print context information.
out << inner_indent << "\"context\": {\n";
std::string indent(4, ' ');
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
if (Context::executable_name) {
out << indent << FormatKV("executable", Context::executable_name) << ",\n";
}
CPUInfo const& info = context.cpu_info;
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
<< ",\n";
out << indent
<< FormatKV("mhz_per_cpu",
RoundDouble(info.cycles_per_second / 1000000.0))
<< ",\n";
if (CPUInfo::Scaling::UNKNOWN != info.scaling) {
out << indent << FormatKV("cpu_scaling_enabled", info.scaling == CPUInfo::Scaling::ENABLED ? true : false)
<< ",\n";
}
out << indent << "\"caches\": [\n";
indent = std::string(6, ' ');
std::string cache_indent(8, ' ');
for (size_t i = 0; i < info.caches.size(); ++i) {
auto& CI = info.caches[i];
out << indent << "{\n";
out << cache_indent << FormatKV("type", CI.type) << ",\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
<< ",\n";
out << cache_indent
<< FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent
<< FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
<< "\n";
out << indent << "}";
if (i != info.caches.size() - 1) out << ",";
out << "\n";
}
indent = std::string(4, ' ');
out << indent << "],\n";
out << indent << "\"load_avg\": [";
for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
out << *it++;
if (it != info.load_avg.end()) out << ",";
}
out << "],\n";
#if defined(NDEBUG)
const char build_type[] = "release";
#else
const char build_type[] = "debug";
#endif
out << indent << FormatKV("library_build_type", build_type) << "\n";
if (internal::global_context != nullptr) {
for (const auto& kv: *internal::global_context) {
out << indent << FormatKV(kv.first, kv.second) << "\n";
}
}
// Close context block and open the list of benchmarks.
out << inner_indent << "},\n";
out << inner_indent << "\"benchmarks\": [\n";
return true;
}
void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
if (reports.empty()) {
return;
}
std::string indent(4, ' ');
std::ostream& out = GetOutputStream();
if (!first_report_) {
out << ",\n";
}
first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it) {
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end()) {
out << ",\n";
}
}
}
void JSONReporter::Finalize() {
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const& run) {
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
out << indent << FormatKV("family_index", run.family_index) << ",\n";
out << indent
<< FormatKV("per_family_instance_index", run.per_family_instance_index)
<< ",\n";
out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char* {
switch (run.run_type) {
case BenchmarkReporter::Run::RT_Iteration:
return "iteration";
case BenchmarkReporter::Run::RT_Aggregate:
return "aggregate";
}
BENCHMARK_UNREACHABLE();
}()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("repetition_index", run.repetition_index)
<< ",\n";
}
out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
if (run.error_occurred) {
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms) {
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n"
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
<< ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
<< ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_rms) {
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
for (auto& c : run.counters) {
out << ",\n" << indent << FormatKV(c.first, c.second);
}
if (run.has_memory_result) {
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
}
if (!run.report_label.empty()) {
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';
}
} // end namespace benchmark
07070100000051000081A400000000000000000000000160C0813C0000065D000000000000000000000000000000000000001A00000000benchmark-1.5.5/src/log.h#ifndef BENCHMARK_LOG_H_
#define BENCHMARK_LOG_H_
#include <iostream>
#include <ostream>
#include "benchmark/benchmark.h"
namespace benchmark {
namespace internal {
typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&);
class LogType {
friend LogType& GetNullLogInstance();
friend LogType& GetErrorLogInstance();
// FIXME: Add locking to output.
template <class Tp>
friend LogType& operator<<(LogType&, Tp const&);
friend LogType& operator<<(LogType&, EndLType*);
private:
LogType(std::ostream* out) : out_(out) {}
std::ostream* out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
};
template <class Tp>
LogType& operator<<(LogType& log, Tp const& value) {
if (log.out_) {
*log.out_ << value;
}
return log;
}
inline LogType& operator<<(LogType& log, EndLType* m) {
if (log.out_) {
*log.out_ << m;
}
return log;
}
inline int& LogLevel() {
static int log_level = 0;
return log_level;
}
inline LogType& GetNullLogInstance() {
static LogType log(nullptr);
return log;
}
inline LogType& GetErrorLogInstance() {
static LogType log(&std::clog);
return log;
}
inline LogType& GetLogInstanceForLevel(int level) {
if (level <= LogLevel()) {
return GetErrorLogInstance();
}
return GetNullLogInstance();
}
} // end namespace internal
} // end namespace benchmark
// clang-format off
#define VLOG(x) \
(::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
" ")
// clang-format on
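// Usage sketch: VLOG(1) << "message\n"; writes to the error log stream
// only when the current log level (see LogLevel() above) is at least 1,
// and is routed to the null log otherwise.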
#endif
07070100000052000081A400000000000000000000000160C0813C000011DF000000000000000000000000000000000000001C00000000benchmark-1.5.5/src/mutex.h#ifndef BENCHMARK_MUTEX_H_
#define BENCHMARK_MUTEX_H_
#include <condition_variable>
#include <mutex>
#include "check.h"
// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
#endif
#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \
THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \
THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
namespace benchmark {
typedef std::condition_variable Condition;
// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex {
public:
Mutex() {}
void lock() ACQUIRE() { mut_.lock(); }
void unlock() RELEASE() { mut_.unlock(); }
std::mutex& native_handle() { return mut_; }
private:
std::mutex mut_;
};
class SCOPED_CAPABILITY MutexLock {
typedef std::unique_lock<std::mutex> MutexLockImp;
public:
MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
~MutexLock() RELEASE() {}
MutexLockImp& native_handle() { return ml_; }
private:
MutexLockImp ml_;
};
class Barrier {
public:
Barrier(int num_threads) : running_threads_(num_threads) {}
// Called by each thread
bool wait() EXCLUDES(lock_) {
bool last_thread = false;
{
MutexLock ml(lock_);
last_thread = createBarrier(ml);
}
if (last_thread) phase_condition_.notify_all();
return last_thread;
}
void removeThread() EXCLUDES(lock_) {
MutexLock ml(lock_);
--running_threads_;
if (entered_ != 0) phase_condition_.notify_all();
}
private:
Mutex lock_;
Condition phase_condition_;
int running_threads_;
// State for barrier management
int phase_number_ = 0;
int entered_ = 0; // Number of threads that have entered this barrier
// Enter the barrier and wait until all other threads have also
// entered the barrier. Returns true iff this is the last thread to
// enter the barrier.
bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
CHECK_LT(entered_, running_threads_);
entered_++;
if (entered_ < running_threads_) {
// Wait for all threads to enter
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp) return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
phase_number_++;
entered_ = 0;
return true;
}
};
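// A hypothetical usage sketch: every worker thread calls wait() at the
// end of a phase; exactly one caller per phase observes wait() == true
// and can perform phase-global work before the next phase starts.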
} // end namespace benchmark
#endif // BENCHMARK_MUTEX_H_
07070100000053000081A400000000000000000000000160C0813C00000FC5000000000000000000000000000000000000002500000000benchmark-1.5.5/src/perf_counters.cc// Copyright 2021 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "perf_counters.h"
#include <cstring>
#include <vector>
#if defined HAVE_LIBPFM
#include "perfmon/pfmlib.h"
#include "perfmon/pfmlib_perf_event.h"
#endif
namespace benchmark {
namespace internal {
constexpr size_t PerfCounterValues::kMaxCounters;
#if defined HAVE_LIBPFM
const bool PerfCounters::kSupported = true;
bool PerfCounters::Initialize() { return pfm_initialize() == PFM_SUCCESS; }
PerfCounters PerfCounters::Create(
const std::vector<std::string>& counter_names) {
if (counter_names.empty()) {
return NoCounters();
}
if (counter_names.size() > PerfCounterValues::kMaxCounters) {
GetErrorLogInstance()
<< counter_names.size()
<< " counters were requested. The minimum is 1, the maximum is "
<< PerfCounterValues::kMaxCounters << "\n";
return NoCounters();
}
std::vector<int> counter_ids(counter_names.size());
const int mode = PFM_PLM3; // user mode only
for (size_t i = 0; i < counter_names.size(); ++i) {
const bool is_first = i == 0;
struct perf_event_attr attr{};
attr.size = sizeof(attr);
const int group_id = !is_first ? counter_ids[0] : -1;
const auto& name = counter_names[i];
if (name.empty()) {
GetErrorLogInstance() << "A counter name was the empty string\n";
return NoCounters();
}
pfm_perf_encode_arg_t arg{};
arg.attr = &attr;
const int pfm_get =
pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT, &arg);
if (pfm_get != PFM_SUCCESS) {
GetErrorLogInstance() << "Unknown counter name: " << name << "\n";
return NoCounters();
}
attr.disabled = is_first;
// Note: the man page for perf_event_open suggests that inherit = true and
// read_format = PERF_FORMAT_GROUP don't work together, but that's not the
// case.
attr.inherit = true;
attr.pinned = is_first;
attr.exclude_kernel = true;
attr.exclude_user = false;
attr.exclude_hv = true;
// Read all counters in one read.
attr.read_format = PERF_FORMAT_GROUP;
int id = -1;
static constexpr size_t kNrOfSyscallRetries = 5;
// Retry syscall as it was interrupted often (b/64774091).
for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries;
++num_retries) {
id = perf_event_open(&attr, 0, -1, group_id, 0);
if (id >= 0 || errno != EINTR) {
break;
}
}
if (id < 0) {
GetErrorLogInstance()
<< "Failed to get a file descriptor for " << name << "\n";
return NoCounters();
}
counter_ids[i] = id;
}
if (ioctl(counter_ids[0], PERF_EVENT_IOC_ENABLE) != 0) {
GetErrorLogInstance() << "Failed to start counters\n";
return NoCounters();
}
return PerfCounters(counter_names, std::move(counter_ids));
}
PerfCounters::~PerfCounters() {
if (counter_ids_.empty()) {
return;
}
ioctl(counter_ids_[0], PERF_EVENT_IOC_DISABLE);
for (int fd : counter_ids_) {
close(fd);
}
}
#else // defined HAVE_LIBPFM
const bool PerfCounters::kSupported = false;
bool PerfCounters::Initialize() { return false; }
PerfCounters PerfCounters::Create(
const std::vector<std::string>& counter_names) {
if (!counter_names.empty()) {
GetErrorLogInstance() << "Performance counters not supported.";
}
return NoCounters();
}
PerfCounters::~PerfCounters() = default;
#endif // defined HAVE_LIBPFM
} // namespace internal
} // namespace benchmark
07070100000054000081A400000000000000000000000160C0813C0000171A000000000000000000000000000000000000002400000000benchmark-1.5.5/src/perf_counters.h// Copyright 2021 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_PERF_COUNTERS_H
#define BENCHMARK_PERF_COUNTERS_H
#include <array>
#include <cstdint>
#include <vector>
#include "benchmark/benchmark.h"
#include "check.h"
#include "log.h"
#ifndef BENCHMARK_OS_WINDOWS
#include <unistd.h>
#endif
namespace benchmark {
namespace internal {
// Typically, we can only read a small number of counters. There is also
// padding preceding the counter values when reading multiple counters with
// one syscall (which is desirable). PerfCounterValues abstracts these details.
// The implementation ensures the storage is inlined, and allows 0-based
// indexing into the counter values.
// The object is used in conjunction with a PerfCounters object, by passing it
// to Snapshot(). The values are populated such that
// perfCounters->names()[i]'s value is obtained at position i (as given by
// operator[]) of this object.
class PerfCounterValues {
public:
explicit PerfCounterValues(size_t nr_counters) : nr_counters_(nr_counters) {
CHECK_LE(nr_counters_, kMaxCounters);
}
uint64_t operator[](size_t pos) const { return values_[kPadding + pos]; }
static constexpr size_t kMaxCounters = 3;
private:
friend class PerfCounters;
// Get the byte buffer in which perf counters can be captured.
// This is used by PerfCounters::Read
std::pair<char*, size_t> get_data_buffer() {
return {reinterpret_cast<char*>(values_.data()),
sizeof(uint64_t) * (kPadding + nr_counters_)};
}
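// With PERF_FORMAT_GROUP (set in perf_counters.cc), each read() is
// prefixed by a u64 'nr' field holding the number of counters read;
// kPadding reserves that leading slot so operator[] stays 0-based.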
static constexpr size_t kPadding = 1;
std::array<uint64_t, kPadding + kMaxCounters> values_;
const size_t nr_counters_;
};
// Collect PMU counters. The object, once constructed, is ready to be used by
// calling Snapshot(). PMU counter collection is enabled from the time Create()
// is called (to obtain the object) until the object's destructor is called.
class PerfCounters final {
public:
// True iff this platform supports performance counters.
static const bool kSupported;
bool IsValid() const { return is_valid_; }
static PerfCounters NoCounters() { return PerfCounters(); }
~PerfCounters();
PerfCounters(PerfCounters&&) = default;
PerfCounters(const PerfCounters&) = delete;
// Platform-specific implementations may choose to do some library
// initialization here.
static bool Initialize();
// Return a PerfCounters object ready to read the counters with the names
// specified. The values are user-mode only. The counter name format is
// implementation and OS specific.
// TODO: once we move to C++-17, this should be a std::optional, and then the
// IsValid() boolean can be dropped.
static PerfCounters Create(const std::vector<std::string>& counter_names);
// Take a snapshot of the current value of the counters into the provided
// valid PerfCounterValues storage. The values are populated such that:
// names()[i]'s value is (*values)[i]
BENCHMARK_ALWAYS_INLINE bool Snapshot(PerfCounterValues* values) const {
#ifndef BENCHMARK_OS_WINDOWS
assert(values != nullptr);
assert(IsValid());
auto buffer = values->get_data_buffer();
auto read_bytes = ::read(counter_ids_[0], buffer.first, buffer.second);
return static_cast<size_t>(read_bytes) == buffer.second;
#else
(void)values;
return false;
#endif
}
const std::vector<std::string>& names() const { return counter_names_; }
size_t num_counters() const { return counter_names_.size(); }
private:
PerfCounters(const std::vector<std::string>& counter_names,
std::vector<int>&& counter_ids)
: counter_ids_(std::move(counter_ids)),
counter_names_(counter_names),
is_valid_(true) {}
PerfCounters() : is_valid_(false) {}
std::vector<int> counter_ids_;
const std::vector<std::string> counter_names_;
const bool is_valid_;
};
// Typical usage of the above primitives.
class PerfCountersMeasurement final {
public:
PerfCountersMeasurement(PerfCounters&& c)
: counters_(std::move(c)),
start_values_(counters_.IsValid() ? counters_.names().size() : 0),
end_values_(counters_.IsValid() ? counters_.names().size() : 0) {}
bool IsValid() const { return counters_.IsValid(); }
BENCHMARK_ALWAYS_INLINE void Start() {
assert(IsValid());
// Tell the compiler to not move instructions above/below where we take
// the snapshot.
ClobberMemory();
counters_.Snapshot(&start_values_);
ClobberMemory();
}
BENCHMARK_ALWAYS_INLINE std::vector<std::pair<std::string, double>>
StopAndGetMeasurements() {
assert(IsValid());
// Tell the compiler to not move instructions above/below where we take
// the snapshot.
ClobberMemory();
counters_.Snapshot(&end_values_);
ClobberMemory();
std::vector<std::pair<std::string, double>> ret;
for (size_t i = 0; i < counters_.names().size(); ++i) {
double measurement = static_cast<double>(end_values_[i]) -
static_cast<double>(start_values_[i]);
ret.push_back({counters_.names()[i], measurement});
}
return ret;
}
private:
PerfCounters counters_;
PerfCounterValues start_values_;
PerfCounterValues end_values_;
};
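// A hypothetical usage sketch (counter names are OS and implementation
// specific; "CYCLES" is illustrative only):
//   PerfCountersMeasurement m(PerfCounters::Create({"CYCLES"}));
//   if (m.IsValid()) {
//     m.Start();
//     // ... measured region ...
//     auto deltas = m.StopAndGetMeasurements();  // {{"CYCLES", <delta>}}
//   }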
BENCHMARK_UNUSED static bool perf_init_anchor = PerfCounters::Initialize();
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_PERF_COUNTERS_H
07070100000055000081A400000000000000000000000160C0813C00000F3C000000000000000000000000000000000000001900000000benchmark-1.5.5/src/re.h// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_RE_H_
#define BENCHMARK_RE_H_
#include "internal_macros.h"
// clang-format off
#if !defined(HAVE_STD_REGEX) && \
!defined(HAVE_GNU_POSIX_REGEX) && \
!defined(HAVE_POSIX_REGEX)
// No explicit regex selection; detect based on builtin hints.
#if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE)
#define HAVE_POSIX_REGEX 1
#elif __cplusplus >= 201103L  // std::regex requires C++11
#define HAVE_STD_REGEX 1
#endif
#endif
// Prefer C regex libraries when compiling w/o exceptions so that we can
// correctly report errors.
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
defined(HAVE_STD_REGEX) && \
(defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
#undef HAVE_STD_REGEX
#endif
#if defined(HAVE_STD_REGEX)
#include <regex>
#elif defined(HAVE_GNU_POSIX_REGEX)
#include <gnuregex.h>
#elif defined(HAVE_POSIX_REGEX)
#include <regex.h>
#else
#error No regular expression backend was found!
#endif
// clang-format on
#include <string>
#include "check.h"
namespace benchmark {
// A wrapper around the POSIX regular expression API that provides automatic
// cleanup
class Regex {
public:
Regex() : init_(false) {}
~Regex();
// Compile a regular expression matcher from spec. Returns true on success.
//
// On failure (and if error is not nullptr), error is populated with a
// human-readable error message.
bool Init(const std::string& spec, std::string* error);
// Returns whether str matches the compiled regular expression.
bool Match(const std::string& str);
private:
bool init_;
// Underlying regular expression object
#if defined(HAVE_STD_REGEX)
std::regex re_;
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
regex_t re_;
#else
#error No regular expression backend implementation available
#endif
};
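// A hypothetical usage sketch:
//   Regex re;
//   std::string err;
//   if (re.Init("^BM_", &err) && re.Match(name)) {
//     // name starts with "BM_"
//   }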
#if defined(HAVE_STD_REGEX)
inline bool Regex::Init(const std::string& spec, std::string* error) {
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
((void)error); // suppress unused warning
#else
try {
#endif
re_ = std::regex(spec, std::regex_constants::extended);
init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
}
catch (const std::regex_error& e) {
if (error) {
*error = e.what();
}
}
#endif
return init_;
}
inline Regex::~Regex() {}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
return false;
}
return std::regex_search(str, re_);
}
#else
inline bool Regex::Init(const std::string& spec, std::string* error) {
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
if (error) {
size_t needed = regerror(ec, &re_, nullptr, 0);
char* errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed);
// regerror returns the number of bytes necessary to null terminate
// the string, so we drop that terminator byte when assigning to error.
CHECK_NE(needed, 0);
error->assign(errbuf, needed - 1);
delete[] errbuf;
}
return false;
}
init_ = true;
return true;
}
inline Regex::~Regex() {
if (init_) {
regfree(&re_);
}
}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
return false;
}
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
}
#endif
} // end namespace benchmark
#endif // BENCHMARK_RE_H_
07070100000056000081A400000000000000000000000160C0813C00000DED000000000000000000000000000000000000002000000000benchmark-1.5.5/src/reporter.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "timers.h"
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>
#include <tuple>
#include <vector>
#include "check.h"
#include "string_util.h"
namespace benchmark {
namespace internal {
extern std::map<std::string, std::string>* global_context;
}
BenchmarkReporter::BenchmarkReporter()
: output_stream_(&std::cout), error_stream_(&std::cerr) {}
BenchmarkReporter::~BenchmarkReporter() {}
void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Context const &context) {
CHECK(out) << "cannot be null";
auto &Out = *out;
Out << LocalDateTimeString() << "\n";
if (context.executable_name)
Out << "Running " << context.executable_name << "\n";
const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X "
<< (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< ((info.num_cpus > 1) ? "s" : "") << ")\n";
if (info.caches.size() != 0) {
Out << "CPU Caches:\n";
for (auto &CInfo : info.caches) {
Out << " L" << CInfo.level << " " << CInfo.type << " "
<< (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n";
}
}
if (!info.load_avg.empty()) {
Out << "Load Average: ";
for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
Out << StrFormat("%.2f", *It++);
if (It != info.load_avg.end()) Out << ", ";
}
Out << "\n";
}
if (internal::global_context != nullptr) {
for (const auto& kv: *internal::global_context) {
Out << kv.first << ": " << kv.second << "\n";
}
}
if (CPUInfo::Scaling::ENABLED == info.scaling) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
#ifndef NDEBUG
Out << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
}
// No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name;
BenchmarkReporter::Context::Context()
: cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
std::string BenchmarkReporter::Run::benchmark_name() const {
std::string name = run_name.str();
if (run_type == RT_Aggregate) {
name += "_" + aggregate_name;
}
return name;
}
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
return new_time;
}
double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
return new_time;
}
} // end namespace benchmark
07070100000057000081A400000000000000000000000160C0813C000008A5000000000000000000000000000000000000001D00000000benchmark-1.5.5/src/sleep.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sleep.h"
#include <cerrno>
#include <cstdlib>
#include <ctime>
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <windows.h>
#endif
#ifdef BENCHMARK_OS_ZOS
#include <unistd.h>
#endif
namespace benchmark {
#ifdef BENCHMARK_OS_WINDOWS
// Windows' Sleep() takes a milliseconds argument.
void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
void SleepForSeconds(double seconds) {
SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) {
#ifdef BENCHMARK_OS_ZOS
// z/OS does not support nanosleep. Instead call sleep() and then usleep() to
// sleep for the remaining microseconds because usleep() will fail if its
// argument is greater than 1000000.
div_t sleepTime = div(microseconds, kNumMicrosPerSecond);
int seconds = sleepTime.quot;
while (seconds != 0)
seconds = sleep(seconds);
while (usleep(sleepTime.rem) == -1 && errno == EINTR)
;
#else
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
#endif
}
void SleepForMilliseconds(int milliseconds) {
SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
}
void SleepForSeconds(double seconds) {
SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
}
#endif // BENCHMARK_OS_WINDOWS
} // end namespace benchmark
07070100000058000081A400000000000000000000000160C0813C000001D1000000000000000000000000000000000000001C00000000benchmark-1.5.5/src/sleep.h#ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_
namespace benchmark {
const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
const int kNumNanosPerMicro = 1000;
const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
void SleepForMilliseconds(int milliseconds);
void SleepForSeconds(double seconds);
} // end namespace benchmark
#endif // BENCHMARK_SLEEP_H_
07070100000059000081A400000000000000000000000160C0813C00001AFB000000000000000000000000000000000000002200000000benchmark-1.5.5/src/statistics.cc// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
// Copyright 2017 Roman Lebedev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include <algorithm>
#include <cmath>
#include <numeric>
#include <string>
#include <vector>
#include "check.h"
#include "statistics.h"
namespace benchmark {
auto StatisticsSum = [](const std::vector<double>& v) {
return std::accumulate(v.begin(), v.end(), 0.0);
};
double StatisticsMean(const std::vector<double>& v) {
if (v.empty()) return 0.0;
return StatisticsSum(v) * (1.0 / v.size());
}
double StatisticsMedian(const std::vector<double>& v) {
if (v.size() < 3) return StatisticsMean(v);
std::vector<double> copy(v);
auto center = copy.begin() + v.size() / 2;
std::nth_element(copy.begin(), center, copy.end());
// If we have an odd number of samples, then *center is the median.
// If we have an even number, the median is the average of *center and
// the value before it.
if (v.size() % 2 == 1) return *center;
auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
}
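// Worked example for the even case above: for v = {1, 2, 3, 4} the first
// nth_element selects *center == 3 and the second selects *center2 == 2,
// so the reported median is (3 + 2) / 2 = 2.5.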
// Return the sum of the squares of this sample set
auto SumSquares = [](const std::vector<double>& v) {
return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
};
auto Sqr = [](const double dat) { return dat * dat; };
auto Sqrt = [](const double dat) {
// Avoid NaN due to imprecision in the calculations
if (dat < 0.0) return 0.0;
return std::sqrt(dat);
};
double StatisticsStdDev(const std::vector<double>& v) {
const auto mean = StatisticsMean(v);
if (v.empty()) return mean;
// Sample standard deviation is undefined for n = 1
if (v.size() == 1) return 0.0;
const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
}
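// Worked example: for v = {1, 2, 3} the mean is 2 and SumSquares(v) is 14,
// so avg_squares = 14/3 and the sample variance is 3/2 * (14/3 - 4) = 1,
// giving a standard deviation of 1.0.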
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
auto error_count =
std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.error_occurred; });
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there were fewer than two
// non-errored runs.
return results;
}
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat {
Counter c;
std::vector<double> s;
};
std::map<std::string, CounterStat> counter_stats;
for (Run const& r : reports) {
for (auto const& cnt : r.counters) {
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end()) {
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
} else {
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
for (auto const& cnt : run.counters) {
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
}
}
// Only add label if it is same for all runs
std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != report_label) {
report_label = "";
break;
}
}
const double iteration_rescale_factor =
double(reports.size()) / double(run_iterations);
for (const auto& Stat : *reports[0].statistics) {
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
data.run_name = reports[0].run_name;
data.family_index = reports[0].family_index;
data.per_family_instance_index = reports[0].per_family_instance_index;
data.run_type = BenchmarkReporter::Run::RT_Aggregate;
data.threads = reports[0].threads;
data.repetitions = reports[0].repetitions;
data.repetition_index = Run::no_repetition_index;
data.aggregate_name = Stat.name_;
data.report_label = report_label;
// It is incorrect to say that an aggregate is computed over a run's
// iterations, because those iterations already got averaged.
// Similarly, if there are N repetitions with 1 iteration each,
// an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports.
data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
// We will divide these times by data.iterations when reporting, but
// data.iterations is not necessarily the scale of these measurements,
// because in each repetition these timers are summed over all M
// iterations. Since we want the stats to be over N repetitions and not
// M iterations, we must multiply them by (N/M). For example, with N = 3
// repetitions of M = 100 iterations each, an aggregated per-repetition
// total must be rescaled by 3/100 before the division by
// data.iterations (= 3) yields a per-iteration time.
data.real_accumulated_time *= iteration_rescale_factor;
data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
for (auto const& kv : counter_stats) {
// Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
results.push_back(data);
}
return results;
}
} // end namespace benchmark
0707010000005A000081A400000000000000000000000160C0813C0000054F000000000000000000000000000000000000002100000000benchmark-1.5.5/src/statistics.h// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
// Copyright 2017 Roman Lebedev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef STATISTICS_H_
#define STATISTICS_H_
#include <vector>
#include "benchmark/benchmark.h"
namespace benchmark {
// Return a vector containing the mean, median and standard deviation
// information (and any user-specified statistics) for the specified list of
// reports. If 'reports' contains fewer than two non-errored runs, an empty
// vector is returned.
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports);
double StatisticsMean(const std::vector<double>& v);
double StatisticsMedian(const std::vector<double>& v);
double StatisticsStdDev(const std::vector<double>& v);
} // end namespace benchmark
#endif // STATISTICS_H_
0707010000005B000081A400000000000000000000000160C0813C00002039000000000000000000000000000000000000002300000000benchmark-1.5.5/src/string_util.cc#include "string_util.h"
#include <array>
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
#include <cerrno>
#endif
#include <cmath>
#include <cstdarg>
#include <cstdio>
#include <memory>
#include <sstream>
#include "arraysize.h"
namespace benchmark {
namespace {
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
const char kBigSIUnits[] = "kMGTPEZY";
// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
const char kBigIECUnits[] = "KMGTPEZY";
// milli, micro, nano, pico, femto, atto, zepto, yocto.
const char kSmallSIUnits[] = "munpfazy";
// We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
"SI and IEC unit arrays must be the same size");
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
void ToExponentAndMantissa(double val, double thresh, int precision,
double one_k, std::string* mantissa,
int64_t* exponent) {
std::stringstream mantissa_stream;
if (val < 0) {
mantissa_stream << "-";
val = -val;
}
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold =
std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold;
// Values in the open interval (simple_threshold, small_threshold) will be
// printed as-is.
const double simple_threshold = 0.01;
if (val > big_threshold) {
// Positive powers
double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
scaled /= one_k;
if (scaled <= big_threshold) {
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
return;
}
}
mantissa_stream << val;
*exponent = 0;
} else if (val < small_threshold) {
// Negative powers
if (val < simple_threshold) {
double scaled = val;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
scaled *= one_k;
if (scaled >= small_threshold) {
mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
return;
}
}
}
mantissa_stream << val;
*exponent = 0;
} else {
mantissa_stream << val;
*exponent = 0;
}
*mantissa = mantissa_stream.str();
}
std::string ExponentToPrefix(int64_t exponent, bool iec) {
if (exponent == 0) return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize) return "";
const char* array =
(exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
if (iec)
return array[index] + std::string("i");
else
return std::string(1, array[index]);
}
std::string ToBinaryStringFullySpecified(double value, double threshold,
int precision, double one_k = 1024.0) {
std::string mantissa;
int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
&exponent);
return mantissa + ExponentToPrefix(exponent, false);
}
} // end namespace
void AppendHumanReadable(int n, std::string* str) {
std::stringstream ss;
// Round down to the nearest SI prefix.
ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str();
}
std::string HumanReadableNumber(double n, double one_k) {
// 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects.
// 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
}
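// Illustrative results (computed from the thresholds above, not normative):
// with the default one_k = 1024, HumanReadableNumber(2048) yields "2k", and
// HumanReadableNumber(0.005, 1000) yields "5m" via the small-SI table.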
std::string StrFormatImp(const char* msg, va_list args) {
// We might need a second shot at this, so pre-emptively make a copy.
va_list args_cp;
va_copy(args_cp, args);
// TODO(ericwf): guess what the size might be for the first attempt instead
// of always using a fixed 256-byte buffer, to avoid the second allocation.
std::array<char, 256> local_buff;
std::size_t size = local_buff.size();
// 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
// limitation in the android-ndk.
auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
va_end(args_cp);
// handle empty expansion
if (ret == 0) return std::string{};
if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data());
// We did not provide a long enough buffer on our first attempt.
// Add 1 to the length vsnprintf reported to make room for the null byte.
size = static_cast<std::size_t>(ret) + 1;
auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
// 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a
// limitation in the android-ndk.
ret = vsnprintf(buff_ptr.get(), size, msg, args);
return std::string(buff_ptr.get());
}
std::string StrFormat(const char* format, ...) {
va_list args;
va_start(args, format);
std::string tmp = StrFormatImp(format, args);
va_end(args);
return tmp;
}
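// Illustrative usage sketch: StrFormat("%s/%d", "cpu", 3) returns "cpu/3".
// Expansions longer than the 256-byte first attempt trigger the heap
// fallback in StrFormatImp above.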
std::vector<std::string> StrSplit(const std::string& str, char delim) {
if (str.empty()) return {};
std::vector<std::string> ret;
size_t first = 0;
size_t next = str.find(delim);
for (; next != std::string::npos;
first = next + 1, next = str.find(delim, first)) {
ret.push_back(str.substr(first, next - first));
}
ret.push_back(str.substr(first));
return ret;
}
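// Illustrative results: StrSplit("a,b,c", ',') returns {"a", "b", "c"},
// and StrSplit("a,,b", ',') keeps the empty middle field: {"a", "", "b"}.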
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
/*
* GNU STL in Android NDK lacks support for some C++11 functions, including
* stoul, stoi, stod. We reimplement them here using C functions strtoul,
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos, int base) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const unsigned long result = strtoul(strStart, &strEnd, base);
const int strtoulErrno = errno;
/* Restore previous errno */
errno = oldErrno;
/* Check for errors and return */
if (strtoulErrno == ERANGE) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of unsigned long");
} else if (strEnd == strStart || strtoulErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
int stoi(const std::string& str, size_t* pos, int base) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const long result = strtol(strStart, &strEnd, base);
const int strtolErrno = errno;
/* Restore previous errno */
errno = oldErrno;
/* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result) {
throw std::out_of_range(
"stoi failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtolErrno != 0) {
throw std::invalid_argument(
"stoi failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return int(result);
}
double stod(const std::string& str, size_t* pos) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const double result = strtod(strStart, &strEnd);
/* Restore previous errno */
const int strtodErrno = errno;
errno = oldErrno;
/* Check for errors and return */
if (strtodErrno == ERANGE) {
throw std::out_of_range(
"stod failed: " + str + " is outside of range of double");
} else if (strEnd == strStart || strtodErrno != 0) {
throw std::invalid_argument(
"stod failed: " + str + " is not a number");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
#endif
} // end namespace benchmark
0707010000005C000081A400000000000000000000000160C0813C0000069F000000000000000000000000000000000000002200000000benchmark-1.5.5/src/string_util.h#ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_
#include <sstream>
#include <string>
#include <utility>
#include "internal_macros.h"
namespace benchmark {
void AppendHumanReadable(int n, std::string* str);
std::string HumanReadableNumber(double n, double one_k = 1024.0);
#if defined(__MINGW32__)
__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
#elif defined(__GNUC__)
__attribute__((format(printf, 1, 2)))
#endif
std::string
StrFormat(const char* format, ...);
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
return out;
}
template <class First, class... Rest>
inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
out << std::forward<First>(f);
return StrCatImp(out, std::forward<Rest>(rest)...);
}
template <class... Args>
inline std::string StrCat(Args&&... args) {
std::ostringstream ss;
StrCatImp(ss, std::forward<Args>(args)...);
return ss.str();
}
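// Illustrative usage sketch: StrCat streams each argument into one string,
// e.g. StrCat("cpu", 0, "/cache") returns "cpu0/cache" (this is how the
// cpufreq and cache sysfs paths are built in sysinfo.cc).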
std::vector<std::string> StrSplit(const std::string& str, char delim);
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
/*
* GNU STL in Android NDK lacks support for some C++11 functions, including
* stoul, stoi, stod. We reimplement them here using C functions strtoul,
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos = nullptr,
int base = 10);
int stoi(const std::string& str, size_t* pos = nullptr, int base = 10);
double stod(const std::string& str, size_t* pos = nullptr);
#else
using std::stoul;
using std::stoi;
using std::stod;
#endif
} // end namespace benchmark
#endif // BENCHMARK_STRING_UTIL_H_
0707010000005D000081A400000000000000000000000160C0813C0000575D000000000000000000000000000000000000001F00000000benchmark-1.5.5/src/sysinfo.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#include <codecvt>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD || \
defined BENCHMARK_OS_DRAGONFLY
#define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h>
#endif
#endif
#if defined(BENCHMARK_OS_SOLARIS)
#include <kstat.h>
#endif
#if defined(BENCHMARK_OS_QNX)
#include <sys/syspage.h>
#endif
#include <algorithm>
#include <array>
#include <bitset>
#include <cerrno>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <iterator>
#include <limits>
#include <memory>
#include <sstream>
#include <locale>
#include <utility>
#include "check.h"
#include "cycleclock.h"
#include "internal_macros.h"
#include "log.h"
#include "sleep.h"
#include "string_util.h"
namespace benchmark {
namespace {
void PrintImp(std::ostream& out) { out << std::endl; }
template <class First, class... Rest>
void PrintImp(std::ostream& out, First&& f, Rest&&... rest) {
out << std::forward<First>(f);
PrintImp(out, std::forward<Rest>(rest)...);
}
template <class... Args>
BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
PrintImp(std::cerr, std::forward<Args>(args)...);
std::exit(EXIT_FAILURE);
}
#ifdef BENCHMARK_HAS_SYSCTL
/// ValueUnion - A type used to correctly alias the byte-for-byte output of
/// `sysctl` with the result type it's to be interpreted as.
struct ValueUnion {
union DataT {
uint32_t uint32_value;
uint64_t uint64_value;
// For correct aliasing of union members from bytes.
char bytes[8];
};
using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
// The size of the data union member + its trailing array size.
size_t Size;
DataPtr Buff;
public:
ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
explicit ValueUnion(size_t BuffSize)
: Size(sizeof(DataT) + BuffSize),
Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
ValueUnion(ValueUnion&& other) = default;
explicit operator bool() const { return bool(Buff); }
char* data() const { return Buff->bytes; }
std::string GetAsString() const { return std::string(data()); }
int64_t GetAsInteger() const {
if (Size == sizeof(Buff->uint32_value))
return static_cast<int32_t>(Buff->uint32_value);
else if (Size == sizeof(Buff->uint64_value))
return static_cast<int64_t>(Buff->uint64_value);
BENCHMARK_UNREACHABLE();
}
uint64_t GetAsUnsigned() const {
if (Size == sizeof(Buff->uint32_value))
return Buff->uint32_value;
else if (Size == sizeof(Buff->uint64_value))
return Buff->uint64_value;
BENCHMARK_UNREACHABLE();
}
template <class T, int N>
std::array<T, N> GetAsArray() {
const int ArrSize = sizeof(T) * N;
CHECK_LE(ArrSize, Size);
std::array<T, N> Arr;
std::memcpy(Arr.data(), data(), ArrSize);
return Arr;
}
};
ValueUnion GetSysctlImp(std::string const& Name) {
#if defined BENCHMARK_OS_OPENBSD
int mib[2];
mib[0] = CTL_HW;
if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
ValueUnion buff(sizeof(int));
if (Name == "hw.ncpu") {
mib[1] = HW_NCPU;
} else {
mib[1] = HW_CPUSPEED;
}
if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
return ValueUnion();
}
return buff;
}
return ValueUnion();
#else
size_t CurBuffSize = 0;
if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
return ValueUnion();
ValueUnion buff(CurBuffSize);
if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
return buff;
return ValueUnion();
#endif
}
BENCHMARK_MAYBE_UNUSED
bool GetSysctl(std::string const& Name, std::string* Out) {
Out->clear();
auto Buff = GetSysctlImp(Name);
if (!Buff) return false;
Out->assign(Buff.data());
return true;
}
template <class Tp,
class = typename std::enable_if<std::is_integral<Tp>::value>::type>
bool GetSysctl(std::string const& Name, Tp* Out) {
*Out = 0;
auto Buff = GetSysctlImp(Name);
if (!Buff) return false;
*Out = static_cast<Tp>(Buff.GetAsUnsigned());
return true;
}
template <class Tp, size_t N>
bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
auto Buff = GetSysctlImp(Name);
if (!Buff) return false;
*Out = Buff.GetAsArray<Tp, N>();
return true;
}
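// Illustrative usage sketch, mirroring the calls made in GetNumCPUs() and
// GetCacheSizesMacOSX() below:
//
//   int ncpu = 0;
//   if (GetSysctl("hw.ncpu", &ncpu)) { /* use ncpu */ }
//   std::array<uint64_t, 4> counts{{0, 0, 0, 0}};
//   GetSysctl("hw.cacheconfig", &counts);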
#endif
template <class ArgT>
bool ReadFromFile(std::string const& fname, ArgT* arg) {
*arg = ArgT();
std::ifstream f(fname.c_str());
if (!f.is_open()) return false;
f >> *arg;
return f.good();
}
CPUInfo::Scaling CpuScaling(int num_cpus) {
// We don't have a valid CPU count, so don't even bother.
if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN;
#ifdef BENCHMARK_OS_QNX
return CPUInfo::Scaling::UNKNOWN;
#endif
#ifndef BENCHMARK_OS_WINDOWS
// On Linux, the CPUfreq subsystem exposes CPU information as files on the
// local file system. If reading the exported files fails, then we may not be
// running on Linux, so we silently ignore all the read errors.
std::string res;
for (int cpu = 0; cpu < num_cpus; ++cpu) {
std::string governor_file =
StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
if (ReadFromFile(governor_file, &res) && res != "performance") return CPUInfo::Scaling::ENABLED;
}
return CPUInfo::Scaling::DISABLED;
#endif
return CPUInfo::Scaling::UNKNOWN;
}
int CountSetBitsInCPUMap(std::string Val) {
auto CountBits = [](std::string Part) {
using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
Part = "0x" + Part;
CPUMask Mask(benchmark::stoul(Part, nullptr, 16));
return static_cast<int>(Mask.count());
};
size_t Pos;
int total = 0;
while ((Pos = Val.find(',')) != std::string::npos) {
total += CountBits(Val.substr(0, Pos));
Val = Val.substr(Pos + 1);
}
if (!Val.empty()) {
total += CountBits(Val);
}
return total;
}
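// Worked example: a shared_cpu_map value of "ff,3" is split at the comma
// and each word is parsed as hex: popcount(0xff) + popcount(0x3)
// = 8 + 2 = 10 sharing CPUs.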
BENCHMARK_MAYBE_UNUSED
std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
std::vector<CPUInfo::CacheInfo> res;
std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
int Idx = 0;
while (true) {
CPUInfo::CacheInfo info;
std::string FPath = StrCat(dir, "index", Idx++, "/");
std::ifstream f(StrCat(FPath, "size").c_str());
if (!f.is_open()) break;
std::string suffix;
f >> info.size;
if (f.fail())
PrintErrorAndDie("Failed while reading file '", FPath, "size'");
if (f.good()) {
f >> suffix;
if (f.bad())
PrintErrorAndDie(
"Invalid cache size format: failed to read size suffix");
else if (f && suffix != "K")
PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
else if (suffix == "K")
info.size *= 1024;
}
if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
PrintErrorAndDie("Failed to read from file ", FPath, "type");
if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
PrintErrorAndDie("Failed to read from file ", FPath, "level");
std::string map_str;
if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
info.num_sharing = CountSetBitsInCPUMap(map_str);
res.push_back(info);
}
return res;
}
#ifdef BENCHMARK_OS_MACOSX
std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
std::vector<CPUInfo::CacheInfo> res;
std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
GetSysctl("hw.cacheconfig", &CacheCounts);
struct {
std::string name;
std::string type;
int level;
uint64_t num_sharing;
} Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
{"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
{"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
{"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
for (auto& C : Cases) {
int val;
if (!GetSysctl(C.name, &val)) continue;
CPUInfo::CacheInfo info;
info.type = C.type;
info.level = C.level;
info.size = val;
info.num_sharing = static_cast<int>(C.num_sharing);
res.push_back(std::move(info));
}
return res;
}
#elif defined(BENCHMARK_OS_WINDOWS)
std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
std::vector<CPUInfo::CacheInfo> res;
DWORD buffer_size = 0;
using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
using CInfo = CACHE_DESCRIPTOR;
using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
GetLogicalProcessorInformation(nullptr, &buffer_size);
UPtr buff((PInfo*)malloc(buffer_size), &std::free);
if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
GetLastError());
PInfo* it = buff.get();
PInfo* end = buff.get() + (buffer_size / sizeof(PInfo));
for (; it != end; ++it) {
if (it->Relationship != RelationCache) continue;
using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
BitSet B(it->ProcessorMask);
// To prevent duplicates, only consider caches where CPU 0 is specified
if (!B.test(0)) continue;
CInfo* Cache = &it->Cache;
CPUInfo::CacheInfo C;
C.num_sharing = static_cast<int>(B.count());
C.level = Cache->Level;
C.size = Cache->Size;
switch (Cache->Type) {
case CacheUnified:
C.type = "Unified";
break;
case CacheInstruction:
C.type = "Instruction";
break;
case CacheData:
C.type = "Data";
break;
case CacheTrace:
C.type = "Trace";
break;
default:
C.type = "Unknown";
break;
}
res.push_back(C);
}
return res;
}
#elif BENCHMARK_OS_QNX
std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
std::vector<CPUInfo::CacheInfo> res;
struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ;
for(int i = 0; i < num; ++i ) {
CPUInfo::CacheInfo info;
switch (cache->flags){
case CACHE_FLAG_INSTR :
info.type = "Instruction";
info.level = 1;
break;
case CACHE_FLAG_DATA :
info.type = "Data";
info.level = 1;
break;
case CACHE_FLAG_UNIFIED :
info.type = "Unified";
info.level = 2;
break;
case CACHE_FLAG_SHARED :
info.type = "Shared";
info.level = 3;
break;
default :
continue;
break;
}
info.size = cache->line_size * cache->num_lines;
info.num_sharing = 0;
res.push_back(std::move(info));
cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize);
}
return res;
}
#endif
std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
#ifdef BENCHMARK_OS_MACOSX
return GetCacheSizesMacOSX();
#elif defined(BENCHMARK_OS_WINDOWS)
return GetCacheSizesWindows();
#elif defined(BENCHMARK_OS_QNX)
return GetCacheSizesQNX();
#else
return GetCacheSizesFromKVFS();
#endif
}
std::string GetSystemName() {
#if defined(BENCHMARK_OS_WINDOWS)
std::string str;
const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1;
TCHAR hostname[COUNT] = {'\0'};
DWORD DWCOUNT = COUNT;
if (!GetComputerName(hostname, &DWCOUNT))
return std::string("");
#ifndef UNICODE
str = std::string(hostname, DWCOUNT);
#else
// Note: wstring_convert is deprecated in C++17.
using convert_type = std::codecvt_utf8<wchar_t>;
std::wstring_convert<convert_type, wchar_t> converter;
std::wstring wStr(hostname, DWCOUNT);
str = converter.to_bytes(wStr);
#endif
return str;
#else // defined(BENCHMARK_OS_WINDOWS)
#ifndef HOST_NAME_MAX
#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac doesn't have HOST_NAME_MAX defined
#define HOST_NAME_MAX 64
#elif defined(BENCHMARK_OS_NACL)
#define HOST_NAME_MAX 64
#elif defined(BENCHMARK_OS_QNX)
#define HOST_NAME_MAX 154
#elif defined(BENCHMARK_OS_RTEMS)
#define HOST_NAME_MAX 256
#else
#warning "HOST_NAME_MAX not defined. using 64"
#define HOST_NAME_MAX 64
#endif
#endif // def HOST_NAME_MAX
char hostname[HOST_NAME_MAX];
int retVal = gethostname(hostname, HOST_NAME_MAX);
if (retVal != 0) return std::string("");
return std::string(hostname);
#endif // Catch-all POSIX block.
}
int GetNumCPUs() {
#ifdef BENCHMARK_HAS_SYSCTL
int NumCPU = -1;
if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
fprintf(stderr, "Err: %s\n", strerror(errno));
std::exit(EXIT_FAILURE);
#elif defined(BENCHMARK_OS_WINDOWS)
SYSTEM_INFO sysinfo;
// Use memset as opposed to = {} to avoid GCC missing initializer false
// positives.
std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors; // number of logical
// processors in the current
// group
#elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure.
int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
if (NumCPU < 0) {
fprintf(stderr,
"sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
strerror(errno));
}
return NumCPU;
#elif defined(BENCHMARK_OS_QNX)
return static_cast<int>(_syspage_ptr->num_cpu);
#else
int NumCPUs = 0;
int MaxID = -1;
std::ifstream f("/proc/cpuinfo");
if (!f.is_open()) {
std::cerr << "failed to open /proc/cpuinfo\n";
return -1;
}
const std::string Key = "processor";
std::string ln;
while (std::getline(f, ln)) {
if (ln.empty()) continue;
size_t SplitIdx = ln.find(':');
std::string value;
#if defined(__s390__)
// s390 uses a different format in /proc/cpuinfo,
// so it needs to be parsed differently.
if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1);
#else
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
#endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
NumCPUs++;
if (!value.empty()) {
int CurID = benchmark::stoi(value);
MaxID = std::max(CurID, MaxID);
}
}
}
if (f.bad()) {
std::cerr << "Failure reading /proc/cpuinfo\n";
return -1;
}
if (!f.eof()) {
std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return -1;
}
f.close();
if ((MaxID + 1) != NumCPUs) {
fprintf(stderr,
"CPU ID assignments in /proc/cpuinfo seem messed up."
" This is usually caused by a bad BIOS.\n");
}
return NumCPUs;
#endif
BENCHMARK_UNREACHABLE();
}
double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
// Currently, 'scaling' is only used on the Linux path here; suppress
// diagnostics about it being unused on other paths.
(void)scaling;
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
long freq;
// If the kernel is exporting the tsc frequency use that. There are issues
// where cpuinfo_max_freq cannot be relied on because the BIOS may be
// exporting an invalid p-state (on x86) or p-states may be used to put the
// processor in a new mode (turbo mode). Essentially, those frequencies
// cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
// well.
if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
// If CPU scaling is disabled, use the *current* frequency.
// Note that we specifically don't want to read cpuinfo_cur_freq,
// because it is only readable by root.
|| (scaling == CPUInfo::Scaling::DISABLED &&
ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq",
&freq))
// Otherwise, if CPU scaling may be in effect, we want to use
// the *maximum* frequency, not whatever CPU speed some random processor
// happens to be using now.
|| ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&freq)) {
// The value is in kHz (as the file name suggests). For example, on a
// 2GHz warpstation, the file contains the value "2000000".
return freq * 1000.0;
}
const double error_value = -1;
double bogo_clock = error_value;
std::ifstream f("/proc/cpuinfo");
if (!f.is_open()) {
std::cerr << "failed to open /proc/cpuinfo\n";
return error_value;
}
auto startsWithKey = [](std::string const& Value, std::string const& Key) {
if (Key.size() > Value.size()) return false;
auto Cmp = [&](char X, char Y) {
return std::tolower(X) == std::tolower(Y);
};
return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
};
std::string ln;
while (std::getline(f, ln)) {
if (ln.empty()) continue;
size_t SplitIdx = ln.find(':');
std::string value;
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept positive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init.
if (startsWithKey(ln, "cpu MHz")) {
if (!value.empty()) {
double cycles_per_second = benchmark::stod(value) * 1000000.0;
if (cycles_per_second > 0) return cycles_per_second;
}
} else if (startsWithKey(ln, "bogomips")) {
if (!value.empty()) {
bogo_clock = benchmark::stod(value) * 1000000.0;
if (bogo_clock < 0.0) bogo_clock = error_value;
}
}
}
if (f.bad()) {
std::cerr << "Failure reading /proc/cpuinfo\n";
return error_value;
}
if (!f.eof()) {
std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return error_value;
}
f.close();
// If we found the bogomips clock, but nothing better, we'll use it (but
// we're not happy about it); otherwise, fallback to the rough estimation
// below.
if (bogo_clock >= 0.0) return bogo_clock;
#elif defined BENCHMARK_HAS_SYSCTL
constexpr auto* FreqStr =
#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
"machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD
"hw.cpuspeed";
#elif defined BENCHMARK_OS_DRAGONFLY
"hw.tsc_frequency";
#else
"hw.cpufrequency";
#endif
unsigned long long hz = 0;
#if defined BENCHMARK_OS_OPENBSD
if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
#else
if (GetSysctl(FreqStr, &hz)) return hz;
#endif
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
FreqStr, strerror(errno));
#elif defined BENCHMARK_OS_WINDOWS
// In NT, read MHz from the registry. If we fail to do so or we're in win9x
// then make a crude estimate.
DWORD data, data_size = sizeof(data);
if (IsWindowsXPOrGreater() &&
SUCCEEDED(
SHGetValueA(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
"~MHz", nullptr, &data, &data_size)))
return static_cast<double>((int64_t)data *
(int64_t)(1000 * 1000)); // was mhz
#elif defined (BENCHMARK_OS_SOLARIS)
kstat_ctl_t *kc = kstat_open();
if (!kc) {
std::cerr << "failed to open /dev/kstat\n";
return -1;
}
kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
if (!ksp) {
std::cerr << "failed to lookup in /dev/kstat\n";
return -1;
}
if (kstat_read(kc, ksp, NULL) < 0) {
std::cerr << "failed to read from /dev/kstat\n";
return -1;
}
kstat_named_t *knp =
(kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
if (!knp) {
std::cerr << "failed to lookup data in /dev/kstat\n";
return -1;
}
if (knp->data_type != KSTAT_DATA_UINT64) {
std::cerr << "current_clock_Hz is of unexpected data type: "
<< knp->data_type << "\n";
return -1;
}
double clock_hz = knp->value.ui64;
kstat_close(kc);
return clock_hz;
#elif defined (BENCHMARK_OS_QNX)
return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
(int64_t)(1000 * 1000));
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000;
const auto start_ticks = cycleclock::Now();
SleepForMilliseconds(estimate_time_ms);
return static_cast<double>(cycleclock::Now() - start_ticks);
}
std::vector<double> GetLoadAvg() {
#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \
!defined(__ANDROID__)
constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
if (nelem < 1) {
res.clear();
} else {
res.resize(nelem);
}
return res;
#else
return {};
#endif
}
} // end namespace
const CPUInfo& CPUInfo::Get() {
static const CPUInfo* info = new CPUInfo();
return *info;
}
CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()),
scaling(CpuScaling(num_cpus)),
cycles_per_second(GetCPUCyclesPerSecond(scaling)),
caches(GetCacheSizes()),
load_avg(GetLoadAvg()) {}
const SystemInfo& SystemInfo::Get() {
static const SystemInfo* info = new SystemInfo();
return *info;
}
SystemInfo::SystemInfo() : name(GetSystemName()) {}
} // end namespace benchmark
0707010000005E000081A400000000000000000000000160C0813C0000062F000000000000000000000000000000000000002500000000benchmark-1.5.5/src/thread_manager.h#ifndef BENCHMARK_THREAD_MANAGER_H
#define BENCHMARK_THREAD_MANAGER_H
#include <atomic>
#include "benchmark/benchmark.h"
#include "mutex.h"
namespace benchmark {
namespace internal {
class ThreadManager {
public:
explicit ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(),
[this]() { return alive_threads_ == 0; });
}
public:
struct Result {
IterationCount iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
};
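// Illustrative lifecycle sketch (a simplification of how the benchmark
// runner drives this class): each of the num_threads workers calls
// StartStopBarrier() around its measurement loop and NotifyThreadComplete()
// when it finishes; the main thread then blocks in WaitForAllThreads()
// until alive_threads_ reaches zero.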
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_MANAGER_H
0707010000005F000081A400000000000000000000000160C0813C000008ED000000000000000000000000000000000000002300000000benchmark-1.5.5/src/thread_timer.h#ifndef BENCHMARK_THREAD_TIMER_H
#define BENCHMARK_THREAD_TIMER_H
#include "check.h"
#include "timers.h"
namespace benchmark {
namespace internal {
class ThreadTimer {
explicit ThreadTimer(bool measure_process_cpu_time_)
: measure_process_cpu_time(measure_process_cpu_time_) {}
public:
static ThreadTimer Create() {
return ThreadTimer(/*measure_process_cpu_time_=*/false);
}
static ThreadTimer CreateProcessCpuTime() {
return ThreadTimer(/*measure_process_cpu_time_=*/true);
}
// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ReadCpuTimerOfChoice();
}
// Called by each thread
void StopTimer() {
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ +=
std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
}
// Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
bool running() const { return running_; }
// REQUIRES: timer is not running
double real_time_used() const {
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() const {
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() const {
CHECK(!running_);
return manual_time_used_;
}
private:
double ReadCpuTimerOfChoice() const {
if (measure_process_cpu_time) return ProcessCPUUsage();
return ThreadCPUUsage();
}
// Should the thread time or the process time be measured?
const bool measure_process_cpu_time;
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
};
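// Illustrative usage sketch: timers are created via the named constructors
// and may only be queried while stopped.
//
//   ThreadTimer t = ThreadTimer::Create();
//   t.StartTimer();
//   /* ... timed region ... */
//   t.StopTimer();
//   double cpu_s = t.cpu_time_used();  // seconds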
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_TIMER_H
07070100000060000081A400000000000000000000000160C0813C000022A3000000000000000000000000000000000000001E00000000benchmark-1.5.5/src/timers.cc// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "timers.h"
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_DRAGONFLY || \
defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
#endif
#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/thread_act.h>
#endif
#endif
#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <limits>
#include <mutex>
#include "check.h"
#include "log.h"
#include "sleep.h"
#include "string_util.h"
namespace benchmark {
// Suppress unused warnings on helper functions.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
namespace {
#if defined(BENCHMARK_OS_WINDOWS)
double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
ULARGE_INTEGER kernel;
ULARGE_INTEGER user;
kernel.HighPart = kernel_time.dwHighDateTime;
kernel.LowPart = kernel_time.dwLowDateTime;
user.HighPart = user_time.dwHighDateTime;
user.LowPart = user_time.dwLowDateTime;
return (static_cast<double>(kernel.QuadPart) +
static_cast<double>(user.QuadPart)) *
1e-7;
}
#elif !defined(BENCHMARK_OS_FUCHSIA)
double MakeTime(struct rusage const& ru) {
return (static_cast<double>(ru.ru_utime.tv_sec) +
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) +
static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
}
#endif
#if defined(BENCHMARK_OS_MACOSX)
double MakeTime(thread_basic_info_data_t const& info) {
return (static_cast<double>(info.user_time.seconds) +
static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) +
static_cast<double>(info.system_time.microseconds) * 1e-6);
}
#endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const& ts) {
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
}
#endif
BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE);
}
} // end namespace
double ProcessCPUUsage() {
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE proc = GetCurrentProcess();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
&user_time))
return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProccessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
// Use Emscripten-specific API. Reported CPU time would be exactly the
// same as total time, but this is ok because there aren't long-latency
// synchronous system calls in Emscripten.
return emscripten_get_now() * 1e-3;
#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
// FIXME: We want to use clock_gettime, but it's not available in macOS 10.11. See
// https://github.com/google/benchmark/pull/292
struct timespec spec;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
return MakeTime(spec);
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else
struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif
}
double ThreadCPUUsage() {
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE this_thread = GetCurrentThread();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
&user_time);
return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX)
// FIXME: We want to use clock_gettime, but it's not available in macOS 10.11. See
// https://github.com/google/benchmark/pull/292
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
KERN_SUCCESS) {
return MakeTime(info);
}
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// Emscripten doesn't support traditional threads
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_RTEMS)
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else
#error Per-thread timing is not available on your system.
#endif
}
std::string LocalDateTimeString() {
// Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM.
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
const std::size_t kTzOffsetLen = 6;
const std::size_t kTimestampLen = 19;
std::size_t tz_len;
std::size_t timestamp_len;
long int offset_minutes;
char tz_offset_sign = '+';
// tz_offset is set in one of three ways:
// * strftime with %z - This either returns empty or the ISO 8601 time. The maximum length an
// ISO 8601 string can be is 7 (e.g. -03:30, plus trailing zero).
// * snprintf with %c%02li:%02li - The maximum length is 41 (one for %c, up to 19 for %02li,
// one for :, up to 19 %02li, plus trailing zero).
// * A fixed string of "-00:00". The maximum length is 7 (-00:00, plus trailing zero).
//
// Thus, the maximum size this needs to be is 41.
char tz_offset[41];
// Long enough buffer to avoid format-overflow warnings
char storage[128];
#if defined(BENCHMARK_OS_WINDOWS)
std::tm *timeinfo_p = ::localtime(&now);
#else
std::tm timeinfo;
std::tm *timeinfo_p = &timeinfo;
::localtime_r(&now, &timeinfo);
#endif
tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p);
if (tz_len < kTzOffsetLen && tz_len > 1) {
// Timezone offset was written. strftime writes offset as +HHMM or -HHMM,
// RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse
// the offset as an integer, then reprint it to a string.
offset_minutes = ::strtol(tz_offset, NULL, 10);
if (offset_minutes < 0) {
offset_minutes *= -1;
tz_offset_sign = '-';
}
tz_len = ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
CHECK(tz_len == kTzOffsetLen);
((void)tz_len); // Prevent unused variable warning in optimized build.
} else {
// Unknown offset. RFC3339 specifies that unknown local offsets should be
// written as UTC time with -00:00 timezone.
#if defined(BENCHMARK_OS_WINDOWS)
// Potential race condition if another thread calls localtime or gmtime.
timeinfo_p = ::gmtime(&now);
#else
::gmtime_r(&now, &timeinfo);
#endif
strncpy(tz_offset, "-00:00", kTzOffsetLen + 1);
}
timestamp_len = std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S",
timeinfo_p);
CHECK(timestamp_len == kTimestampLen);
// Prevent unused variable warning in optimized build.
((void)kTimestampLen);
std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1);
return std::string(storage);
}
} // end namespace benchmark
07070100000061000081A400000000000000000000000160C0813C0000046C000000000000000000000000000000000000001D00000000benchmark-1.5.5/src/timers.h#ifndef BENCHMARK_TIMERS_H
#define BENCHMARK_TIMERS_H
#include <chrono>
#include <string>
namespace benchmark {
// Return the CPU usage of the current process
double ProcessCPUUsage();
// Return the CPU usage of the children of the current process
double ChildrenCPUUsage();
// Return the CPU usage of the current thread
double ThreadCPUUsage();
#if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
struct ChooseSteadyClock {
typedef std::chrono::high_resolution_clock type;
};
template <>
struct ChooseSteadyClock<false> {
typedef std::chrono::steady_clock type;
};
#endif
struct ChooseClockType {
#if defined(HAVE_STEADY_CLOCK)
typedef ChooseSteadyClock<>::type type;
#else
typedef std::chrono::high_resolution_clock type;
#endif
};
inline double ChronoClockNow() {
typedef ChooseClockType::type ClockType;
using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
return FpSeconds(ClockType::now().time_since_epoch()).count();
}
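// Illustrative usage sketch: ChronoClockNow() returns seconds as a double,
// so elapsed wall time is a plain subtraction.
//
//   const double start = benchmark::ChronoClockNow();
//   /* ... work ... */
//   const double elapsed_s = benchmark::ChronoClockNow() - start;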
std::string LocalDateTimeString();
} // end namespace benchmark
#endif // BENCHMARK_TIMERS_H
07070100000062000041ED00000000000000000000000260C0813C00000000000000000000000000000000000000000000001500000000benchmark-1.5.5/test07070100000063000081A400000000000000000000000160C0813C000005BF000000000000000000000000000000000000002900000000benchmark-1.5.5/test/AssemblyTests.cmake
include(split_list)
set(ASM_TEST_FLAGS "")
check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
if (BENCHMARK_HAS_O3_FLAG)
list(APPEND ASM_TEST_FLAGS -O3)
endif()
check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG)
if (BENCHMARK_HAS_G0_FLAG)
list(APPEND ASM_TEST_FLAGS -g0)
endif()
check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
list(APPEND ASM_TEST_FLAGS -fno-stack-protector)
endif()
split_list(ASM_TEST_FLAGS)
string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER)
macro(add_filecheck_test name)
cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
add_library(${name} OBJECT ${name}.cc)
set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
add_custom_target(copy_${name} ALL
COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py
$<TARGET_OBJECTS:${name}>
${ASM_OUTPUT_FILE}
BYPRODUCTS ${ASM_OUTPUT_FILE})
add_dependencies(copy_${name} ${name})
if (NOT ARG_CHECK_PREFIXES)
set(ARG_CHECK_PREFIXES "CHECK")
endif()
foreach(prefix ${ARG_CHECK_PREFIXES})
add_test(NAME run_${name}_${prefix}
COMMAND
${LLVM_FILECHECK_EXE} ${name}.cc
--input-file=${ASM_OUTPUT_FILE}
--check-prefixes=${prefix},${prefix}-${ASM_TEST_COMPILER}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endforeach()
endmacro()
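# Usage (the callers live in test/CMakeLists.txt), e.g.:
#   add_filecheck_test(donotoptimize_assembly_test)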
07070100000064000081A400000000000000000000000160C0813C00000750000000000000000000000000000000000000001B00000000benchmark-1.5.5/test/BUILDload("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
TEST_COPTS = [
"-pedantic",
"-pedantic-errors",
"-std=c++11",
"-Wall",
"-Wextra",
"-Wshadow",
# "-Wshorten-64-to-32",
"-Wfloat-equal",
"-fstrict-aliasing",
]
PER_SRC_COPTS = ({
"cxx03_test.cc": ["-std=c++03"],
# Some of the issues with DoNotOptimize only occur when optimization is enabled
"donotoptimize_test.cc": ["-O3"],
})
TEST_ARGS = ["--benchmark_min_time=0.01"]
PER_SRC_TEST_ARGS = ({
"user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
"repetitions_test.cc": [" --benchmark_repetitions=3"],
})
load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
cc_library(
name = "output_test_helper",
testonly = 1,
srcs = ["output_test_helper.cc"],
hdrs = ["output_test.h"],
copts = TEST_COPTS,
deps = [
"//:benchmark",
"//:benchmark_internal_headers",
],
)
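# Generate one small cc_test per *test.cc source, merging in any per-file copts
# and args from the dicts above; sources ending in "gtest.cc" additionally link
# gtest_main.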
[
cc_test(
name = test_src[:-len(".cc")],
size = "small",
srcs = [test_src],
args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []),
copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []),
deps = [
":output_test_helper",
"//:benchmark",
"//:benchmark_internal_headers",
"@com_google_googletest//:gtest",
] + (
["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
),
# FIXME: Add support for assembly tests to bazel.
# See Issue #556
# https://github.com/google/benchmark/issues/556
)
for test_src in glob(
["*test.cc"],
exclude = [
"*_assembly_test.cc",
"link_main_test.cc",
],
)
]
cc_test(
name = "link_main_test",
size = "small",
srcs = ["link_main_test.cc"],
copts = TEST_COPTS,
deps = ["//:benchmark_main"],
)
07070100000065000081A400000000000000000000000160C0813C00002AA7000000000000000000000000000000000000002400000000benchmark-1.5.5/test/CMakeLists.txt# Enable the tests
find_package(Threads REQUIRED)
include(CheckCXXCompilerFlag)
# NOTE: Some tests use `<cassert>` to perform their checks. Therefore we must
# strip -DNDEBUG from the default CMake flags of the non-DEBUG configurations.
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
add_definitions( -UNDEBUG )
add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS)
# Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines.
foreach (flags_var_to_scrub
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_C_FLAGS_MINSIZEREL)
string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " "
"${flags_var_to_scrub}" "${${flags_var_to_scrub}}")
endforeach()
endif()
check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
set(BENCHMARK_O3_FLAG "")
if (BENCHMARK_HAS_O3_FLAG)
set(BENCHMARK_O3_FLAG "-O3")
endif()
# NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
# they will break the configuration check.
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
endif()
add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
macro(compile_benchmark_test name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_benchmark_test)
macro(compile_benchmark_test_with_main name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark::benchmark_main)
endmacro(compile_benchmark_test_with_main)
macro(compile_output_test name)
add_executable(${name} "${name}.cc" output_test.h)
target_link_libraries(${name} output_test_helper benchmark::benchmark
${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_output_test)
# Demonstration executable
compile_benchmark_test(benchmark_test)
add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(filter_test)
macro(add_filter_test name filter expect)
add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
endmacro(add_filter_test)
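# The trailing argument is the number of benchmarks expected to match the given
# --benchmark_filter; filter_test verifies that count itself.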
add_filter_test(filter_simple "Foo" 3)
add_filter_test(filter_simple_negative "-Foo" 2)
add_filter_test(filter_suffix "BM_.*" 4)
add_filter_test(filter_suffix_negative "-BM_.*" 1)
add_filter_test(filter_regex_all ".*" 5)
add_filter_test(filter_regex_all_negative "-.*" 0)
add_filter_test(filter_regex_blank "" 5)
add_filter_test(filter_regex_blank_negative "-" 0)
add_filter_test(filter_regex_none "monkey" 0)
add_filter_test(filter_regex_none_negative "-monkey" 5)
add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2)
add_filter_test(filter_regex_begin "^BM_.*" 4)
add_filter_test(filter_regex_begin_negative "-^BM_.*" 1)
add_filter_test(filter_regex_begin2 "^N" 1)
add_filter_test(filter_regex_begin2_negative "-^N" 4)
add_filter_test(filter_regex_end ".*Ba$" 1)
add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
compile_benchmark_test(options_test)
add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01)
compile_benchmark_test(basic_test)
add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01)
compile_output_test(repetitions_test)
add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01 --benchmark_repetitions=3)
compile_benchmark_test(diagnostics_test)
add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01)
compile_benchmark_test(skip_with_error_test)
add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01)
compile_benchmark_test(donotoptimize_test)
# Some of the issues with DoNotOptimize only occur when optimization is enabled
if (BENCHMARK_HAS_O3_FLAG)
set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
endif()
add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01)
compile_benchmark_test(fixture_test)
add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01)
compile_benchmark_test(register_benchmark_test)
add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(map_test)
add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01)
compile_benchmark_test(multiple_ranges_test)
add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01)
compile_benchmark_test(args_product_test)
add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01)
compile_benchmark_test_with_main(link_main_test)
add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01)
compile_output_test(reporter_output_test)
add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01)
compile_output_test(templated_fixture_test)
add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01)
compile_output_test(user_counters_test)
add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01)
compile_output_test(perf_counters_test)
add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01 --benchmark_perf_counters=CYCLES,BRANCHES)
compile_output_test(internal_threading_test)
add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01)
compile_output_test(report_aggregates_only_test)
add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01)
compile_output_test(display_aggregates_only_test)
add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01)
compile_output_test(user_counters_tabular_test)
add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
compile_output_test(user_counters_thousands_test)
add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01)
compile_output_test(memory_manager_test)
add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01)
check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
compile_benchmark_test(cxx03_test)
set_target_properties(cxx03_test
PROPERTIES
CXX_STANDARD 98
CXX_STANDARD_REQUIRED YES)
# libstdc++ provides different definitions within <map> between dialects. When
# LTO is enabled and -Werror is specified GCC diagnoses this ODR violation
# causing the test to fail to compile. To prevent this we explicitly disable
# the warning.
check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
set_target_properties(cxx03_test
PROPERTIES
LINK_FLAGS "-Wno-odr")
endif()
add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01)
endif()
# Attempt to work around flaky test failures when running on Appveyor servers.
if (DEFINED ENV{APPVEYOR})
set(COMPLEXITY_MIN_TIME "0.5")
else()
set(COMPLEXITY_MIN_TIME "0.01")
endif()
compile_output_test(complexity_test)
add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
###############################################################################
# GoogleTest Unit Tests
###############################################################################
if (BENCHMARK_ENABLE_GTEST_TESTS)
macro(compile_gtest name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark::benchmark
gmock_main ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_gtest)
macro(add_gtest name)
compile_gtest(${name})
add_test(NAME ${name} COMMAND ${name})
endmacro()
add_gtest(benchmark_gtest)
add_gtest(benchmark_name_gtest)
add_gtest(benchmark_random_interleaving_gtest)
add_gtest(commandlineflags_gtest)
add_gtest(statistics_gtest)
add_gtest(string_util_gtest)
add_gtest(perf_counters_gtest)
endif(BENCHMARK_ENABLE_GTEST_TESTS)
###############################################################################
# Assembly Unit Tests
###############################################################################
if (BENCHMARK_ENABLE_ASSEMBLY_TESTS)
if (NOT LLVM_FILECHECK_EXE)
message(FATAL_ERROR "LLVM FileCheck is required when including this file")
endif()
include(AssemblyTests.cmake)
add_filecheck_test(donotoptimize_assembly_test)
add_filecheck_test(state_assembly_test)
add_filecheck_test(clobber_memory_assembly_test)
endif()
###############################################################################
# Code Coverage Configuration
###############################################################################
# Add the coverage command(s)
if(CMAKE_BUILD_TYPE)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
endif()
if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
find_program(GCOV gcov)
find_program(LCOV lcov)
find_program(GENHTML genhtml)
find_program(CTEST ctest)
if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE)
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html
COMMAND ${LCOV} -q -z -d .
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i
COMMAND ${CTEST} --force-new-ctest-process
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o after.lcov
COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov
COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov
COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark
DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMENT "Running LCOV"
)
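# The pipeline above zeroes the counters, takes a baseline capture (-i), runs
# the test suite, captures again, merges both captures, strips the test/
# sources from the result, and renders the HTML report with genhtml.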
add_custom_target(coverage
DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html
COMMENT "LCOV report at lcov/index.html"
)
message(STATUS "Coverage command added")
else()
if (HAVE_CXX_FLAG_COVERAGE)
set(CXX_FLAG_COVERAGE_MESSAGE supported)
else()
set(CXX_FLAG_COVERAGE_MESSAGE unavailable)
endif()
message(WARNING
"Coverage not available:\n"
" gcov: ${GCOV}\n"
" lcov: ${LCOV}\n"
" genhtml: ${GENHTML}\n"
" ctest: ${CTEST}\n"
" --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}")
endif()
endif()
07070100000066000081A400000000000000000000000160C0813C000008B0000000000000000000000000000000000000002A00000000benchmark-1.5.5/test/args_product_test.cc#include "benchmark/benchmark.h"
#include <cassert>
#include <iostream>
#include <set>
#include <vector>
class ArgsProductFixture : public ::benchmark::Fixture {
public:
ArgsProductFixture()
: expectedValues({{0, 100, 2000, 30000},
{1, 15, 3, 8},
{1, 15, 3, 9},
{1, 15, 7, 8},
{1, 15, 7, 9},
{1, 15, 10, 8},
{1, 15, 10, 9},
{2, 15, 3, 8},
{2, 15, 3, 9},
{2, 15, 7, 8},
{2, 15, 7, 9},
{2, 15, 10, 8},
{2, 15, 10, 9},
{4, 5, 6, 11}}) {}
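// The rows above mirror the registrations at the bottom of this file: one
// explicit Args() tuple, the twelve ArgsProduct() combinations, and a final
// Args() tuple.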
void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
std::vector<int64_t> ranges = {state.range(0), state.range(1),
state.range(2), state.range(3)};
assert(expectedValues.find(ranges) != expectedValues.end());
actualValues.insert(ranges);
}
// NOTE: This is not TearDown as we want to check after _all_ runs are
// complete.
virtual ~ArgsProductFixture() {
if (actualValues != expectedValues) {
std::cout << "EXPECTED\n";
for (auto v : expectedValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
std::cout << "ACTUAL\n";
for (auto v : actualValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
}
}
std::set<std::vector<int64_t>> expectedValues;
std::set<std::vector<int64_t>> actualValues;
};
BENCHMARK_DEFINE_F(ArgsProductFixture, Empty)(benchmark::State& state) {
for (auto _ : state) {
int64_t product =
state.range(0) * state.range(1) * state.range(2) * state.range(3);
for (int64_t x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
}
}
}
BENCHMARK_REGISTER_F(ArgsProductFixture, Empty)
->Args({0, 100, 2000, 30000})
->ArgsProduct({{1, 2}, {15}, {3, 7, 10}, {8, 9}})
->Args({4, 5, 6, 11});
BENCHMARK_MAIN();
07070100000067000081A400000000000000000000000160C0813C000011E8000000000000000000000000000000000000002300000000benchmark-1.5.5/test/basic_test.cc
#include "benchmark/benchmark.h"
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) {
for (auto _ : state) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
for (auto _ : state) {
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) {
for (auto _ : state) {
state.PauseTiming();
state.ResumeTiming();
}
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) {
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
void BM_KeepRunning(benchmark::State& state) {
benchmark::IterationCount iter_count = 0;
assert(iter_count == state.iterations());
while (state.KeepRunning()) {
++iter_count;
}
assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);
void BM_KeepRunningBatch(benchmark::State& state) {
// Choose a batch size >1000 to skip the typical runs with iteration
// targets of 10, 100 and 1000. If these are not actually skipped the
// bug would be detectable as consecutive runs with the same iteration
// count. Below we assert that this does not happen.
const benchmark::IterationCount batch_size = 1009;
static benchmark::IterationCount prior_iter_count = 0;
benchmark::IterationCount iter_count = 0;
while (state.KeepRunningBatch(batch_size)) {
iter_count += batch_size;
}
assert(state.iterations() == iter_count);
// Verify that the iteration count always increases across runs (see
// comment above).
assert(iter_count == batch_size // max_iterations == 1
|| iter_count > prior_iter_count); // max_iterations > batch_size
prior_iter_count = iter_count;
}
// Register with a fixed repetition count to establish the invariant that
// the iteration count should always change across runs. This overrides
// the --benchmark_repetitions command line flag, which would otherwise
// cause this test to fail if set > 1.
BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);
void BM_RangedFor(benchmark::State& state) {
benchmark::IterationCount iter_count = 0;
for (auto _ : state) {
++iter_count;
}
assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);
// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<
typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename benchmark::State::StateIterator::value_type>::value, "");
BENCHMARK_MAIN();
07070100000068000081A400000000000000000000000160C0813C00001296000000000000000000000000000000000000002800000000benchmark-1.5.5/test/benchmark_gtest.cc#include <map>
#include <string>
#include <vector>
#include "../src/benchmark_register.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace benchmark {
namespace internal {
extern std::map<std::string, std::string>* global_context;
namespace {
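// AddRange(dst, lo, hi, mult) appends lo, the (possibly negated) powers of
// mult lying strictly between lo and hi, and finally hi; ranges that cross
// zero always include -1, 0 and 1 (see NegativeRangesLargeStep below).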
TEST(AddRangeTest, Simple) {
std::vector<int> dst;
AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Simple64) {
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Advanced) {
std::vector<int> dst;
AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, Advanced64) {
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, FullRange8) {
std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
}
TEST(AddRangeTest, FullRange64) {
std::vector<int64_t> dst;
AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
EXPECT_THAT(
dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
1099511627776LL, 1125899906842624LL,
1152921504606846976LL, 9223372036854775807LL));
}
TEST(AddRangeTest, NegativeRanges) {
std::vector<int> dst;
AddRange(&dst, -8, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
}
TEST(AddRangeTest, StrictlyNegative) {
std::vector<int> dst;
AddRange(&dst, -8, -1, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
}
TEST(AddRangeTest, SymmetricNegativeRanges) {
std::vector<int> dst;
AddRange(&dst, -8, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
}
TEST(AddRangeTest, SymmetricNegativeRangesOddMult) {
std::vector<int> dst;
AddRange(&dst, -30, 32, 5);
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
}
TEST(AddRangeTest, NegativeRangesAsymmetric) {
std::vector<int> dst;
AddRange(&dst, -3, 5, 2);
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
}
TEST(AddRangeTest, NegativeRangesLargeStep) {
// Always include -1, 0, 1 when crossing zero.
std::vector<int> dst;
AddRange(&dst, -8, 8, 10);
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
}
TEST(AddRangeTest, ZeroOnlyRange) {
std::vector<int> dst;
AddRange(&dst, 0, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(0));
}
TEST(AddRangeTest, ZeroStartingRange) {
std::vector<int> dst;
AddRange(&dst, 0, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(0, 1, 2));
}
TEST(AddRangeTest, NegativeRange64) {
std::vector<int64_t> dst;
AddRange<int64_t>(&dst, -4, 4, 2);
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
}
TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
// If elements already exist in the destination vector, ensure we don't
// disturb their ordering when appending a range that spans negative values.
std::vector<int64_t> dst = {1, 2, 3};
AddRange<int64_t>(&dst, -2, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
}
TEST(AddRangeTest, FullNegativeRange64) {
std::vector<int64_t> dst;
const auto min = std::numeric_limits<int64_t>::min();
const auto max = std::numeric_limits<int64_t>::max();
AddRange(&dst, min, max, 1024);
EXPECT_THAT(
dst, testing::ElementsAreArray(std::vector<int64_t>{
min, -1152921504606846976LL, -1125899906842624LL,
-1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL,
1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL,
1125899906842624LL, 1152921504606846976LL, max}));
}
TEST(AddRangeTest, Simple8) {
std::vector<int8_t> dst;
AddRange<int8_t>(&dst, 1, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
}
TEST(AddCustomContext, Simple) {
EXPECT_THAT(global_context, nullptr);
AddCustomContext("foo", "bar");
AddCustomContext("baz", "qux");
EXPECT_THAT(*global_context,
testing::UnorderedElementsAre(testing::Pair("foo", "bar"),
testing::Pair("baz", "qux")));
delete global_context;
global_context = nullptr;
}
TEST(AddCustomContext, DuplicateKey) {
EXPECT_THAT(global_context, nullptr);
AddCustomContext("foo", "bar");
AddCustomContext("foo", "qux");
EXPECT_THAT(*global_context,
testing::UnorderedElementsAre(testing::Pair("foo", "bar")));
delete global_context;
global_context = nullptr;
}
} // namespace
} // namespace internal
} // namespace benchmark
07070100000069000081A400000000000000000000000160C0813C0000082A000000000000000000000000000000000000002D00000000benchmark-1.5.5/test/benchmark_name_gtest.cc#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
namespace {
using namespace benchmark;
using namespace benchmark::internal;
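// BenchmarkName::str() joins whichever of its fields are non-empty with '/',
// in declaration order, as the tests below exercise.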
TEST(BenchmarkNameTest, Empty) {
const auto name = BenchmarkName();
EXPECT_EQ(name.str(), std::string());
}
TEST(BenchmarkNameTest, FunctionName) {
auto name = BenchmarkName();
name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name");
}
TEST(BenchmarkNameTest, FunctionNameAndArgs) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4/5";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
}
TEST(BenchmarkNameTest, MinTime) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4";
name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
}
TEST(BenchmarkNameTest, Iterations) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.iterations = "iterations:42";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
}
TEST(BenchmarkNameTest, Repetitions) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.repetitions = "repetitions:24";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
}
TEST(BenchmarkNameTest, TimeType) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.time_type = "hammer_time";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
}
TEST(BenchmarkNameTest, Threads) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.threads = "threads:256";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
}
TEST(BenchmarkNameTest, TestEmptyFunctionName) {
auto name = BenchmarkName();
name.args = "first:3/second:4";
name.threads = "threads:22";
EXPECT_EQ(name.str(), "first:3/second:4/threads:22");
}
} // end namespace
0707010000006A000081A400000000000000000000000160C0813C00000D83000000000000000000000000000000000000003C00000000benchmark-1.5.5/test/benchmark_random_interleaving_gtest.cc#include <queue>
#include <string>
#include <vector>
#include "../src/commandlineflags.h"
#include "../src/string_util.h"
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
DECLARE_bool(benchmark_enable_random_interleaving);
DECLARE_string(benchmark_filter);
DECLARE_int32(benchmark_repetitions);
namespace benchmark {
namespace internal {
namespace {
class EventQueue : public std::queue<std::string> {
public:
void Put(const std::string& event) { push(event); }
void Clear() {
while (!empty()) {
pop();
}
}
std::string Get() {
std::string event = front();
pop();
return event;
}
};
static EventQueue* queue = new EventQueue;
class NullReporter : public BenchmarkReporter {
public:
bool ReportContext(const Context& /*context*/) override { return true; }
void ReportRuns(const std::vector<Run>& /* report */) override {}
};
class BenchmarkTest : public testing::Test {
public:
static void SetupHook(int /* num_threads */) { queue->push("Setup"); }
static void TeardownHook(int /* num_threads */) { queue->push("Teardown"); }
void Execute(const std::string& pattern) {
queue->Clear();
BenchmarkReporter* reporter = new NullReporter;
FLAGS_benchmark_filter = pattern;
RunSpecifiedBenchmarks(reporter);
delete reporter;
queue->Put("DONE"); // End marker
}
};
static void BM_Match1(benchmark::State& state) {
const int64_t arg = state.range(0);
for (auto _ : state) {
}
queue->Put(StrFormat("BM_Match1/%d", static_cast<int>(arg)));
}
BENCHMARK(BM_Match1)
->Iterations(100)
->Arg(1)
->Arg(2)
->Arg(3)
->Range(10, 80)
->Args({90})
->Args({100});
TEST_F(BenchmarkTest, Match1) {
Execute("BM_Match1");
ASSERT_EQ("BM_Match1/1", queue->Get());
ASSERT_EQ("BM_Match1/2", queue->Get());
ASSERT_EQ("BM_Match1/3", queue->Get());
ASSERT_EQ("BM_Match1/10", queue->Get());
ASSERT_EQ("BM_Match1/64", queue->Get());
ASSERT_EQ("BM_Match1/80", queue->Get());
ASSERT_EQ("BM_Match1/90", queue->Get());
ASSERT_EQ("BM_Match1/100", queue->Get());
ASSERT_EQ("DONE", queue->Get());
}
TEST_F(BenchmarkTest, Match1WithRepetition) {
FLAGS_benchmark_repetitions = 2;
Execute("BM_Match1/(64|80)");
ASSERT_EQ("BM_Match1/64", queue->Get());
ASSERT_EQ("BM_Match1/64", queue->Get());
ASSERT_EQ("BM_Match1/80", queue->Get());
ASSERT_EQ("BM_Match1/80", queue->Get());
ASSERT_EQ("DONE", queue->Get());
}
TEST_F(BenchmarkTest, Match1WithRandomInterleaving) {
FLAGS_benchmark_enable_random_interleaving = true;
FLAGS_benchmark_repetitions = 100;
std::map<std::string, int> element_count;
std::map<std::string, int> interleaving_count;
Execute("BM_Match1/(64|80)");
for (int i = 0; i < 100; ++i) {
std::vector<std::string> interleaving;
interleaving.push_back(queue->Get());
interleaving.push_back(queue->Get());
element_count[interleaving[0]]++;
element_count[interleaving[1]]++;
interleaving_count[StrFormat("%s,%s", interleaving[0].c_str(),
interleaving[1].c_str())]++;
}
EXPECT_EQ(element_count["BM_Match1/64"], 100) << "Unexpected repetitions.";
EXPECT_EQ(element_count["BM_Match1/80"], 100) << "Unexpected repetitions.";
EXPECT_GE(interleaving_count.size(), 2) << "Interleaving was not randomized.";
ASSERT_EQ("DONE", queue->Get());
}
} // namespace
} // namespace internal
} // namespace benchmark
0707010000006B000081A400000000000000000000000160C0813C00001C4F000000000000000000000000000000000000002700000000benchmark-1.5.5/test/benchmark_test.cc#include "benchmark/benchmark.h"
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#if defined(__GNUC__)
#define BENCHMARK_NOINLINE __attribute__((noinline))
#else
#define BENCHMARK_NOINLINE
#endif
namespace {
int BENCHMARK_NOINLINE Factorial(uint32_t n) {
return (n == 1) ? 1 : n * Factorial(n - 1);
}
double CalculatePi(int depth) {
double pi = 0.0;
for (int i = 0; i < depth; ++i) {
double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator;
}
return (pi - 1.0) * 4;
}
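// (CalculatePi sums a Leibniz-style alternating series; after the
// (pi - 1.0) * 4 correction the result converges to pi as depth grows.)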
std::set<int64_t> ConstructRandomSet(int64_t size) {
std::set<int64_t> s;
for (int i = 0; i < size; ++i) s.insert(s.end(), i);
return s;
}
std::mutex test_vector_mu;
std::vector<int>* test_vector = nullptr;
} // end namespace
static void BM_Factorial(benchmark::State& state) {
int fac_42 = 0;
for (auto _ : state) fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
state.SetLabel(ss.str());
}
BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
}
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
for (auto _ : state) {
benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
}
}
BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
std::set<int64_t> data;
for (auto _ : state) {
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j) data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}
// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
// non-timed part of each iteration will make the benchmark take forever.
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
template <typename Container,
typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
for (auto _ : state) {
Container c;
for (int64_t i = state.range(0); --i;) c.push_back(v);
}
const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#ifdef BENCHMARK_HAS_CXX11
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif
static void BM_StringCompare(benchmark::State& state) {
size_t len = static_cast<size_t>(state.range(0));
std::string s1(len, '-');
std::string s2(len, '-');
for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
static void BM_SetupTeardown(benchmark::State& state) {
if (state.thread_index == 0) {
// No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>();
}
int i = 0;
for (auto _ : state) {
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
else
test_vector->pop_back();
++i;
}
if (state.thread_index == 0) {
delete test_vector;
}
}
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
double tracker = 0.0;
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
}
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State& state) {
int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
if (state.thread_index == 0) {
test_vector = new std::vector<int>(static_cast<size_t>(size));
}
for (auto _ : state) {
for (int i = from; i < to; i++) {
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1);
}
}
if (state.thread_index == 0) {
delete test_vector;
}
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State& state) {
int64_t slept_for = 0;
int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
for (auto _ : state) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now();
auto elapsed =
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed.count());
slept_for += microseconds;
}
state.SetItemsProcessed(slept_for);
}
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
#ifdef BENCHMARK_HAS_CXX11
template <class... Args>
void BM_with_args(benchmark::State& state, Args&&...) {
for (auto _ : state) {
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"),
std::pair<int, double>(42, 3.8));
void BM_non_template_args(benchmark::State& state, int, double) {
while(state.KeepRunning()) {}
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
#endif // BENCHMARK_HAS_CXX11
static void BM_DenseThreadRanges(benchmark::State& st) {
switch (st.range(0)) {
case 1:
assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break;
case 2:
assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break;
case 3:
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 ||
st.threads == 14);
break;
default:
assert(false && "Invalid test case number");
}
while (st.KeepRunning()) {
}
}
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);
BENCHMARK_MAIN();
0707010000006C000081A400000000000000000000000160C0813C00000551000000000000000000000000000000000000003500000000benchmark-1.5.5/test/clobber_memory_assembly_test.cc#include <benchmark/benchmark.h>
#ifdef __clang__
#pragma clang diagnostic ignored "-Wreturn-type"
#endif
extern "C" {
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
}
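// Each test below checks that ClobberMemory() forces otherwise removable loads
// and stores to actually be emitted; the CHECK lines are FileCheck patterns
// matched against the generated assembly.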
// CHECK-LABEL: test_basic:
extern "C" void test_basic() {
int x;
benchmark::DoNotOptimize(&x);
x = 101;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: movl $101, [[DEST]]
// CHECK: ret
}
// CHECK-LABEL: test_redundant_store:
extern "C" void test_redundant_store() {
ExternInt = 3;
benchmark::ClobberMemory();
ExternInt = 51;
// CHECK-DAG: ExternInt
// CHECK-DAG: movl $3
// CHECK: movl $51
}
// CHECK-LABEL: test_redundant_read:
extern "C" void test_redundant_read() {
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK-NOT: ExternInt2
// CHECK: ret
}
// CHECK-LABEL: test_redundant_read2:
extern "C" void test_redundant_read2() {
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ExternInt2(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ret
}
0707010000006D000081A400000000000000000000000160C0813C000018A2000000000000000000000000000000000000002F00000000benchmark-1.5.5/test/commandlineflags_gtest.cc#include <cstdlib>
#include "../src/commandlineflags.h"
#include "../src/internal_macros.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace benchmark {
namespace {
#if defined(BENCHMARK_OS_WINDOWS)
int setenv(const char* name, const char* value, int overwrite) {
if (!overwrite) {
// NOTE: getenv_s is far superior but not available under mingw.
char* env_value = getenv(name);
if (env_value == nullptr) {
return -1;
}
}
return _putenv_s(name, value);
}
int unsetenv(const char* name) { return _putenv_s(name, ""); }
#endif // BENCHMARK_OS_WINDOWS
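// As exercised below, BoolFromEnv returns the default when the variable is
// unset; otherwise values such as 0/n/no/f/false/off (case-insensitive) read
// as false and anything else, including the empty string, reads as true.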
TEST(BoolFromEnv, Default) {
ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
}
TEST(BoolFromEnv, False) {
ASSERT_EQ(setenv("IN_ENV", "0", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "N", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "n", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "NO", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "No", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "no", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "F", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "f", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "FALSE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "False", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "false", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "OFF", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "Off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("IN_ENV");
}
TEST(BoolFromEnv, True) {
ASSERT_EQ(setenv("IN_ENV", "1", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "Y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "YES", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "Yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "T", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "t", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "TRUE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "True", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "true", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "ON", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "On", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
ASSERT_EQ(setenv("IN_ENV", "on", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
#ifndef BENCHMARK_OS_WINDOWS
ASSERT_EQ(setenv("IN_ENV", "", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("IN_ENV");
#endif
}
TEST(Int32FromEnv, NotInEnv) {
ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
}
TEST(Int32FromEnv, InvalidInteger) {
ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
unsetenv("IN_ENV");
}
TEST(Int32FromEnv, ValidInteger) {
ASSERT_EQ(setenv("IN_ENV", "42", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
unsetenv("IN_ENV");
}
TEST(DoubleFromEnv, NotInEnv) {
ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
}
TEST(DoubleFromEnv, InvalidReal) {
ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
unsetenv("IN_ENV");
}
TEST(DoubleFromEnv, ValidReal) {
ASSERT_EQ(setenv("IN_ENV", "0.51", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
unsetenv("IN_ENV");
}
TEST(StringFromEnv, Default) {
ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
}
TEST(StringFromEnv, Valid) {
ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
unsetenv("IN_ENV");
}
TEST(KvPairsFromEnv, Default) {
ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_THAT(KvPairsFromEnv("not_in_env", {{"foo", "bar"}}),
testing::ElementsAre(testing::Pair("foo", "bar")));
}
TEST(KvPairsFromEnv, MalformedReturnsDefault) {
ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
EXPECT_THAT(KvPairsFromEnv("in_env", {{"foo", "bar"}}),
testing::ElementsAre(testing::Pair("foo", "bar")));
unsetenv("IN_ENV");
}
TEST(KvPairsFromEnv, Single) {
ASSERT_EQ(setenv("IN_ENV", "foo=bar", 1), 0);
EXPECT_THAT(KvPairsFromEnv("in_env", {}),
testing::ElementsAre(testing::Pair("foo", "bar")));
unsetenv("IN_ENV");
}
TEST(KvPairsFromEnv, Multiple) {
ASSERT_EQ(setenv("IN_ENV", "foo=bar,baz=qux", 1), 0);
EXPECT_THAT(KvPairsFromEnv("in_env", {}),
testing::UnorderedElementsAre(testing::Pair("foo", "bar"),
testing::Pair("baz", "qux")));
unsetenv("IN_ENV");
}
} // namespace
} // namespace benchmark
0707010000006E000081A400000000000000000000000160C0813C00002124000000000000000000000000000000000000002800000000benchmark-1.5.5/test/complexity_test.cc#undef NDEBUG
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include "benchmark/benchmark.h"
#include "output_test.h"
namespace {
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
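// Each ADD_COMPLEXITY_CASES expansion defines a uniquely named dummy int whose
// initializer registers the expected-output patterns at static initialization
// time.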
int AddComplexityTest(std::string test_name, std::string big_o_test_name,
std::string rms_test_name, std::string big_o,
int family_index) {
SetSubstitutions({{"%name", test_name},
{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
{"%rms", "[ ]*[0-9]+ %"}});
AddCases(
TC_ConsoleOut,
{{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(
TC_JSONOut,
{{"\"name\": \"%bigo_name\",$"},
{"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": %int,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"BigO\",$", MR_Next},
{"\"cpu_coefficient\": %float,$", MR_Next},
{"\"real_coefficient\": %float,$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
{"\"name\": \"%rms_name\",$"},
{"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": %int,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"RMS\",$", MR_Next},
{"\"rms\": %float$", MR_Next},
{"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
{"^\"%bigo_name\"", MR_Not},
{"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
return 0;
}
} // end namespace
// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
for (int i = 0; i < 1024; ++i) {
benchmark::DoNotOptimize(&i);
}
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)
->Range(1, 1 << 18)
->Complexity([](benchmark::IterationCount) { return 1.0; });
const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
// deduced.
// See https://github.com/google/benchmark/issues/272
const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
enum_big_o_1, /*family_index=*/0);
// Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
auto_big_o_1, /*family_index=*/1);
// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
lambda_big_o_1, /*family_index=*/2);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
std::vector<int> ConstructRandomVector(int64_t size) {
std::vector<int> v;
v.reserve(static_cast<int>(size));
for (int i = 0; i < size; ++i) {
v.push_back(static_cast<int>(std::rand() % size));
}
return v;
}
void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
// Test worst case scenario (item not in vector)
const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) -> double {
return static_cast<double>(n);
});
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
enum_auto_big_o_n, /*family_index=*/3);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
lambda_big_o_n, /*family_index=*/4);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
for (auto _ : state) {
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));
}
static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) {
return kLog2E * n * log(static_cast<double>(n));
});
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
/*family_index=*/6);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
/*family_index=*/7);
// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //
void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(n);
}
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
->Complexity(benchmark::oN)
->Ranges({{1, 2}, {3, 4}});
const std::string complexity_capture_name =
"BM_ComplexityCaptureArgs/capture_test";
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
complexity_capture_name + "_RMS", "N", /*family_index=*/9);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }
0707010000006F000081A400000000000000000000000160C0813C00000652000000000000000000000000000000000000002300000000benchmark-1.5.5/test/cxx03_test.cc#undef NDEBUG
#include <cassert>
#include <cstddef>
#include "benchmark/benchmark.h"
#if __cplusplus >= 201103L
#error C++11 or greater detected. Should be C++03.
#endif
#ifdef BENCHMARK_HAS_CXX11
#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
#endif
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
volatile benchmark::IterationCount x = state.iterations();
((void)x);
}
}
BENCHMARK(BM_empty);
// The new C++11 interface for args/ranges requires initializer list support.
// Therefore we provide the old interface to support C++03.
void BM_old_arg_range_interface(benchmark::State& state) {
assert((state.range(0) == 1 && state.range(1) == 2) ||
(state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
template <class T, class U>
void BM_template2(benchmark::State& state) {
BM_empty(state);
}
BENCHMARK_TEMPLATE2(BM_template2, int, long);
template <class T>
void BM_template1(benchmark::State& state) {
BM_empty(state);
}
BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int);
template <class T>
struct BM_Fixture : public ::benchmark::Fixture {
};
BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
BM_empty(state);
}
BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
BM_empty(state);
}
void BM_counters(benchmark::State& state) {
BM_empty(state);
state.counters["Foo"] = 2;
}
BENCHMARK(BM_counters);
BENCHMARK_MAIN();
07070100000070000081A400000000000000000000000160C0813C0000075A000000000000000000000000000000000000002900000000benchmark-1.5.5/test/diagnostics_test.cc// Testing:
// State::PauseTiming()
// State::ResumeTiming()
// Test that the CHECKs within these functions diagnose misuse when they are
// called outside of the KeepRunning() loop.
//
// NOTE: Users should NOT include or use src/check.h. This is only done in
// order to test library internals.
#include <cstdlib>
#include <stdexcept>
#include "../src/check.h"
#include "benchmark/benchmark.h"
#if defined(__GNUC__) && !defined(__EXCEPTIONS)
#define TEST_HAS_NO_EXCEPTIONS
#endif
void TestHandler() {
#ifndef TEST_HAS_NO_EXCEPTIONS
throw std::logic_error("");
#else
std::abort();
#endif
}
void try_invalid_pause_resume(benchmark::State& state) {
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
try {
state.PauseTiming();
std::abort();
} catch (std::logic_error const&) {
}
try {
state.ResumeTiming();
std::abort();
} catch (std::logic_error const&) {
}
#else
(void)state; // avoid unused warning
#endif
}
void BM_diagnostic_test(benchmark::State& state) {
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
called_once = true;
}
BENCHMARK(BM_diagnostic_test);
void BM_diagnostic_test_keep_running(benchmark::State& state) {
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
while(state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
called_once = true;
}
BENCHMARK(BM_diagnostic_test_keep_running);
int main(int argc, char* argv[]) {
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
}
07070100000071000081A400000000000000000000000160C0813C00000657000000000000000000000000000000000000003500000000benchmark-1.5.5/test/display_aggregates_only_test.cc
#undef NDEBUG
#include <cstdio>
#include <string>
#include "benchmark/benchmark.h"
#include "output_test.h"
// Ok this test is super ugly. We want to check what happens with the file
// reporter in the presence of DisplayAggregatesOnly().
// We do not care about console output, the normal tests check that already.
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();
int main(int argc, char* argv[]) {
const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
std::cout << "Precondition mismatch. Expected to only find 6 "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}
return 0;
}
07070100000072000081A400000000000000000000000160C0813C000010D4000000000000000000000000000000000000003400000000benchmark-1.5.5/test/donotoptimize_assembly_test.cc#include <benchmark/benchmark.h>
#ifdef __clang__
#pragma clang diagnostic ignored "-Wreturn-type"
#endif
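// The `CHECK` comments below are LLVM FileCheck directives: when assembly
// tests are enabled in the build, this file is compiled to assembly and the
// patterns are matched against it. CHECK-LABEL anchors matching at a
// function's symbol, while the CHECK-GNU / CHECK-CLANG prefixes apply only
// under the matching compiler.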
extern "C" {
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
inline int Add42(int x) { return x + 42; }
struct NotTriviallyCopyable {
NotTriviallyCopyable();
explicit NotTriviallyCopyable(int x) : value(x) {}
NotTriviallyCopyable(NotTriviallyCopyable const&);
int value;
};
struct Large {
int value;
int data[2];
};
}
// CHECK-LABEL: test_with_rvalue:
extern "C" void test_with_rvalue() {
benchmark::DoNotOptimize(Add42(0));
// CHECK: movl $42, %eax
// CHECK: ret
}
// CHECK-LABEL: test_with_large_rvalue:
extern "C" void test_with_large_rvalue() {
benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}
// CHECK-LABEL: test_with_non_trivial_rvalue:
extern "C" void test_with_non_trivial_rvalue() {
benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
// CHECK: mov{{l|q}} ExternInt(%rip)
// CHECK: ret
}
// CHECK-LABEL: test_with_lvalue:
extern "C" void test_with_lvalue() {
int x = 101;
benchmark::DoNotOptimize(x);
// CHECK-GNU: movl $101, %eax
// CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
}
// CHECK-LABEL: test_with_large_lvalue:
extern "C" void test_with_large_lvalue() {
Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}
// CHECK-LABEL: test_with_non_trivial_lvalue:
extern "C" void test_with_non_trivial_lvalue() {
NotTriviallyCopyable NTC(ExternInt);
benchmark::DoNotOptimize(NTC);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
}
// CHECK-LABEL: test_with_const_lvalue:
extern "C" void test_with_const_lvalue() {
const int x = 123;
benchmark::DoNotOptimize(x);
// CHECK: movl $123, %eax
// CHECK: ret
}
// CHECK-LABEL: test_with_large_const_lvalue:
extern "C" void test_with_large_const_lvalue() {
const Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}
// CHECK-LABEL: test_with_non_trivial_const_lvalue:
extern "C" void test_with_non_trivial_const_lvalue() {
const NotTriviallyCopyable Obj(ExternInt);
benchmark::DoNotOptimize(Obj);
// CHECK: mov{{q|l}} ExternInt(%rip)
// CHECK: ret
}
// CHECK-LABEL: test_div_by_two:
extern "C" int test_div_by_two(int input) {
int divisor = 2;
benchmark::DoNotOptimize(divisor);
return input / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
}
// CHECK-LABEL: test_inc_integer:
extern "C" int test_inc_integer() {
int x = 0;
for (int i = 0; i < 5; ++i)
benchmark::DoNotOptimize(++x);
// CHECK: movl $1, [[DEST:.*]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK-CLANG: movl [[DEST]], %eax
// CHECK: ret
return x;
}
// CHECK-LABEL: test_pointer_rvalue
extern "C" void test_pointer_rvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
benchmark::DoNotOptimize(&x);
}
// CHECK-LABEL: test_pointer_const_lvalue:
extern "C" void test_pointer_const_lvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
int * const xp = &x;
benchmark::DoNotOptimize(xp);
}
// CHECK-LABEL: test_pointer_lvalue:
extern "C" void test_pointer_lvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
int *xp = &x;
benchmark::DoNotOptimize(xp);
}
07070100000073000081A400000000000000000000000160C0813C000004D1000000000000000000000000000000000000002B00000000benchmark-1.5.5/test/donotoptimize_test.cc#include "benchmark/benchmark.h"
#include <cstdint>
namespace {
#if defined(__GNUC__)
std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
#endif
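// The GCC `const` attribute declares that the result depends only on the
// arguments, so a call whose result looks unused may legally be deleted;
// passing the result to DoNotOptimize below keeps the call alive.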
std::uint64_t double_up(const std::uint64_t x) { return x * 2; }
}
// Using DoNotOptimize on types like BitRef seems to cause a lot of problems
// with the inline assembly on both GCC and Clang.
struct BitRef {
int index;
unsigned char &byte;
public:
static BitRef Make() {
static unsigned char arr[2] = {};
BitRef b(1, arr[0]);
return b;
}
private:
BitRef(int i, unsigned char& b) : index(i), byte(b) {}
};
int main(int, char*[]) {
// this test verifies compilation of DoNotOptimize() for some types
char buffer8[8] = "";
benchmark::DoNotOptimize(buffer8);
char buffer20[20] = "";
benchmark::DoNotOptimize(buffer20);
char buffer1024[1024] = "";
benchmark::DoNotOptimize(buffer1024);
benchmark::DoNotOptimize(&buffer1024[0]);
int x = 123;
benchmark::DoNotOptimize(x);
benchmark::DoNotOptimize(&x);
benchmark::DoNotOptimize(x += 42);
benchmark::DoNotOptimize(double_up(x));
// These tests exercise DoNotOptimize() with the problematic BitRef type,
// both as an rvalue and as an lvalue.
benchmark::DoNotOptimize(BitRef::Make());
BitRef lval = BitRef::Make();
benchmark::DoNotOptimize(lval);
}
07070100000074000081A400000000000000000000000160C0813C00000B95000000000000000000000000000000000000002400000000benchmark-1.5.5/test/filter_test.cc#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include "benchmark/benchmark.h"
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
++count_;
max_family_index_ =
std::max<size_t>(max_family_index_, report[0].family_index);
ConsoleReporter::ReportRuns(report);
};
TestReporter() : count_(0), max_family_index_(0) {}
virtual ~TestReporter() {}
size_t GetCount() const { return count_; }
size_t GetMaxFamilyIndex() const { return max_family_index_; }
private:
mutable size_t count_;
mutable size_t max_family_index_;
};
} // end namespace
static void NoPrefix(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_FooBa);
int main(int argc, char **argv) {
bool list_only = false;
for (int i = 0; i < argc; ++i)
list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
std::string::npos;
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&test_reporter);
if (argc == 2) {
// Make sure we ran all of the tests
std::stringstream ss(argv[1]);
size_t expected_return;
ss >> expected_return;
if (returned_count != expected_return) {
std::cerr << "ERROR: Expected " << expected_return
<< " tests to match the filter but returned_count = "
<< returned_count << std::endl;
return -1;
}
const size_t expected_reports = list_only ? 0 : expected_return;
const size_t reports_count = test_reporter.GetCount();
if (reports_count != expected_reports) {
std::cerr << "ERROR: Expected " << expected_reports
<< " tests to be run but reported_count = " << reports_count
<< std::endl;
return -1;
}
const size_t max_family_index = test_reporter.GetMaxFamilyIndex();
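// Families are indexed contiguously from 0, so the number of distinct
// families that ran is max_family_index + 1 (or 0 if nothing ran).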
const size_t num_families = reports_count == 0 ? 0 : 1 + max_family_index;
if (num_families != expected_reports) {
std::cerr << "ERROR: Expected " << expected_reports
<< " test families to be run but num_families = "
<< num_families << std::endl;
return -1;
}
}
return 0;
}
07070100000075000081A400000000000000000000000160C0813C000004C3000000000000000000000000000000000000002500000000benchmark-1.5.5/test/fixture_test.cc
#include "benchmark/benchmark.h"
#include <cassert>
#include <memory>
#define FIXTURE_BENCHMARK_NAME MyFixture
class FIXTURE_BENCHMARK_NAME : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
if (state.thread_index == 0) {
assert(data.get() == nullptr);
data.reset(new int(42));
}
}
void TearDown(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
if (state.thread_index == 0) {
assert(data.get() != nullptr);
data.reset();
}
}
~FIXTURE_BENCHMARK_NAME() { assert(data == nullptr); }
std::unique_ptr<int> data;
};
BENCHMARK_F(FIXTURE_BENCHMARK_NAME, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
for (auto _ : st) {
}
}
BENCHMARK_DEFINE_F(FIXTURE_BENCHMARK_NAME, Bar)(benchmark::State& st) {
if (st.thread_index == 0) {
assert(data.get() != nullptr);
assert(*data == 42);
}
for (auto _ : st) {
assert(data.get() != nullptr);
assert(*data == 42);
}
st.SetItemsProcessed(st.range(0));
}
BENCHMARK_REGISTER_F(FIXTURE_BENCHMARK_NAME, Bar)->Arg(42);
BENCHMARK_REGISTER_F(FIXTURE_BENCHMARK_NAME, Bar)->Arg(42)->ThreadPerCpu();
BENCHMARK_MAIN();
07070100000076000081A400000000000000000000000160C0813C000015FF000000000000000000000000000000000000003000000000benchmark-1.5.5/test/internal_threading_test.cc
#undef NDEBUG
#include <chrono>
#include <thread>
#include "../src/timers.h"
#include "benchmark/benchmark.h"
#include "output_test.h"
static const std::chrono::duration<double, std::milli> time_frame(50);
static const double time_frame_in_sec(
std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(
time_frame)
.count());
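// Busy-spin on the wall clock for `time_frame` (50ms), so each call burns
// roughly 50ms of both real time and CPU time on the calling thread.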
void MyBusySpinwait() {
const auto start = benchmark::ChronoClockNow();
while (true) {
const auto now = benchmark::ChronoClockNow();
const auto elapsed = now - start;
if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >=
time_frame)
return;
}
}
// ========================================================================= //
// --------------------------- TEST CASES BEGIN ---------------------------- //
// ========================================================================= //
// ========================================================================= //
// BM_MainThread
void BM_MainThread(benchmark::State& state) {
for (auto _ : state) {
MyBusySpinwait();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= //
// BM_WorkerThread
void BM_WorkerThread(benchmark::State& state) {
for (auto _ : state) {
std::thread Worker(&MyBusySpinwait);
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= //
// BM_MainThreadAndWorkerThread
void BM_MainThreadAndWorkerThread(benchmark::State& state) {
for (auto _ : state) {
std::thread Worker(&MyBusySpinwait);
MyBusySpinwait();
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= //
// ---------------------------- TEST CASES END ----------------------------- //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
07070100000077000081A400000000000000000000000160C0813C000000B1000000000000000000000000000000000000002700000000benchmark-1.5.5/test/link_main_test.cc#include "benchmark/benchmark.h"
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
07070100000078000081A400000000000000000000000160C0813C000005BE000000000000000000000000000000000000002100000000benchmark-1.5.5/test/map_test.cc#include "benchmark/benchmark.h"
#include <cstdlib>
#include <map>
namespace {
std::map<int, int> ConstructRandomMap(int size) {
std::map<int, int> m;
for (int i = 0; i < size; ++i) {
m.insert(std::make_pair(std::rand() % size, std::rand() % size));
}
return m;
}
} // namespace
// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = static_cast<int>(state.range(0));
std::map<int, int> m;
for (auto _ : state) {
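// Exclude the (re)construction of the map from the timed region; only the
// lookups below are measured.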
state.PauseTiming();
m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
// Using fixtures.
class MapFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& st) BENCHMARK_OVERRIDE {
m = ConstructRandomMap(static_cast<int>(st.range(0)));
}
void TearDown(const ::benchmark::State&) BENCHMARK_OVERRIDE { m.clear(); }
std::map<int, int> m;
};
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = static_cast<int>(state.range(0));
for (auto _ : state) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
BENCHMARK_MAIN();
07070100000079000081A400000000000000000000000160C0813C000006BA000000000000000000000000000000000000002C00000000benchmark-1.5.5/test/memory_manager_test.cc#include <memory>
#include "../src/check.h"
#include "benchmark/benchmark.h"
#include "output_test.h"
class TestMemoryManager : public benchmark::MemoryManager {
void Start() BENCHMARK_OVERRIDE {}
void Stop(Result* result) BENCHMARK_OVERRIDE {
result->num_allocs = 42;
result->max_bytes_used = 42000;
}
};
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_empty\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"allocs_per_iter\": %float,$", MR_Next},
{"\"max_bytes_used\": 42000$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
int main(int argc, char* argv[]) {
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
benchmark::RegisterMemoryManager(mm.get());
RunOutputTests(argc, argv);
benchmark::RegisterMemoryManager(nullptr);
}
0707010000007A000081A400000000000000000000000160C0813C00000AB0000000000000000000000000000000000000002D00000000benchmark-1.5.5/test/multiple_ranges_test.cc#include "benchmark/benchmark.h"
#include <cassert>
#include <iostream>
#include <set>
#include <vector>
class MultipleRangesFixture : public ::benchmark::Fixture {
public:
MultipleRangesFixture()
: expectedValues({{1, 3, 5},
{1, 3, 8},
{1, 3, 15},
{2, 3, 5},
{2, 3, 8},
{2, 3, 15},
{1, 4, 5},
{1, 4, 8},
{1, 4, 15},
{2, 4, 5},
{2, 4, 8},
{2, 4, 15},
{1, 7, 5},
{1, 7, 8},
{1, 7, 15},
{2, 7, 5},
{2, 7, 8},
{2, 7, 15},
{7, 6, 3}}) {}
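// The set above is the cross product generated by RangeMultiplier(2) over
// Ranges({{1, 2}, {3, 7}, {5, 15}}) -- i.e. {1, 2} x {3, 4, 7} x {5, 8, 15} --
// plus the explicitly registered Args({7, 6, 3}) case.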
void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
std::vector<int64_t> ranges = {state.range(0), state.range(1),
state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end());
actualValues.insert(ranges);
}
// NOTE: This is not TearDown as we want to check after _all_ runs are
// complete.
virtual ~MultipleRangesFixture() {
if (actualValues != expectedValues) {
std::cout << "EXPECTED\n";
for (auto v : expectedValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
std::cout << "ACTUAL\n";
for (auto v : actualValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
}
}
std::set<std::vector<int64_t>> expectedValues;
std::set<std::vector<int64_t>> actualValues;
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
for (auto _ : state) {
int64_t product = state.range(0) * state.range(1) * state.range(2);
for (int64_t x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
}
}
}
BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
->RangeMultiplier(2)
->Ranges({{1, 2}, {3, 7}, {5, 15}})
->Args({7, 6, 3});
void BM_CheckDefaultArgument(benchmark::State& state) {
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
for (auto _ : state) {
}
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State& st) {
for (auto _ : st) {
}
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
BENCHMARK_MAIN();
0707010000007B000081A400000000000000000000000160C0813C000008F3000000000000000000000000000000000000002500000000benchmark-1.5.5/test/options_test.cc#include "benchmark/benchmark.h"
#include <chrono>
#include <limits>
#include <thread>
#if defined(NDEBUG)
#undef NDEBUG
#endif
#include <cassert>
void BM_basic(benchmark::State& state) {
for (auto _ : state) {
}
}
void BM_basic_slow(benchmark::State& state) {
std::chrono::milliseconds sleep_duration(state.range(0));
for (auto _ : state) {
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
}
BENCHMARK(BM_basic);
BENCHMARK(BM_basic)->Arg(42);
BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kSecond);
BENCHMARK(BM_basic)->Range(1, 8);
BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
BENCHMARK(BM_basic)->DenseRange(10, 15);
BENCHMARK(BM_basic)->Args({42, 42});
BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}});
BENCHMARK(BM_basic)->MinTime(0.7);
BENCHMARK(BM_basic)->UseRealTime();
BENCHMARK(BM_basic)->ThreadRange(2, 4);
BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
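// Stress argument generation: an extreme multiplier combined with the full
// int64_t range must not overflow while the argument list is built.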
BENCHMARK(BM_basic)
->RangeMultiplier(std::numeric_limits<int>::max())
->Range(std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max());
// Negative ranges
BENCHMARK(BM_basic)->Range(-64, -1);
BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
void CustomArgs(benchmark::internal::Benchmark* b) {
for (int i = 0; i < 10; ++i) {
b->Arg(i);
}
}
BENCHMARK(BM_basic)->Apply(CustomArgs);
void BM_explicit_iteration_count(benchmark::State& state) {
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
assert(!invoked_before);
invoked_before = true;
// Test that the requested iteration count is respected.
assert(state.max_iterations == 42);
size_t actual_iterations = 0;
for (auto _ : state) ++actual_iterations;
// The body must run exactly the requested number of times.
assert(actual_iterations == 42);
assert(state.iterations() == state.max_iterations);
assert(state.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
BENCHMARK_MAIN();
0707010000007C000081A400000000000000000000000160C0813C00001ECF000000000000000000000000000000000000002300000000benchmark-1.5.5/test/output_test.h#ifndef TEST_OUTPUT_TEST_H
#define TEST_OUTPUT_TEST_H
#undef NDEBUG
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "../src/re.h"
#include "benchmark/benchmark.h"
#define CONCAT2(x, y) x##y
#define CONCAT(x, y) CONCAT2(x, y)
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) \
int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
enum MatchRules {
MR_Default, // Skip non-matching lines until a match is found.
MR_Next, // Match must occur on the next line.
MR_Not // No line between the current position and the next match matches
// the regex
};
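// For example, consecutive JSON fields are typically pinned together with
// MR_Next so that reordered or missing output fails the test:
//   ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
//                          {"\"run_type\": \"iteration\",$", MR_Next}});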
struct TestCase {
TestCase(std::string re, int rule = MR_Default);
std::string regex_str;
int match_rule;
std::string substituted_regex;
std::shared_ptr<benchmark::Regex> regex;
};
enum TestCaseID {
TC_ConsoleOut,
TC_ConsoleErr,
TC_JSONOut,
TC_JSONErr,
TC_CSVOut,
TC_CSVErr,
TC_NumID // PRIVATE
};
// Add a list of test cases to be run against the output specified by
// 'ID'
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
// Add or set a list of substitutions to be performed on constructed regexes.
// See 'output_test_helper.cc' for a list of default substitutions.
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il);
// Run all output tests.
void RunOutputTests(int argc, char* argv[]);
// Count the number of 'pat' substrings in the 'haystack' string.
int SubstrCnt(const std::string& haystack, const std::string& pat);
// Run registered benchmarks with file reporter enabled, and return the content
// outputted by the file reporter.
std::string GetFileReporterOutput(int argc, char* argv[]);
// ========================================================================= //
// ------------------------- Results checking ------------------------------ //
// ========================================================================= //
// Call this macro to register a benchmark for checking its results. This
// should be all that's needed. It subscribes a function to check the (CSV)
// results of a benchmark. This is done only after verifying that the output
// strings are really as expected.
// bm_name_pattern: a name or a regex pattern which will be matched against
// all the benchmark names. Matching benchmarks
// will be the subject of a call to checker_function
// checker_function: should be of type ResultsCheckFn (see below)
#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
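// For example (mirroring perf_counters_test.cc):
//   void CheckSimple(Results const& e) {
//     CHECK_COUNTER_VALUE(e, double, "CYCLES", GT, 0);
//   }
//   CHECK_BENCHMARK_RESULTS("BM_Simple", &CheckSimple);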
struct Results;
typedef std::function<void(Results const&)> ResultsCheckFn;
size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
// Class holding the results of a benchmark.
// It is passed in calls to checker functions.
struct Results {
// the benchmark name
std::string name;
// the benchmark fields
std::map<std::string, std::string> values;
Results(const std::string& n) : name(n) {}
int NumThreads() const;
double NumIterations() const;
typedef enum { kCpuTime, kRealTime } BenchmarkTime;
// get cpu_time or real_time in seconds
double GetTime(BenchmarkTime which) const;
// get the real_time duration of the benchmark in seconds.
// it is better to use fuzzy float checks for this, as the float
// ASCII formatting is lossy.
double DurationRealTime() const {
return NumIterations() * GetTime(kRealTime);
}
// get the cpu_time duration of the benchmark in seconds
double DurationCPUTime() const {
return NumIterations() * GetTime(kCpuTime);
}
// get the string for a result by name, or nullptr if the name
// is not found
const std::string* Get(const char* entry_name) const {
auto it = values.find(entry_name);
if (it == values.end()) return nullptr;
return &it->second;
}
// get a result by name, parsed as a specific type.
// NOTE: for counters, use GetCounterAs instead.
template <class T>
T GetAs(const char* entry_name) const;
// counters are written as doubles, so they have to be read first
// as a double, and only then converted to the asked type.
template <class T>
T GetCounterAs(const char* entry_name) const {
double dval = GetAs<double>(entry_name);
T tval = static_cast<T>(dval);
return tval;
}
};
template <class T>
T Results::GetAs(const char* entry_name) const {
auto* sv = Get(entry_name);
CHECK(sv != nullptr && !sv->empty());
std::stringstream ss;
ss << *sv;
T out;
ss >> out;
CHECK(!ss.fail());
return out;
}
//----------------------------------
// Macros to help in result checking. Do not use them with arguments causing
// side-effects.
// clang-format off
#define CHECK_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value) \
CONCAT(CHECK_, relationship) \
(entry.getfn< var_type >(var_name), (value)) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
<< __FILE__ << ":" << __LINE__ << ": " \
<< "expected (" << #var_type << ")" << (var_name) \
<< "=" << (entry).getfn< var_type >(var_name) \
<< " to be " #relationship " to " << (value) << "\n"
// check with tolerance. eps_factor is the tolerance window, which is
// interpreted relative to value (e.g., 0.1 means 10% of value).
#define CHECK_FLOAT_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
CONCAT(CHECK_FLOAT_, relationship) \
(entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
<< __FILE__ << ":" << __LINE__ << ": " \
<< "expected (" << #var_type << ")" << (var_name) \
<< "=" << (entry).getfn< var_type >(var_name) \
<< " to be " #relationship " to " << (value) << "\n" \
<< __FILE__ << ":" << __LINE__ << ": " \
<< "with tolerance of " << (eps_factor) * (value) \
<< " (" << (eps_factor)*100. << "%), " \
<< "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \
<< " (" << (((entry).getfn< var_type >(var_name) - (value)) \
/ \
((value) > 1.e-5 || (value) < -1.e-5 ? (value) : 1.e-5)*100.) \
<< "%)"
#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \
CHECK_RESULT_VALUE_IMPL(entry, GetAs, var_type, var_name, relationship, value)
#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \
CHECK_RESULT_VALUE_IMPL(entry, GetCounterAs, var_type, var_name, relationship, value)
#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \
CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetAs, double, var_name, relationship, value, eps_factor)
#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \
CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)
// clang-format on
// ========================================================================= //
// --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= //
namespace {
const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
} // end namespace
#endif // TEST_OUTPUT_TEST_H
0707010000007D000081A400000000000000000000000160C0813C00004516000000000000000000000000000000000000002B00000000benchmark-1.5.5/test/output_test_helper.cc#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <streambuf>
#include "../src/benchmark_api_internal.h"
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/re.h" // NOTE: re.h is for internal use only
#include "output_test.h"
// ========================================================================= //
// ------------------------------ Internals -------------------------------- //
// ========================================================================= //
namespace internal {
namespace {
using TestCaseList = std::vector<TestCase>;
// Use a vector because the order in which elements are added matters during iteration.
// std::map/unordered_map don't guarantee that.
// For example:
// SetSubstitutions({{"%HelloWorld", "Hello"}, {"%Hello", "Hi"}});
// Substitute("%HelloWorld") // Always expands to Hello.
using SubMap = std::vector<std::pair<std::string, std::string>>;
TestCaseList& GetTestCaseList(TestCaseID ID) {
// Uses function-local statics to ensure initialization occurs
// before first use.
static TestCaseList lists[TC_NumID];
return lists[ID];
}
SubMap& GetSubstitutions() {
// Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
static std::string time_re = "([0-9]+[.])?[0-9]+";
static SubMap map = {
{"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
// human-readable float
{"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
{"%int", "[ ]*[0-9]+"},
{" %s ", "[ ]+"},
{"%time", "[ ]*" + time_re + "[ ]+ns"},
{"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"},
{"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
{"%console_ms_report", "[ ]*" + time_re + "[ ]+ms [ ]*" + time_re + "[ ]+ms [ ]*[0-9]+"},
{"%console_s_report", "[ ]*" + time_re + "[ ]+s [ ]*" + time_re + "[ ]+s [ ]*[0-9]+"},
{"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"},
{"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
{"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"},
{"%csv_header",
"name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
"items_per_second,label,error_occurred,error_message"},
{"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
{"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
{"%csv_ms_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ms,,,,,"},
{"%csv_s_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",s,,,,,"},
{"%csv_bytes_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
{"%csv_items_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"},
{"%csv_bytes_items_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re +
"," + safe_dec_re + ",,,"},
{"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
{"%csv_label_report_end", ",,"}};
// clang-format on
return map;
}
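// Expand every "%key" occurrence in 'source' into its replacement regex.
// Scanning resumes after each inserted replacement, so a substitution can
// never match text that it itself produced.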
std::string PerformSubstitutions(std::string source) {
SubMap const& subs = GetSubstitutions();
using SizeT = std::string::size_type;
for (auto const& KV : subs) {
SizeT pos;
SizeT next_start = 0;
while ((pos = source.find(KV.first, next_start)) != std::string::npos) {
next_start = pos + KV.second.size();
source.replace(pos, KV.first.size(), KV.second);
}
}
return source;
}
void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
TestCaseList const& not_checks) {
std::string first_line;
bool on_first = true;
std::string line;
while (remaining_output.eof() == false) {
CHECK(remaining_output.good());
std::getline(remaining_output, line);
if (on_first) {
first_line = line;
on_first = false;
}
for (const auto& NC : not_checks) {
CHECK(!NC.regex->Match(line))
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \""
<< NC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
if (TC.regex->Match(line)) return;
CHECK(TC.match_rule != MR_Next)
<< "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
<< "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << TC.regex_str
<< "\" was found"
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
void CheckCases(TestCaseList const& checks, std::stringstream& output) {
std::vector<TestCase> not_checks;
for (size_t i = 0; i < checks.size(); ++i) {
const auto& TC = checks[i];
if (TC.match_rule == MR_Not) {
not_checks.push_back(TC);
continue;
}
CheckCase(output, TC, not_checks);
not_checks.clear();
}
}
class TestReporter : public benchmark::BenchmarkReporter {
public:
TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
: reporters_(reps) {}
virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
bool last_ret = false;
bool first = true;
for (auto rep : reporters_) {
bool new_ret = rep->ReportContext(context);
CHECK(first || new_ret == last_ret)
<< "Reports return different values for ReportContext";
first = false;
last_ret = new_ret;
}
(void)first;
return last_ret;
}
void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
for (auto rep : reporters_) rep->ReportRuns(report);
}
void Finalize() BENCHMARK_OVERRIDE {
for (auto rep : reporters_) rep->Finalize();
}
private:
std::vector<benchmark::BenchmarkReporter*> reporters_;
};
} // namespace
} // end namespace internal
// ========================================================================= //
// -------------------------- Results checking ----------------------------- //
// ========================================================================= //
namespace internal {
// Utility class to manage subscribers for checking benchmark results.
// It works by parsing the CSV output to read the results.
class ResultsChecker {
public:
struct PatternAndFn : public TestCase { // reusing TestCase for its regexes
PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
: TestCase(rx), fn(fn_) {}
ResultsCheckFn fn;
};
std::vector<PatternAndFn> check_patterns;
std::vector<Results> results;
std::vector<std::string> field_names;
void Add(const std::string& entry_pattern, ResultsCheckFn fn);
void CheckResults(std::stringstream& output);
private:
void SetHeader_(const std::string& csv_header);
void SetValues_(const std::string& entry_csv_line);
std::vector<std::string> SplitCsv_(const std::string& line);
};
// store the static ResultsChecker in a function to prevent initialization
// order problems
ResultsChecker& GetResultsChecker() {
static ResultsChecker rc;
return rc;
}
// add a results checker for a benchmark
void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
check_patterns.emplace_back(entry_pattern, fn);
}
// check the results of all subscribed benchmarks
void ResultsChecker::CheckResults(std::stringstream& output) {
// first reset the stream to the start
{
auto start = std::stringstream::pos_type(0);
// clear before calling tellg()
output.clear();
// seek to zero only when needed
if (output.tellg() > start) output.seekg(start);
// and just in case
output.clear();
}
// now go over every line and publish it to the ResultsChecker
std::string line;
bool on_first = true;
while (output.eof() == false) {
CHECK(output.good());
std::getline(output, line);
if (on_first) {
SetHeader_(line); // this is important
on_first = false;
continue;
}
SetValues_(line);
}
// finally we can call the subscribed check functions
for (const auto& p : check_patterns) {
VLOG(2) << "--------------------------------\n";
VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
for (const auto& r : results) {
if (!p.regex->Match(r.name)) {
VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
continue;
} else {
VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
}
VLOG(1) << "Checking results of " << r.name << ": ... \n";
p.fn(r);
VLOG(1) << "Checking results of " << r.name << ": OK.\n";
}
}
}
// record the field names from the CSV header line
void ResultsChecker::SetHeader_(const std::string& csv_header) {
field_names = SplitCsv_(csv_header);
}
// set the values for a benchmark
void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
if (entry_csv_line.empty()) return; // some lines are empty
CHECK(!field_names.empty());
auto vals = SplitCsv_(entry_csv_line);
CHECK_EQ(vals.size(), field_names.size());
results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto& entry = results.back();
for (size_t i = 1, e = vals.size(); i < e; ++i) {
entry.values[field_names[i]] = vals[i];
}
}
// a quick'n'dirty csv splitter (eliminating quotes)
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
std::vector<std::string> out;
if (line.empty()) return out;
if (!field_names.empty()) out.reserve(field_names.size());
size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while (pos != line.npos) {
CHECK(curr > 0);
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
out.push_back(line.substr(prev, curr - prev));
prev = pos + 1;
pos = line.find_first_of(',', pos + 1);
curr = pos;
}
curr = line.size();
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
out.push_back(line.substr(prev, curr - prev));
return out;
}
} // end namespace internal
size_t AddChecker(const char* bm_name, ResultsCheckFn fn) {
auto& rc = internal::GetResultsChecker();
rc.Add(bm_name, fn);
return rc.results.size();
}
int Results::NumThreads() const {
auto pos = name.find("/threads:");
if (pos == name.npos) return 1;
auto end = name.find('/', pos + 9);
std::stringstream ss;
// substr takes (pos, count); compute the count up to the next '/'.
ss << name.substr(pos + 9, end - (pos + 9));
int num = 1;
ss >> num;
CHECK(!ss.fail());
return num;
}
double Results::NumIterations() const {
return GetAs<double>("iterations");
}
double Results::GetTime(BenchmarkTime which) const {
CHECK(which == kCpuTime || which == kRealTime);
const char* which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs<double>(which_str);
auto unit = Get("time_unit");
CHECK(unit);
if (*unit == "ns") {
return val * 1.e-9;
} else if (*unit == "us") {
return val * 1.e-6;
} else if (*unit == "ms") {
return val * 1.e-3;
} else if (*unit == "s") {
return val;
} else {
CHECK(1 == 0) << "unknown time unit: " << *unit;
return 0;
}
}
// ========================================================================= //
// -------------------------- Public API Definitions------------------------ //
// ========================================================================= //
TestCase::TestCase(std::string re, int rule)
: regex_str(std::move(re)),
match_rule(rule),
substituted_regex(internal::PerformSubstitutions(regex_str)),
regex(std::make_shared<benchmark::Regex>()) {
std::string err_str;
regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
<< "\""
<< "\n originally \"" << regex_str << "\""
<< "\n got error: " << err_str;
}
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) {
auto& L = internal::GetTestCaseList(ID);
L.insert(L.end(), il);
return 0;
}
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il) {
auto& subs = internal::GetSubstitutions();
for (auto KV : il) {
bool exists = false;
KV.second = internal::PerformSubstitutions(KV.second);
for (auto& EKV : subs) {
if (EKV.first == KV.first) {
EKV.second = std::move(KV.second);
exists = true;
break;
}
}
if (!exists) subs.push_back(std::move(KV));
}
return 0;
}
// Disable deprecated warnings temporarily because we need to reference
// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
void RunOutputTests(int argc, char* argv[]) {
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
benchmark::ConsoleReporter CR(options);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest {
const char* name;
std::vector<TestCase>& output_cases;
std::vector<TestCase>& error_cases;
benchmark::BenchmarkReporter& reporter;
std::stringstream out_stream;
std::stringstream err_stream;
ReporterTest(const char* n, std::vector<TestCase>& out_tc,
std::vector<TestCase>& err_tc,
benchmark::BenchmarkReporter& br)
: name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) {
reporter.SetOutputStream(&out_stream);
reporter.SetErrorStream(&err_stream);
}
} TestCases[] = {
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut),
GetTestCaseList(TC_ConsoleErr), CR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr),
JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr),
CSVR},
};
// Create the test reporter and run the benchmarks.
std::cout << "Running benchmarks...\n";
internal::TestReporter test_rep({&CR, &JR, &CSVR});
benchmark::RunSpecifiedBenchmarks(&test_rep);
for (auto& rep_test : TestCases) {
std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
std::string banner(msg.size() - 1, '-');
std::cout << banner << msg << banner << "\n";
std::cerr << rep_test.err_stream.str();
std::cout << rep_test.out_stream.str();
internal::CheckCases(rep_test.error_cases, rep_test.err_stream);
internal::CheckCases(rep_test.output_cases, rep_test.out_stream);
std::cout << "\n";
}
// now that we know the output is as expected, we can dispatch
// the checks to the subscribed checkers.
auto& csv = TestCases[2];
// would use == but gcc spits a warning
CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream);
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
int SubstrCnt(const std::string& haystack, const std::string& pat) {
if (pat.length() == 0) return 0;
int count = 0;
for (size_t offset = haystack.find(pat); offset != std::string::npos;
offset = haystack.find(pat, offset + pat.length()))
++count;
return count;
}
static char ToHex(int ch) {
return ch < 10 ? static_cast<char>('0' + ch)
: static_cast<char>('a' + (ch - 10));
}
static char RandomHexChar() {
static std::mt19937 rd{std::random_device{}()};
static std::uniform_int_distribution<int> mrand{0, 15};
return ToHex(mrand(rd));
}
static std::string GetRandomFileName() {
std::string model = "test.%%%%%%";
for (auto & ch : model) {
if (ch == '%')
ch = RandomHexChar();
}
return model;
}
static bool FileExists(std::string const& name) {
std::ifstream in(name.c_str());
return in.good();
}
static std::string GetTempFileName() {
// This function attempts to avoid race conditions where two tests
// create the same file at the same time. However, it still introduces races
// similar to tmpnam.
int retries = 3;
while (--retries) {
std::string name = GetRandomFileName();
if (!FileExists(name))
return name;
}
std::cerr << "Failed to create unique temporary file name" << std::endl;
std::abort();
}
std::string GetFileReporterOutput(int argc, char* argv[]) {
std::vector<char*> new_argv(argv, argv + argc);
assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
std::string tmp_file_name = GetTempFileName();
std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';
std::string tmp = "--benchmark_out=";
tmp += tmp_file_name;
new_argv.emplace_back(const_cast<char*>(tmp.c_str()));
argc = int(new_argv.size());
benchmark::Initialize(&argc, new_argv.data());
benchmark::RunSpecifiedBenchmarks();
// Read the output back from the file, and delete the file.
std::ifstream tmp_stream(tmp_file_name);
std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)),
std::istreambuf_iterator<char>());
std::remove(tmp_file_name.c_str());
return output;
}
0707010000007E000081A400000000000000000000000160C0813C0000130D000000000000000000000000000000000000002C00000000benchmark-1.5.5/test/perf_counters_gtest.cc#include <thread>
#include "../src/perf_counters.h"
#include "gtest/gtest.h"
#ifndef GTEST_SKIP
struct MsgHandler {
void operator=(std::ostream&){}
};
#define GTEST_SKIP() return MsgHandler() = std::cout
#endif
using benchmark::internal::PerfCounters;
using benchmark::internal::PerfCounterValues;
namespace {
const char kGenericPerfEvent1[] = "CYCLES";
const char kGenericPerfEvent2[] = "BRANCHES";
const char kGenericPerfEvent3[] = "INSTRUCTIONS";
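// These are generic event names which libpfm resolves to the host PMU's
// cycle, branch, and instruction counters (assuming libpfm support is built
// in; otherwise PerfCounters::kSupported is false and the tests skip).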
TEST(PerfCountersTest, Init) {
EXPECT_EQ(PerfCounters::Initialize(), PerfCounters::kSupported);
}
TEST(PerfCountersTest, OneCounter) {
if (!PerfCounters::kSupported) {
GTEST_SKIP() << "Performance counters not supported.\n";
}
EXPECT_TRUE(PerfCounters::Initialize());
EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1}).IsValid());
}
TEST(PerfCountersTest, NegativeTest) {
if (!PerfCounters::kSupported) {
EXPECT_FALSE(PerfCounters::Initialize());
return;
}
EXPECT_TRUE(PerfCounters::Initialize());
EXPECT_FALSE(PerfCounters::Create({}).IsValid());
EXPECT_FALSE(PerfCounters::Create({""}).IsValid());
EXPECT_FALSE(PerfCounters::Create({"not a counter name"}).IsValid());
{
EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2,
kGenericPerfEvent3})
.IsValid());
}
EXPECT_FALSE(
PerfCounters::Create({kGenericPerfEvent2, "", kGenericPerfEvent1})
.IsValid());
EXPECT_FALSE(PerfCounters::Create({kGenericPerfEvent3, "not a counter name",
kGenericPerfEvent1})
.IsValid());
{
EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2,
kGenericPerfEvent3})
.IsValid());
}
EXPECT_FALSE(
PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2,
kGenericPerfEvent3, "MISPREDICTED_BRANCH_RETIRED"})
.IsValid());
}
TEST(PerfCountersTest, Read1Counter) {
if (!PerfCounters::kSupported) {
GTEST_SKIP() << "Test skipped because libpfm is not supported.\n";
}
EXPECT_TRUE(PerfCounters::Initialize());
auto counters = PerfCounters::Create({kGenericPerfEvent1});
EXPECT_TRUE(counters.IsValid());
PerfCounterValues values1(1);
EXPECT_TRUE(counters.Snapshot(&values1));
EXPECT_GT(values1[0], 0);
PerfCounterValues values2(1);
EXPECT_TRUE(counters.Snapshot(&values2));
EXPECT_GT(values2[0], 0);
EXPECT_GT(values2[0], values1[0]);
}
TEST(PerfCountersTest, Read2Counters) {
if (!PerfCounters::kSupported) {
GTEST_SKIP() << "Test skipped because libpfm is not supported.\n";
}
EXPECT_TRUE(PerfCounters::Initialize());
auto counters =
PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2});
EXPECT_TRUE(counters.IsValid());
PerfCounterValues values1(2);
EXPECT_TRUE(counters.Snapshot(&values1));
EXPECT_GT(values1[0], 0);
EXPECT_GT(values1[1], 0);
PerfCounterValues values2(2);
EXPECT_TRUE(counters.Snapshot(&values2));
EXPECT_GT(values2[0], 0);
EXPECT_GT(values2[1], 0);
}
size_t do_work() {
size_t res = 0;
for (size_t i = 0; i < 100000000; ++i) res += i * i;
return res;
}
void measure(size_t threadcount, PerfCounterValues* values1,
PerfCounterValues* values2) {
CHECK_NE(values1, nullptr);
CHECK_NE(values2, nullptr);
std::vector<std::thread> threads(threadcount);
auto work = [&]() { CHECK(do_work() > 1000); };
// We need to first set up the counters, then start the threads, so the
// threads would inherit the counters. But later, we need to first destroy the
// thread pool (so all the work finishes), then measure the counters. So the
// scopes overlap, and we need to explicitly control the scope of the
// threadpool.
auto counters =
PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent3});
for (auto& t : threads) t = std::thread(work);
counters.Snapshot(values1);
for (auto& t : threads) t.join();
counters.Snapshot(values2);
}
TEST(PerfCountersTest, MultiThreaded) {
if (!PerfCounters::kSupported) {
GTEST_SKIP() << "Test skipped because libpfm is not supported.";
}
EXPECT_TRUE(PerfCounters::Initialize());
PerfCounterValues values1(2);
PerfCounterValues values2(2);
measure(2, &values1, &values2);
std::vector<double> D1{static_cast<double>(values2[0] - values1[0]),
static_cast<double>(values2[1] - values1[1])};
measure(4, &values1, &values2);
std::vector<double> D2{static_cast<double>(values2[0] - values1[0]),
static_cast<double>(values2[1] - values1[1])};
// Some extra work will happen on the main thread - like joining the threads
// - so the ratio won't be quite 2.0, but very close.
EXPECT_GE(D2[0], 1.9 * D1[0]);
EXPECT_GE(D2[1], 1.9 * D1[1]);
}
} // namespace
0707010000007F000081A400000000000000000000000160C0813C0000028A000000000000000000000000000000000000002B00000000benchmark-1.5.5/test/perf_counters_test.cc#undef NDEBUG
#include "../src/perf_counters.h"
#include "benchmark/benchmark.h"
#include "output_test.h"
void BM_Simple(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_Simple);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Simple\",$"}});
void CheckSimple(Results const& e) {
CHECK_COUNTER_VALUE(e, double, "CYCLES", GT, 0);
CHECK_COUNTER_VALUE(e, double, "BRANCHES", GT, 0.0);
}
CHECK_BENCHMARK_RESULTS("BM_Simple", &CheckSimple);
int main(int argc, char* argv[]) {
if (!benchmark::internal::PerfCounters::kSupported) {
return 0;
}
RunOutputTests(argc, argv);
}
07070100000080000081A400000000000000000000000160C0813C00001554000000000000000000000000000000000000003000000000benchmark-1.5.5/test/register_benchmark_test.cc
#undef NDEBUG
#include <cassert>
#include <vector>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
std::vector<Run> all_runs_;
};
struct TestCase {
std::string name;
const char* label;
// Note: not explicit as we rely on it being converted through ADD_CASES.
TestCase(const char* xname) : TestCase(xname, nullptr) {}
TestCase(const char* xname, const char* xlabel)
: name(xname), label(xlabel) {}
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
// clang-format off
CHECK(name == run.benchmark_name()) << "expected " << name << " got "
<< run.benchmark_name();
if (label) {
CHECK(run.report_label == label) << "expected " << label << " got "
<< run.report_label;
} else {
CHECK(run.report_label == "");
}
// clang-format on
}
};
std::vector<TestCase> ExpectedResults;
int AddCases(std::initializer_list<TestCase> const& v) {
for (auto N : v) {
ExpectedResults.push_back(N);
}
return 0;
}
#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__})
} // end namespace
typedef benchmark::internal::Benchmark* ReturnVal;
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark(
"BM_function_manual_registration", BM_function);
ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with additional arguments
// Note: GCC <= 4.8 does not support this form of RegisterBenchmark because it
// rejects the variadic pack expansion of lambda captures.
//----------------------------------------------------------------------------//
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
void BM_extra_args(benchmark::State& st, const char* label) {
for (auto _ : st) {
}
st.SetLabel(label);
}
int RegisterFromFunction() {
std::pair<const char*, const char*> cases[] = {
{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
for (auto const& c : cases)
benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
return 0;
}
int dummy2 = RegisterFromFunction();
ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
#endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with different callable types
//----------------------------------------------------------------------------//
struct CustomFixture {
void operator()(benchmark::State& st) {
for (auto _ : st) {
}
}
};
void TestRegistrationAtRuntime() {
#ifdef BENCHMARK_HAS_CXX11
{
CustomFixture fx;
benchmark::RegisterBenchmark("custom_fixture", fx);
AddCases({"custom_fixture"});
}
#endif
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
{
const char* x = "42";
auto capturing_lam = [=](benchmark::State& st) {
for (auto _ : st) {
}
st.SetLabel(x);
};
benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
AddCases({{"lambda_benchmark", x}});
}
#endif
}
// Test that all benchmarks, whether registered during static init or at
// runtime, are run and their results are passed to the reporter.
void RunTestOne() {
TestRegistrationAtRuntime();
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
}
// Test that ClearRegisteredBenchmarks() clears all previously registered
// benchmarks.
// Also test that new benchmarks can be registered and run afterwards.
void RunTestTwo() {
assert(ExpectedResults.size() != 0 &&
"must have at least one registered benchmark");
ExpectedResults.clear();
benchmark::ClearRegisteredBenchmarks();
TestReporter test_reporter;
size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
assert(num_ran == 0);
assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
TestRegistrationAtRuntime();
num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
assert(num_ran == ExpectedResults.size());
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
}
int main(int argc, char* argv[]) {
benchmark::Initialize(&argc, argv);
RunTestOne();
RunTestTwo();
}
07070100000081000081A400000000000000000000000160C0813C00002BC8000000000000000000000000000000000000002900000000benchmark-1.5.5/test/repetitions_test.cc
#include "benchmark/benchmark.h"
#include "output_test.h"
// ========================================================================= //
// ------------------- Testing Explicit Repetitions Output ------------------ //
// ========================================================================= //
void BM_ExplicitRepetitions(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_ExplicitRepetitions)->Repetitions(2);
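// Repetitions(2) runs the benchmark twice and then appends mean/median/stddev
// aggregates, so five console rows are expected below: two per-repetition
// reports followed by the three aggregate reports.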
ADD_CASES(TC_ConsoleOut,
{{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}});
ADD_CASES(TC_ConsoleOut,
{{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}});
ADD_CASES(TC_ConsoleOut,
{{"^BM_ExplicitRepetitions/repeats:2_mean %console_report$"}});
ADD_CASES(TC_ConsoleOut,
{{"^BM_ExplicitRepetitions/repeats:2_median %console_report$"}});
ADD_CASES(TC_ConsoleOut,
{{"^BM_ExplicitRepetitions/repeats:2_stddev %console_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_ExplicitRepetitions/repeats:2_mean\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_ExplicitRepetitions/repeats:2_median\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_ExplicitRepetitions/repeats:2_stddev\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_ExplicitRepetitions/repeats:2_mean\",%csv_report$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_ExplicitRepetitions/repeats:2_median\",%csv_report$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_ExplicitRepetitions/repeats:2_stddev\",%csv_report$"}});
// ========================================================================= //
// ------------------- Testing Implicit Repetitions Output ------------------ //
// ========================================================================= //
void BM_ImplicitRepetitions(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_ImplicitRepetitions);
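// No explicit Repetitions() here; the "repetitions": 3 values expected below
// presumably come from a --benchmark_repetitions=3 flag passed by the test
// driver that runs this output test.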
ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_mean %console_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_median %console_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_stddev %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_mean\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_median\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_stddev\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_mean\",%csv_report$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_median\",%csv_report$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_stddev\",%csv_report$"}});
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
07070100000082000081A400000000000000000000000160C0813C00000553000000000000000000000000000000000000003400000000benchmark-1.5.5/test/report_aggregates_only_test.cc
#undef NDEBUG
#include <cstdio>
#include <iostream>
#include <string>
#include "benchmark/benchmark.h"
#include "output_test.h"
// OK, this test is admittedly ugly: we want to check what the file reporter
// emits in the presence of ReportAggregatesOnly().
// We do not care about console output; the normal tests already cover that.
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
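// With ReportAggregatesOnly(), the file reporter should receive only the
// mean/median/stddev entries. Instead of the usual ADD_CASES machinery,
// main() below verifies this by counting "name" substrings in the JSON file
// output.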
int main(int argc, char* argv[]) {
const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
std::cout << "Precondition mismatch. Expected to only find three "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}
return 0;
}
07070100000083000081A400000000000000000000000160C0813C0000BE57000000000000000000000000000000000000002D00000000benchmark-1.5.5/test/reporter_output_test.cc
#undef NDEBUG
#include <utility>
#include "benchmark/benchmark.h"
#include "output_test.h"
// ========================================================================= //
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //
ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
{"^[-]+$", MR_Next}});
static int AddContextCases() {
AddCases(TC_ConsoleErr,
{
{"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
AddCases(TC_JSONOut,
{{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
{"\"host_name\":", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"caches\": \\[$", MR_Default}});
auto const& Info = benchmark::CPUInfo::Get();
auto const& Caches = Info.caches;
if (!Caches.empty()) {
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
}
for (size_t I = 0; I < Caches.size(); ++I) {
std::string num_caches_str =
Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
AddCases(TC_ConsoleErr,
{{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str,
MR_Next}});
AddCases(TC_JSONOut, {{"\\{$", MR_Next},
{"\"type\": \"", MR_Next},
{"\"level\": %int,$", MR_Next},
{"\"size\": %int,$", MR_Next},
{"\"num_sharing\": %int$", MR_Next},
{"}[,]{0,1}$", MR_Next}});
}
AddCases(TC_JSONOut, {{"],$"}});
auto const& LoadAvg = Info.load_avg;
if (!LoadAvg.empty()) {
AddCases(TC_ConsoleErr,
{{"Load Average: (%float, ){0,2}%float$", MR_Next}});
}
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
return 0;
}
int dummy_register = AddContextCases();
ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ========================================================================= //
// ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= //
void BM_basic(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_basic);
ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_basic\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= //
void BM_bytes_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);
ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report "
"bytes_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_bytes_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bytes_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ========================================================================= //
// ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= //
void BM_items_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);
ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report "
"items_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
{"\"family_index\": 2,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_items_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"items_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ========================================================================= //
// ------------------------ Testing Label Output --------------------------- //
// ========================================================================= //
void BM_label(benchmark::State& state) {
for (auto _ : state) {
}
state.SetLabel("some label");
}
BENCHMARK(BM_label);
ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
{"\"family_index\": 3,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_label\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"label\": \"some label\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
"label\"%csv_label_report_end$"}});
// ========================================================================= //
// ------------------------ Testing Time Label Output ---------------------- //
// ========================================================================= //
void BM_time_label_nanosecond(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_time_label_nanosecond\",$"},
{"\"family_index\": 4,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_nanosecond\",%csv_report$"}});
void BM_time_label_microsecond(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_time_label_microsecond\",$"},
{"\"family_index\": 5,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"us\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_microsecond\",%csv_us_report$"}});
void BM_time_label_millisecond(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_time_label_millisecond\",$"},
{"\"family_index\": 6,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ms\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_millisecond\",%csv_ms_report$"}});
void BM_time_label_second(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"},
{"\"family_index\": 7,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"s\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_second\",%csv_s_report$"}});
// ========================================================================= //
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //
void BM_error(benchmark::State& state) {
state.SkipWithError("message");
for (auto _ : state) {
}
}
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
{"\"family_index\": 8,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_error\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"error_occurred\": true,$", MR_Next},
{"\"error_message\": \"message\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// ========================================================================= //
// ---------------------- Testing No Arg Name Output ----------------------- //
// ========================================================================= //
void BM_no_arg_name(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"},
{"\"family_index\": 9,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Arg Name Output ----------------------- //
// ========================================================================= //
void BM_arg_name(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"},
{"\"family_index\": 10,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= //
void BM_arg_names(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut,
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
{"\"family_index\": 11,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Name Output ---------------------------- //
// ========================================================================= //
void BM_name(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_name)->Name("BM_custom_name");
ADD_CASES(TC_ConsoleOut, {{"^BM_custom_name %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_custom_name\",$"},
{"\"family_index\": 12,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_custom_name\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_custom_name\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //
void BM_BigArgs(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
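// RangeMultiplier(2) with Range(1U << 30U, 1U << 31U) generates exactly the
// two sizes 1073741824 and 2147483648 matched below; the unsigned literals
// keep 1 << 31 from overflowing a signed int.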
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
{"^BM_BigArgs/2147483648 %console_report$"}});
// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
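// Complexity() appends two synthetic rows after the per-size runs: a "_BigO"
// row with the fitted coefficient and a "_RMS" row with the root-mean-square
// deviation of the fit, which is what the patterns below match.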
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"},
{"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
{"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
// ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= //
// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) {
for (auto _ : state) {
}
}
// Need at least two repetitions to be able to produce any aggregate output.
BENCHMARK(BM_Repeat)->Repetitions(2);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_median\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// But with only two repetitions the mean and median are the same, so repeat...
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_median\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// The median is computed differently for even and odd repetition counts, so
// test four repetitions as well, just to be sure.
BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_median\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_stddev\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:4_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}});
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
{"\"family_index\": 18,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"family_index\": 19,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"family_index\": 19,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
{"\"family_index\": 19,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
// Test that non-aggregate data is not displayed.
// NOTE: this test is somewhat weak: we only check the display output here,
// without verifying that the file output still contains everything...
void BM_SummaryDisplay(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
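// DisplayAggregatesOnly() differs from ReportAggregatesOnly(): it suppresses
// per-repetition rows only in the display (console) reporter, while file
// reporters still receive everything -- precisely the part the NOTE above
// says this test does not verify.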
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
{"\"family_index\": 20,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
{"\"family_index\": 20,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
{"\"family_index\": 20,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatTimeUnit)
->Repetitions(3)
->ReportAggregatesOnly()
->Unit(benchmark::kMicrosecond);
ADD_CASES(
TC_ConsoleOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
"]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
"]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"family_index\": 21,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
{"\"family_index\": 21,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"family_index\": 21,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
// ========================================================================= //
// -------------------- Testing user-provided statistics ------------------- //
// ========================================================================= //
const auto UserStatistics = [](const std::vector<double>& v) {
return v.back();
};
void BM_UserStats(benchmark::State& state) {
for (auto _ : state) {
    state.SetIterationTime(150 / 1e9);  // report exactly 150 ns per iteration
}
}
// clang-format off
BENCHMARK(BM_UserStats)
->Repetitions(3)
->Iterations(5)
->UseManualTime()
->ComputeStatistics("", UserStatistics);
// clang-format on
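// For reference, a named custom statistic would look like the following
// (left in a comment: registering it would perturb the expected-output cases
// below):
//   BENCHMARK(BM_UserStats)->ComputeStatistics(
//       "max", [](const std::vector<double>& v) {
//         return *std::max_element(v.begin(), v.end());
//       });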
// Check that the user-provided statistic is calculated and reported after the
// default ones. The empty string as its name is intentional: it would sort
// before anything else.
ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_mean [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_median [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES(
TC_JSONOut,
{{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_median\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
// ========================================================================= //
// ------------------------- Testing StrEscape JSON ------------------------ //
// ========================================================================= //
#if 0 // enable when csv testing code correctly handles multi-line fields
void BM_JSON_Format(benchmark::State& state) {
state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes");
for (auto _ : state) {
}
}
BENCHMARK(BM_JSON_Format);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_JSON_Format\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"error_occurred\": true,$", MR_Next},
{R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}});
#endif
// ========================================================================= //
// -------------------------- Testing CsvEscape ---------------------------- //
// ========================================================================= //
void BM_CSV_Format(benchmark::State& state) {
state.SkipWithError("\"freedom\"");
for (auto _ : state) {
}
}
BENCHMARK(BM_CSV_Format);
ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
07070100000084000081A400000000000000000000000160C0813C0000197D000000000000000000000000000000000000002D00000000benchmark-1.5.5/test/skip_with_error_test.cc
#undef NDEBUG
#include <cassert>
#include <vector>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace {
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
TestReporter() {}
virtual ~TestReporter() {}
mutable std::vector<Run> all_runs_;
};
struct TestCase {
std::string name;
bool error_occurred;
std::string error_message;
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name())
<< "expected " << name << " got " << run.benchmark_name();
CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message);
if (error_occurred) {
// CHECK(run.iterations == 0);
} else {
CHECK(run.iterations != 0);
}
}
};
std::vector<TestCase> ExpectedResults;
int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
for (auto TC : v) {
TC.name = base_name + TC.name;
ExpectedResults.push_back(std::move(TC));
}
return 0;
}
#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
} // end namespace
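// Unlike the ADD_CASES in other tests, this variant takes a base name plus
// suffix cases; AddCases() prepends the base name to each suffix, so a case
// {"/1/threads:1", ...} under "BM_error_during_running" checks the run named
// "BM_error_during_running/1/threads:1".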
void BM_error_no_running(benchmark::State& state) {
state.SkipWithError("error message");
}
BENCHMARK(BM_error_no_running);
ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
void BM_error_before_running(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunning()) {
assert(false);
}
}
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
void BM_error_before_running_batch(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunningBatch(17)) {
assert(false);
}
}
BENCHMARK(BM_error_before_running_batch);
ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
void BM_error_before_running_range_for(benchmark::State& state) {
state.SkipWithError("error message");
for (auto _ : state) {
assert(false);
}
}
BENCHMARK(BM_error_before_running_range_for);
ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
void BM_error_during_running(benchmark::State& state) {
  bool first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
} else {
state.PauseTiming();
state.ResumeTiming();
}
}
}
BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
{"/1/threads:2", true, "error message"},
{"/1/threads:4", true, "error message"},
{"/1/threads:8", true, "error message"},
{"/2/threads:1", false, ""},
{"/2/threads:2", false, ""},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
void BM_error_during_running_ranged_for(benchmark::State& state) {
assert(state.max_iterations > 3 && "test requires at least a few iterations");
  bool first_iter = true;
// NOTE: Users should not write the for loop explicitly.
for (auto It = state.begin(), End = state.end(); It != End; ++It) {
if (state.range(0) == 1) {
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
// Test the unfortunate but documented behavior that the ranged-for loop
// doesn't automatically terminate when SkipWithError is set.
assert(++It != End);
break; // Required behavior
}
}
}
BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
ADD_CASES("BM_error_during_running_ranged_for",
{{"/1/iterations:5", true, "error message"},
{"/2/iterations:5", false, ""}});
void BM_error_after_running(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
if (state.thread_index <= (state.threads / 2))
state.SkipWithError("error message");
}
BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
{"/threads:2", true, "error message"},
{"/threads:4", true, "error message"},
{"/threads:8", true, "error message"}});
void BM_error_while_paused(benchmark::State& state) {
bool first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.PauseTiming();
state.SkipWithError("error message");
} else {
state.PauseTiming();
state.ResumeTiming();
}
}
}
BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
{"/1/threads:2", true, "error message"},
{"/1/threads:4", true, "error message"},
{"/1/threads:8", true, "error message"},
{"/2/threads:1", false, ""},
{"/2/threads:2", false, ""},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
int main(int argc, char* argv[]) {
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
return 0;
}
07070100000085000081A400000000000000000000000160C0813C00000701000000000000000000000000000000000000002C00000000benchmark-1.5.5/test/state_assembly_test.cc#include <benchmark/benchmark.h>
#ifdef __clang__
#pragma clang diagnostic ignored "-Wreturn-type"
#endif
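// The CHECK / CHECK-NEXT / CHECK-GNU / CHECK-CLANG comments below are LLVM
// FileCheck patterns: this file is compiled to assembly and the result is
// verified against them, with the GNU/CLANG prefixes selecting
// compiler-specific expectations.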
// clang-format off
extern "C" {
extern int ExternInt;
benchmark::State& GetState();
void Fn();
}
// clang-format on
using benchmark::State;
// CHECK-LABEL: test_for_auto_loop:
extern "C" int test_for_auto_loop() {
State& S = GetState();
int x = 42;
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK-NEXT: testq %rbx, %rbx
// CHECK-NEXT: je [[LOOP_END:.*]]
for (auto _ : S) {
// CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
// CHECK-GNU-NEXT: subq $1, %rbx
// CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
// CHECK-NEXT: jne .L[[LOOP_HEAD]]
benchmark::DoNotOptimize(x);
}
// CHECK: [[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
// CHECK: movl $101, %eax
// CHECK: ret
return 101;
}
// CHECK-LABEL: test_while_loop:
extern "C" int test_while_loop() {
State& S = GetState();
int x = 42;
// CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
// CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
while (S.KeepRunning()) {
// CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
// CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
// CHECK: movq %[[IREG]], [[DEST:.*]]
benchmark::DoNotOptimize(x);
}
// CHECK-DAG: movq [[DEST]], %[[IREG]]
// CHECK-DAG: testq %[[IREG]], %[[IREG]]
// CHECK-DAG: jne .L[[LOOP_BODY]]
// CHECK-DAG: .L[[LOOP_HEADER]]:
// CHECK: cmpb $0
// CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK: .L[[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
// CHECK: movl $101, %eax
// CHECK: ret
return 101;
}
07070100000086000081A400000000000000000000000160C0813C0000043D000000000000000000000000000000000000002900000000benchmark-1.5.5/test/statistics_gtest.cc//===---------------------------------------------------------------------===//
// statistics_test - Unit tests for src/statistics.cc
//===---------------------------------------------------------------------===//
#include "../src/statistics.h"
#include "gtest/gtest.h"
namespace {
TEST(StatisticsTest, Mean) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
}
TEST(StatisticsTest, Median) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
}
TEST(StatisticsTest, StdDev) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
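  // Sample standard deviation (N-1 denominator): for {1, 2, 3} the mean is 2,
  // so stddev = sqrt((1 + 0 + 1) / (3 - 1)) = 1.0.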
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}),
1.151086443322134);
}
} // end namespace
07070100000087000081A400000000000000000000000160C0813C00000ECC000000000000000000000000000000000000002A00000000benchmark-1.5.5/test/string_util_gtest.cc//===---------------------------------------------------------------------===//
// string_util_test - Unit tests for src/string_util.cc
//===---------------------------------------------------------------------===//
#include "../src/string_util.h"
#include "../src/internal_macros.h"
#include "gtest/gtest.h"
namespace {
TEST(StringUtilTest, stoul) {
{
size_t pos = 0;
EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
EXPECT_EQ(3ul, pos);
}
#if ULONG_MAX == 0xFFFFFFFFul
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
EXPECT_EQ(10ul, pos);
}
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
EXPECT_EQ(20ul, pos);
}
#endif
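  // The same digit string "1010" parsed in different bases:
  //   base 2:  1*8    + 0 + 1*2  + 0 = 10
  //   base 8:  1*512  + 0 + 1*8  + 0 = 520
  //   base 16: 1*4096 + 0 + 1*16 + 0 = 4112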
{
size_t pos = 0;
EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
}
#endif
}
TEST(StringUtilTest, stoi) {
{
size_t pos = 0;
EXPECT_EQ(0, benchmark::stoi("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
}
#endif
}
TEST(StringUtilTest, stod) {
{
size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
/* Note: exactly representable as double */
EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
EXPECT_EQ(8ul, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
}
#endif
}
TEST(StringUtilTest, StrSplit) {
EXPECT_EQ(benchmark::StrSplit("", ','), std::vector<std::string>{});
EXPECT_EQ(benchmark::StrSplit("hello", ','),
std::vector<std::string>({"hello"}));
EXPECT_EQ(benchmark::StrSplit("hello,there,is,more", ','),
std::vector<std::string>({"hello", "there", "is", "more"}));
}
} // end namespace
07070100000088000081A400000000000000000000000160C0813C000001E2000000000000000000000000000000000000002F00000000benchmark-1.5.5/test/templated_fixture_test.cc
#include "benchmark/benchmark.h"
#include <cassert>
#include <memory>
template <typename T>
class MyFixture : public ::benchmark::Fixture {
public:
MyFixture() : data(0) {}
T data;
};
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
for (auto _ : st) {
data += 1;
}
}
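// BENCHMARK_TEMPLATE_F above defines and registers in one step;
// BENCHMARK_TEMPLATE_DEFINE_F below only defines the benchmark and must be
// paired with BENCHMARK_REGISTER_F.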
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
for (auto _ : st) {
data += 1.0;
}
}
BENCHMARK_REGISTER_F(MyFixture, Bar);
BENCHMARK_MAIN();
07070100000089000081A400000000000000000000000160C0813C000062BA000000000000000000000000000000000000003300000000benchmark-1.5.5/test/user_counters_tabular_test.cc
#undef NDEBUG
#include "benchmark/benchmark.h"
#include "output_test.h"
// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
ADD_CASES(TC_ConsoleOut,
{
// keeping these lines long improves readability, so:
// clang-format off
{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
{"^[-]+$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next},
{"^[-]+$", MR_Next},
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next},
{"^[-]+$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
// clang-format on
});
ADD_CASES(TC_CSVOut, {{"%csv_header,"
"\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});
// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //
void BM_Counters_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {1, bm::Counter::kAvgThreads}},
{"Bar", {2, bm::Counter::kAvgThreads}},
{"Baz", {4, bm::Counter::kAvgThreads}},
{"Bat", {8, bm::Counter::kAvgThreads}},
{"Frob", {16, bm::Counter::kAvgThreads}},
{"Lob", {32, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 2)->Repetitions(2);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_mean\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_median\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_stddev\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 1,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 2,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 1,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 2,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_median\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 1,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 2,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_stddev\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 1,$", MR_Next},
{"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 2,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:1_mean\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:1_median\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:2_mean\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:2_median\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:1$",
&CheckTabular);
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
&CheckTabular);
// ========================================================================= //
// -------------------- Tabular+Rate Counters Output ----------------------- //
// ========================================================================= //
void BM_CounterRates_Tabular(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
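  // kAvgThreadsRate averages the counter over threads and then divides by the
  // CPU time, hence the "/s" suffix in the console patterns above (see also
  // CheckTabularRate below).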
state.counters.insert({
{"Foo", {1, bm::Counter::kAvgThreadsRate}},
{"Bar", {2, bm::Counter::kAvgThreadsRate}},
{"Baz", {4, bm::Counter::kAvgThreadsRate}},
{"Bat", {8, bm::Counter::kAvgThreadsRate}},
{"Frob", {16, bm::Counter::kAvgThreadsRate}},
{"Lob", {32, bm::Counter::kAvgThreadsRate}},
});
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const& e) {
double t = e.DurationCPUTime();
CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
&CheckTabularRate);
// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //
// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {10, bm::Counter::kAvgThreads}},
{"Bar", {20, bm::Counter::kAvgThreads}},
{"Baz", {40, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
{"\"family_index\": 2,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet0(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
// set only some of the counters again, this time with different values.
void BM_CounterSet1_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {15, bm::Counter::kAvgThreads}},
{"Bar", {25, bm::Counter::kAvgThreads}},
{"Baz", {45, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
{"\"family_index\": 3,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet1(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //
// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {10, bm::Counter::kAvgThreads}},
{"Bat", {30, bm::Counter::kAvgThreads}},
{"Baz", {40, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
{"\"family_index\": 4,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet2(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
0707010000008A000081A400000000000000000000000160C0813C00006706000000000000000000000000000000000000002B00000000benchmark-1.5.5/test/user_counters_test.cc
#undef NDEBUG
#include "benchmark/benchmark.h"
#include "output_test.h"
// ========================================================================= //
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //
// clang-format off
ADD_CASES(TC_ConsoleOut,
{{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next},
{"^[-]+$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
// clang-format on
// ========================================================================= //
// ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_Simple(benchmark::State& state) {
for (auto _ : state) {
}
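  // Plain assignments create counters with default flags; they are reported
  // verbatim, so "bar" ends up proportional to the iteration count.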
state.counters["foo"] = 1;
state.counters["bar"] = 2 * (double)state.iterations();
}
BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSimple(Results const& e) {
double its = e.NumIterations();
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
// check that the value of bar is within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
// ========================================================================= //
// --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= //
namespace {
int num_calls1 = 0;
}
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.counters["foo"] = 1;
state.counters["bar"] = ++num_calls1;
state.SetBytesProcessed(364);
state.SetItemsProcessed(150);
}
BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
"bar=%hrfloat bytes_per_second=%hrfloat/s "
"foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"bytes_per_second\": %float,$", MR_Next},
{"\"foo\": %float,$", MR_Next},
{"\"items_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
"%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckBytesAndItemsPSec(Results const& e) {
double t = e.DurationCPUTime(); // this (and not real time) is the time used
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
&CheckBytesAndItemsPSec);
// ========================================================================= //
// ------------------------- Rate Counters Output -------------------------- //
// ========================================================================= //
void BM_Counters_Rate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
}
BENCHMARK(BM_Counters_Rate);
ADD_CASES(
TC_ConsoleOut,
{{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
{"\"family_index\": 2,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckRate(Results const& e) {
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ========================================================================= //
// ----------------------- Inverted Counters Output ------------------------ //
// ========================================================================= //
void BM_Invert(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
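  // kInvert reports 1/value, so 0.0001 surfaces as 10k and 10000 as 100u
  // (matching the "%hrfloatk"/"%hrfloatu" console patterns below).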
state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
}
BENCHMARK(BM_Invert);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
{"\"family_index\": 3,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Invert\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvert(Results const& e) {
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
// ========================================================================= //
// --------------------- InvertedRate Counters Output ---------------------- //
// ========================================================================= //
void BM_Counters_InvertedRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] =
bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
state.counters["bar"] =
bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
}
BENCHMARK(BM_Counters_InvertedRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
"bar=%hrfloats foo=%hrfloats$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_InvertedRate\",$"},
{"\"family_index\": 4,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvertedRate(Results const& e) {
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
// ========================================================================= //
// ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_Threads(benchmark::State& state) {
for (auto _ : state) {
}
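  // Counters without kAvgThreads are summed across threads, so "foo" ends up
  // equal to the thread count and "bar" to twice that (see CheckThreads).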
state.counters["foo"] = 1;
state.counters["bar"] = 2;
}
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
{"\"family_index\": 5,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThreads(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ========================================================================= //
// ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_AvgThreads(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
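  // kAvgThreads averages over threads, so the reported values stay 1 and 2
  // regardless of the thread count.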
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
}
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
"%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{"\"family_index\": 6,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreads(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
&CheckAvgThreads);
// ========================================================================= //
// -------------------- ThreadAvgRate Counters Output ---------------------- //
// ========================================================================= //
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
}
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{"\"family_index\": 7,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
"threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreadsRate(Results const& e) {
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
&CheckAvgThreadsRate);
// ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= //
void BM_Counters_IterationInvariant(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_IterationInvariant);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_IterationInvariant\",$"},
{"\"family_index\": 8,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIterationInvariant(Results const& e) {
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
&CheckIterationInvariant);
// ========================================================================= //
// ----------------- IterationInvariantRate Counters Output ---------------- //
// ========================================================================= //
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
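  // kIsIterationInvariantRate == kIsRate | kIsIterationInvariant: the counter
  // is first scaled by the iteration count, then divided by the CPU time.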
state.counters["foo"] =
bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
state.counters["bar"] =
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_kIsIterationInvariantRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
{"\"family_index\": 9,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
"%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIsIterationInvariantRate(Results const& e) {
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
&CheckIsIterationInvariantRate);
// ========================================================================= //
// -------------------- AvgIterations Counters Output ---------------------- //
// ========================================================================= //
void BM_Counters_AvgIterations(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
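  // kAvgIterations divides the counter by the iteration count, reporting a
  // per-iteration average.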
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgIterations\",$"},
{"\"family_index\": 10,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterations(Results const& e) {
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
// ========================================================================= //
// ------------------ AvgIterationsRate Counters Output -------------------- //
// ========================================================================= //
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
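  // kAvgIterationsRate == kIsRate | kAvgIterations: the counter is divided by
  // both the iteration count and the CPU time (see CheckAvgIterationsRate).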
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
state.counters["bar"] =
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
{"\"family_index\": 11,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
"%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterationsRate(Results const& e) {
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
&CheckAvgIterationsRate);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
0707010000008B000081A400000000000000000000000160C0813C00002526000000000000000000000000000000000000003500000000benchmark-1.5.5/test/user_counters_thousands_test.cc
#undef NDEBUG
#include "benchmark/benchmark.h"
#include "output_test.h"
// ========================================================================= //
// ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= //
void BM_Counters_Thousands(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
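  // The console reporter scales each counter by its OneK base:
  //   1,000,000 / 1000 = 1000k      1,000,000 / 1024 ~= 976.563k
  //   1,048,576 / 1000 = 1048.58k   1,048,576 / 1024 = 1024k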
state.counters.insert({
{"t0_1000000DefaultBase",
bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
{"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1000)},
{"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1024)},
{"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1000)},
{"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1024)},
});
}
BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
ADD_CASES(
TC_ConsoleOut,
{
{"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
"t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
"t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_mean %console_report "
"t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
"t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
"t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_median %console_report "
"t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
"t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
"t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
"]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
"t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
"04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
"04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const& e) {
if (e.name != "BM_Counters_Thousands/repeats:2")
return; // Do not check the aggregates!
// Check that the values are within 0.01% of the expected values.
CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
0707010000008C000041ED00000000000000000000000360C0813C00000000000000000000000000000000000000000000001600000000benchmark-1.5.5/tools0707010000008D000081A400000000000000000000000160C0813C00000147000000000000000000000000000000000000002200000000benchmark-1.5.5/tools/BUILD.bazelload("@py_deps//:requirements.bzl", "requirement")
py_library(
name = "gbench",
srcs = glob(["gbench/*.py"]),
deps = [
requirement("numpy"),
requirement("scipy"),
],
)
py_binary(
name = "compare",
srcs = ["compare.py"],
python_version = "PY2",
deps = [
":gbench",
],
)
0707010000008E000081ED00000000000000000000000160C0813C0000477B000000000000000000000000000000000000002100000000benchmark-1.5.5/tools/compare.py#!/usr/bin/env python
"""
compare.py - versatile benchmark output compare tool
"""
import unittest
import argparse
from argparse import ArgumentParser
import json
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
"""
Perform checks on the user-provided inputs and diagnose any abnormalities
"""
in1_kind, in1_err = classify_input_file(in1)
in2_kind, in2_err = classify_input_file(in2)
output_file = find_benchmark_flag('--benchmark_out=', flags)
output_type = find_benchmark_flag('--benchmark_out_format=', flags)
if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
print(("WARNING: '--benchmark_out=%s' will be passed to both "
"benchmarks causing it to be overwritten") % output_file)
if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
print("WARNING: passing optional flags has no effect since both "
"inputs are JSON")
if output_type is not None and output_type != 'json':
print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
" is not supported.") % output_type)
sys.exit(1)
def create_parser():
parser = ArgumentParser(
description='versatile benchmark output compare tool')
parser.add_argument(
'-a',
'--display_aggregates_only',
dest='display_aggregates_only',
action="store_true",
help="If there are repetitions, by default, we display everything - the"
" actual runs, and the aggregates computed. Sometimes, it is "
"desirable to only view the aggregates. E.g. when there are a lot "
"of repetitions. Do note that only the display is affected. "
"Internally, all the actual runs are still used, e.g. for U test.")
parser.add_argument(
'--no-color',
dest='color',
default=True,
action="store_false",
help="Do not use colors in the terminal output"
)
parser.add_argument(
'-d',
'--dump_to_json',
dest='dump_to_json',
help="Additionally, dump benchmark comparison output to this file in JSON format.")
utest = parser.add_argument_group()
utest.add_argument(
'--no-utest',
dest='utest',
default=True,
action="store_false",
help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
alpha_default = 0.05
utest.add_argument(
"--alpha",
dest='utest_alpha',
default=alpha_default,
type=float,
help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
alpha_default)
subparsers = parser.add_subparsers(
help='This tool has multiple modes of operation:',
dest='mode')
parser_a = subparsers.add_parser(
'benchmarks',
help='The simplest use case: compare all the output of these two benchmarks')
baseline = parser_a.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
contender = parser_a.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
parser_a.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_b = subparsers.add_parser(
'filters', help='Compare one filter of a benchmark with another filter of the same benchmark')
baseline = parser_b.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test',
metavar='test',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, which will be used as the baseline')
contender = parser_b.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, which will be compared against the baseline')
parser_b.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_c = subparsers.add_parser(
'benchmarksfiltered',
help='Compare one filter of the first benchmark with another filter of the second benchmark')
baseline = parser_c.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, which will be used as the baseline')
contender = parser_c.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='The second benchmark executable or JSON output file, which will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, which will be compared against the baseline')
parser_c.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
return parser
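# Illustrative invocations of the three modes defined above (the paths
# and filter names here are hypothetical, shown only as a sketch):
#   compare.py benchmarks ./base_bench ./contender_bench
#   compare.py filters ./bench BM_Baseline BM_Contender
#   compare.py benchmarksfiltered ./base_bench BM_A ./contender_bench BM_B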
def main():
# Parse the command line flags
parser = create_parser()
args, unknown_args = parser.parse_known_args()
if args.mode is None:
parser.print_help()
sys.exit(1)
assert not unknown_args
benchmark_options = args.benchmark_options
if args.mode == 'benchmarks':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = ''
filter_contender = ''
# NOTE: if test_baseline == test_contender, you are analyzing the stdev
description = 'Comparing %s to %s' % (test_baseline, test_contender)
elif args.mode == 'filters':
test_baseline = args.test[0].name
test_contender = args.test[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if filter_baseline == filter_contender, you are analyzing the
# stdev
description = 'Comparing %s to %s (from %s)' % (
filter_baseline, filter_contender, args.test[0].name)
elif args.mode == 'benchmarksfiltered':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if test_baseline == test_contender and
# filter_baseline == filter_contender, you are analyzing the stdev
description = 'Comparing %s (from %s) to %s (from %s)' % (
filter_baseline, test_baseline, filter_contender, test_contender)
else:
# should never happen
print("Unrecognized mode of operation: '%s'" % args.mode)
parser.print_help()
sys.exit(1)
check_inputs(test_baseline, test_contender, benchmark_options)
if args.display_aggregates_only:
benchmark_options += ['--benchmark_display_aggregates_only=true']
options_baseline = []
options_contender = []
if filter_baseline and filter_contender:
options_baseline = ['--benchmark_filter=%s' % filter_baseline]
options_contender = ['--benchmark_filter=%s' % filter_contender]
# Run the benchmarks and report the results
json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
test_baseline, benchmark_options + options_baseline))
json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
test_contender, benchmark_options + options_contender))
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
json1 = gbench.report.filter_benchmark(
json1_orig, filter_baseline, replacement)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
diff_report = gbench.report.get_difference_report(
json1, json2, args.utest)
output_lines = gbench.report.print_difference_report(
diff_report,
args.display_aggregates_only,
args.utest, args.utest_alpha, args.color)
print(description)
for ln in output_lines:
print(ln)
# Optionally, diff and output to JSON
if args.dump_to_json is not None:
with open(args.dump_to_json, 'w') as f_json:
json.dump(diff_report, f_json)
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'gbench',
'Inputs')
self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
def test_benchmarks_basic(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest(self):
parsed = self.parser.parse_args(
['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.05)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_display_aggregates_only(self):
parsed = self.parser.parse_args(
['-a', 'benchmarks', self.testInput0, self.testInput1])
self.assertTrue(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['d'])
def test_benchmarks_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_basic(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertFalse(parsed.benchmark_options)
def test_filters_with_remainder(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', '--', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['f'])
def test_benchmarksfiltered_basic(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertFalse(parsed.benchmark_options)
def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'f')
def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
# unittest.main()
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
0707010000008F000041ED00000000000000000000000360C0813C00000000000000000000000000000000000000000000001D00000000benchmark-1.5.5/tools/gbench07070100000090000041ED00000000000000000000000260C0813C00000000000000000000000000000000000000000000002400000000benchmark-1.5.5/tools/gbench/Inputs07070100000091000081A400000000000000000000000160C0813C000009E4000000000000000000000000000000000000003400000000benchmark-1.5.5/tools/gbench/Inputs/test1_run1.json{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_1PercentFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_1PercentSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_100xSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_100xFaster",
"iterations": 1000,
"real_time": 10000,
"cpu_time": 10000,
"time_unit": "ns"
},
{
"name": "BM_10PercentCPUToTime",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_ThirdFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "MyComplexityTest_BigO",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "BigO",
"cpu_coefficient": 4.2749856294592886e+00,
"real_coefficient": 6.4789275289789780e+00,
"big_o": "N",
"time_unit": "ns"
},
{
"name": "MyComplexityTest_RMS",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "RMS",
"rms": 4.5097802512472874e-03
},
{
"name": "BM_NotBadTimeUnit",
"iterations": 1000,
"real_time": 0.4,
"cpu_time": 0.5,
"time_unit": "s"
},
{
"name": "BM_DifferentTimeUnit",
"iterations": 1,
"real_time": 1,
"cpu_time": 1,
"time_unit": "s"
}
]
}
07070100000092000081A400000000000000000000000160C0813C00000A17000000000000000000000000000000000000003400000000benchmark-1.5.5/tools/gbench/Inputs/test1_run2.json{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 25,
"cpu_time": 25,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 20833333,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_1PercentFaster",
"iterations": 1000,
"real_time": 98.9999999,
"cpu_time": 98.9999999,
"time_unit": "ns"
},
{
"name": "BM_1PercentSlower",
"iterations": 1000,
"real_time": 100.9999999,
"cpu_time": 100.9999999,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 90,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 110,
"cpu_time": 110,
"time_unit": "ns"
},
{
"name": "BM_100xSlower",
"iterations": 1000,
"real_time": 1.0000e+04,
"cpu_time": 1.0000e+04,
"time_unit": "ns"
},
{
"name": "BM_100xFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentCPUToTime",
"iterations": 1000,
"real_time": 110,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_ThirdFaster",
"iterations": 1000,
"real_time": 66.665,
"cpu_time": 66.664,
"time_unit": "ns"
},
{
"name": "MyComplexityTest_BigO",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "BigO",
"cpu_coefficient": 5.6215779594361486e+00,
"real_coefficient": 5.6288314793554610e+00,
"big_o": "N",
"time_unit": "ns"
},
{
"name": "MyComplexityTest_RMS",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "RMS",
"rms": 3.3128901852342174e-03
},
{
"name": "BM_NotBadTimeUnit",
"iterations": 1000,
"real_time": 0.04,
"cpu_time": 0.6,
"time_unit": "s"
},
{
"name": "BM_DifferentTimeUnit",
"iterations": 1,
"real_time": 1,
"cpu_time": 1,
"time_unit": "ns"
}
]
}
07070100000093000081A400000000000000000000000160C0813C0000060F000000000000000000000000000000000000003300000000benchmark-1.5.5/tools/gbench/Inputs/test2_run.json{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_Hi",
"iterations": 1234,
"real_time": 42,
"cpu_time": 24,
"time_unit": "ms"
},
{
"name": "BM_Zero",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_Zero/4",
"iterations": 4000,
"real_time": 40,
"cpu_time": 40,
"time_unit": "ns"
},
{
"name": "Prefix/BM_Zero",
"iterations": 2000,
"real_time": 20,
"cpu_time": 20,
"time_unit": "ns"
},
{
"name": "Prefix/BM_Zero/3",
"iterations": 3000,
"real_time": 30,
"cpu_time": 30,
"time_unit": "ns"
},
{
"name": "BM_One",
"iterations": 5000,
"real_time": 5,
"cpu_time": 5,
"time_unit": "ns"
},
{
"name": "BM_One/4",
"iterations": 2000,
"real_time": 20,
"cpu_time": 20,
"time_unit": "ns"
},
{
"name": "Prefix/BM_One",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "Prefix/BM_One/3",
"iterations": 1500,
"real_time": 15,
"cpu_time": 15,
"time_unit": "ns"
},
{
"name": "BM_Bye",
"iterations": 5321,
"real_time": 11,
"cpu_time": 63,
"time_unit": "ns"
}
]
}
07070100000094000081A400000000000000000000000160C0813C000004F2000000000000000000000000000000000000003400000000benchmark-1.5.5/tools/gbench/Inputs/test3_run0.json{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_One",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 10,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_Two",
"iterations": 1000,
"real_time": 9,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_Two",
"iterations": 1000,
"real_time": 8,
"cpu_time": 86,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 8,
"cpu_time": 80,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 8,
"cpu_time": 77,
"time_unit": "ns"
},
{
"name": "medium",
"run_type": "iteration",
"iterations": 1000,
"real_time": 8,
"cpu_time": 80,
"time_unit": "ns"
},
{
"name": "medium",
"run_type": "iteration",
"iterations": 1000,
"real_time": 9,
"cpu_time": 82,
"time_unit": "ns"
}
]
}
07070100000095000081A400000000000000000000000160C0813C000004F6000000000000000000000000000000000000003400000000benchmark-1.5.5/tools/gbench/Inputs/test3_run1.json{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_One",
"iterations": 1000,
"real_time": 9,
"cpu_time": 110,
"time_unit": "ns"
},
{
"name": "BM_Two",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 10,
"cpu_time": 89,
"time_unit": "ns"
},
{
"name": "BM_Two",
"iterations": 1000,
"real_time": 7,
"cpu_time": 72,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 7,
"cpu_time": 75,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 762,
"real_time": 4.54,
"cpu_time": 66.6,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "iteration",
"iterations": 1000,
"real_time": 800,
"cpu_time": 1,
"time_unit": "ns"
},
{
"name": "medium",
"run_type": "iteration",
"iterations": 1200,
"real_time": 5,
"cpu_time": 53,
"time_unit": "ns"
}
]
}
07070100000096000081A400000000000000000000000160C0813C0000090D000000000000000000000000000000000000003300000000benchmark-1.5.5/tools/gbench/Inputs/test4_run.json{
"benchmarks": [
{
"name": "99 family 0 instance 0 repetition 0",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 0,
"repetition_index": 0
},
{
"name": "98 family 0 instance 0 repetition 1",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 0,
"repetition_index": 1
},
{
"name": "97 family 0 instance 0 aggregate",
"run_type": "aggregate",
"family_index": 0,
"per_family_instance_index": 0,
"aggregate_name": "9 aggregate"
},
{
"name": "96 family 0 instance 1 repetition 0",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 1,
"repetition_index": 0
},
{
"name": "95 family 0 instance 1 repetition 1",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 1,
"repetition_index": 1
},
{
"name": "94 family 0 instance 1 aggregate",
"run_type": "aggregate",
"family_index": 0,
"per_family_instance_index": 1,
"aggregate_name": "9 aggregate"
},
{
"name": "93 family 1 instance 0 repetition 0",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 0,
"repetition_index": 0
},
{
"name": "92 family 1 instance 0 repetition 1",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 0,
"repetition_index": 1
},
{
"name": "91 family 1 instance 0 aggregate",
"run_type": "aggregate",
"family_index": 1,
"per_family_instance_index": 0,
"aggregate_name": "9 aggregate"
},
{
"name": "90 family 1 instance 1 repetition 0",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 1,
"repetition_index": 0
},
{
"name": "89 family 1 instance 1 repetition 1",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 1,
"repetition_index": 1
},
{
"name": "88 family 1 instance 1 aggregate",
"run_type": "aggregate",
"family_index": 1,
"per_family_instance_index": 1,
"aggregate_name": "9 aggregate"
}
]
}
07070100000097000081A400000000000000000000000160C0813C000000C2000000000000000000000000000000000000002900000000benchmark-1.5.5/tools/gbench/__init__.py"""Google Benchmark tooling"""
__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
07070100000098000081A400000000000000000000000160C0813C00009673000000000000000000000000000000000000002700000000benchmark-1.5.5/tools/gbench/report.py"""report.py - Utilities for reporting statistics about benchmark results
"""
import unittest
import os
import re
import copy
import random
from scipy.stats import mannwhitneyu
class BenchmarkColor(object):
def __init__(self, name, code):
self.name = name
self.code = code
def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.code))
def __format__(self, format):
return self.code
# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number; more is better.
UTEST_COL_NAME = "_pvalue"
def color_format(use_color, fmt_str, *args, **kwargs):
"""
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
is False then all color codes in 'args' and 'kwargs' are replaced with
the empty string.
"""
assert use_color is True or use_color is False
if not use_color:
args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for arg in args]
kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()}
return fmt_str.format(*args, **kwargs)
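# For example (a minimal sketch):
#   color_format(True, '{}fast{endc}', BC_OKGREEN, endc=BC_ENDC)
#     -> '\033[32mfast\033[0m'
#   color_format(False, '{}fast{endc}', BC_OKGREEN, endc=BC_ENDC)
#     -> 'fast'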
def find_longest_name(benchmark_list):
"""
Return the length of the longest benchmark name in a given list of
benchmark JSON objects
"""
longest_name = 1
for bc in benchmark_list:
if len(bc['name']) > longest_name:
longest_name = len(bc['name'])
return longest_name
def calculate_change(old_val, new_val):
"""
Return a float representing the decimal change between old_val and new_val.
"""
if old_val == 0 and new_val == 0:
return 0.0
if old_val == 0:
return float(new_val - old_val) / (float(old_val + new_val) / 2)
return float(new_val - old_val) / abs(old_val)
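# Worked examples: calculate_change(100, 110) == +0.10 (a 10% regression)
# and calculate_change(50, 25) == -0.50 (a 2x speedup). When old_val is 0,
# the change is taken relative to the midpoint of the two values, so
# calculate_change(0, 10) == 2.0 instead of dividing by zero.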
def filter_benchmark(json_orig, family, replacement=""):
"""
Apply a filter to the json, and only leave the 'family' of benchmarks.
"""
regex = re.compile(family)
filtered = {}
filtered['benchmarks'] = []
for be in json_orig['benchmarks']:
if not regex.search(be['name']):
continue
filteredbench = copy.deepcopy(be) # Do NOT modify the old name!
filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
filtered['benchmarks'].append(filteredbench)
return filtered
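# For example, filter_benchmark(json, 'BM_Z.ro', '.') keeps only entries
# whose name matches the regex 'BM_Z.ro' and rewrites the matched part to
# '.', turning 'Prefix/BM_Zero/3' into 'Prefix/./3' (see the unit tests
# later in this file).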
def get_unique_benchmark_names(json):
"""
Return all the unique benchmark 'names', preserving their order of first appearance.
"""
seen = set()
uniqued = [x['name'] for x in json['benchmarks']
if x['name'] not in seen and
(seen.add(x['name']) or True)]
return uniqued
def intersect(list1, list2):
"""
Given two lists, get a new list consisting of the elements only contained
in *both of the input lists*, while preserving the ordering.
"""
return [x for x in list1 if x in list2]
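# E.g. intersect(['a', 'b', 'c'], ['c', 'a']) == ['a', 'c'].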
def is_potentially_comparable_benchmark(x):
return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x)
def partition_benchmarks(json1, json2):
"""
While preserving the ordering, find benchmarks with the same names in
both of the inputs, and group them.
(i.e. partition/filter into groups with common name)
"""
json1_unique_names = get_unique_benchmark_names(json1)
json2_unique_names = get_unique_benchmark_names(json2)
names = intersect(json1_unique_names, json2_unique_names)
partitions = []
for name in names:
time_unit = None
# Pick the time unit from the first entry of the lhs benchmark.
# We should be careful not to crash with unexpected input.
for x in json1['benchmarks']:
if (x['name'] == name and is_potentially_comparable_benchmark(x)):
time_unit = x['time_unit']
break
if time_unit is None:
continue
# Filter by name and time unit.
# All the repetitions are assumed to be comparable.
lhs = [x for x in json1['benchmarks'] if x['name'] == name and
x['time_unit'] == time_unit]
rhs = [x for x in json2['benchmarks'] if x['name'] == name and
x['time_unit'] == time_unit]
partitions.append([lhs, rhs])
return partitions
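# The result is a list of [lhs_runs, rhs_runs] pairs, one per benchmark
# name present in both inputs; entries without usable timing fields (e.g.
# complexity aggregates such as '_BigO'/'_RMS') are skipped.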
def extract_field(partition, field_name):
# The count of elements may be different. We want *all* of them.
lhs = [x[field_name] for x in partition[0]]
rhs = [x[field_name] for x in partition[1]]
return [lhs, rhs]
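# E.g. extract_field(partition, 'cpu_time') returns
# [[lhs cpu times...], [rhs cpu times...]], keeping every repetition
# from both sides even when the counts differ.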
def calc_utest(timings_cpu, timings_time):
min_rep_cnt = min(len(timings_time[0]),
len(timings_time[1]),
len(timings_cpu[0]),
len(timings_cpu[1]))
# Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
if min_rep_cnt < UTEST_MIN_REPETITIONS:
return False, None, None
time_pvalue = mannwhitneyu(
timings_time[0], timings_time[1], alternative='two-sided').pvalue
cpu_pvalue = mannwhitneyu(
timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue
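# A minimal sketch (the timings below are hypothetical):
#   ok, cpu_p, time_p = calc_utest([[90, 86], [89, 72]], [[9, 8], [10, 7]])
# Here ok is False because only 2 repetitions are available, fewer than
# the UTEST_OPTIMAL_REPETITIONS (9) considered reliable, but both
# two-sided Mann-Whitney p-values are still computed and returned.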
def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
def get_utest_color(pval):
return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
# Check if we failed miserably with minimum required repetitions for utest
if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None:
return []
dsc = "U Test, Repetitions: {} vs {}".format(
utest['nr_of_repetitions'], utest['nr_of_repetitions_other'])
dsc_color = BC_OKGREEN
# We still got some results to show but issue a warning about it.
if not utest['have_optimal_repetitions']:
dsc_color = BC_WARNING
dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
UTEST_OPTIMAL_REPETITIONS)
special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
return [color_format(use_color,
special_str,
BC_HEADER,
"{}{}".format(bc_name, UTEST_COL_NAME),
first_col_width,
get_utest_color(
utest['time_pvalue']), utest['time_pvalue'],
get_utest_color(
utest['cpu_pvalue']), utest['cpu_pvalue'],
dsc_color, dsc,
endc=BC_ENDC)]
def get_difference_report(
json1,
json2,
utest=False):
"""
Calculate and report the difference between each test of two benchmark
runs specified as 'json1' and 'json2'. The output is another JSON object
containing the relevant details for each test run.
"""
assert utest is True or utest is False
diff_report = []
partitions = partition_benchmarks(json1, json2)
for partition in partitions:
benchmark_name = partition[0][0]['name']
time_unit = partition[0][0]['time_unit']
measurements = []
utest_results = {}
# Careful, we may have different repetition count.
for i in range(min(len(partition[0]), len(partition[1]))):
bn = partition[0][i]
other_bench = partition[1][i]
measurements.append({
'real_time': bn['real_time'],
'cpu_time': bn['cpu_time'],
'real_time_other': other_bench['real_time'],
'cpu_time_other': other_bench['cpu_time'],
'time': calculate_change(bn['real_time'], other_bench['real_time']),
'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time'])
})
# After processing the whole partition, if requested, do the U test.
if utest:
timings_cpu = extract_field(partition, 'cpu_time')
timings_time = extract_field(partition, 'real_time')
have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time)
if cpu_pvalue and time_pvalue:
utest_results = {
'have_optimal_repetitions': have_optimal_repetitions,
'cpu_pvalue': cpu_pvalue,
'time_pvalue': time_pvalue,
'nr_of_repetitions': len(timings_cpu[0]),
'nr_of_repetitions_other': len(timings_cpu[1])
}
# Store only if we had any measurements for given benchmark.
# E.g. partition_benchmarks will filter out the benchmarks having
# time units which are not compatible with other time units in the
# benchmark suite.
if measurements:
run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else ''
aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else ''
diff_report.append({
'name': benchmark_name,
'measurements': measurements,
'time_unit': time_unit,
'run_type': run_type,
'aggregate_name': aggregate_name,
'utest': utest_results
})
return diff_report
def print_difference_report(
json_diff_report,
include_aggregates_only=False,
utest=False,
utest_alpha=0.05,
use_color=True):
"""
Format the difference report produced by 'get_difference_report' into
human-readable output lines.
"""
assert utest is True or utest is False
def get_color(res):
if res > 0.05:
return BC_FAIL
elif res > -0.07:
return BC_WHITE
else:
return BC_CYAN
first_col_width = find_longest_name(json_diff_report)
first_col_width = max(
first_col_width,
len('Benchmark'))
first_col_width += len(UTEST_COL_NAME)
first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
for benchmark in json_diff_report:
# *If* we were asked to only include aggregates,
# and if it is non-aggregate, then don't print it.
if not include_aggregates_only or 'run_type' not in benchmark or benchmark['run_type'] == 'aggregate':
for measurement in benchmark['measurements']:
output_strs += [color_format(use_color,
fmt_str,
BC_HEADER,
benchmark['name'],
first_col_width,
get_color(measurement['time']),
measurement['time'],
get_color(measurement['cpu']),
measurement['cpu'],
measurement['real_time'],
measurement['real_time_other'],
measurement['cpu_time'],
measurement['cpu_time_other'],
endc=BC_ENDC)]
# After processing the measurements, if requested and
# if applicable (e.g. u-test exists for given benchmark),
# print the U test.
if utest and benchmark['utest']:
output_strs += print_utest(benchmark['name'],
benchmark['utest'],
utest_alpha=utest_alpha,
first_col_width=first_col_width,
use_color=use_color)
return output_strs
###############################################################################
# Unit tests
class TestGetUniqueBenchmarkNames(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput = os.path.join(testInputs, 'test3_run0.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
def test_basic(self):
expect_lines = [
'BM_One',
'BM_Two',
'short', # These two are not sorted
'medium', # These two are not sorted
]
json = self.load_results()
output_lines = get_unique_benchmark_names(json)
print("\n")
print("\n".join(output_lines))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
self.assertEqual(expect_lines[i], output_lines[i])
class TestReportDifference(unittest.TestCase):
@classmethod
def setUpClass(cls):
def load_results():
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test1_run1.json')
testOutput2 = os.path.join(testInputs, 'test1_run2.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
json1, json2 = load_results()
cls.json_diff_report = get_difference_report(json1, json2)
def test_json_diff_report_pretty_printing(self):
expect_lines = [
['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
['BM_100xSlower', '+99.0000', '+99.0000',
'100', '10000', '100', '10000'],
['BM_100xFaster', '-0.9900', '-0.9900',
'10000', '100', '10000', '100'],
['BM_10PercentCPUToTime', '+0.1000',
'-0.1000', '100', '110', '100', '90'],
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
output_lines_with_header = print_difference_report(
self.json_diff_report, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report_output(self):
expected_output = [
{
'name': 'BM_SameTimes',
'measurements': [{'time': 0.0000, 'cpu': 0.0000, 'real_time': 10, 'real_time_other': 10, 'cpu_time': 10, 'cpu_time_other': 10}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_2xFaster',
'measurements': [{'time': -0.5000, 'cpu': -0.5000, 'real_time': 50, 'real_time_other': 25, 'cpu_time': 50, 'cpu_time_other': 25}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_2xSlower',
'measurements': [{'time': 1.0000, 'cpu': 1.0000, 'real_time': 50, 'real_time_other': 100, 'cpu_time': 50, 'cpu_time_other': 100}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_1PercentFaster',
'measurements': [{'time': -0.0100, 'cpu': -0.0100, 'real_time': 100, 'real_time_other': 98.9999999, 'cpu_time': 100, 'cpu_time_other': 98.9999999}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_1PercentSlower',
'measurements': [{'time': 0.0100, 'cpu': 0.0100, 'real_time': 100, 'real_time_other': 101, 'cpu_time': 100, 'cpu_time_other': 101}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_10PercentFaster',
'measurements': [{'time': -0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 90, 'cpu_time': 100, 'cpu_time_other': 90}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_10PercentSlower',
'measurements': [{'time': 0.1000, 'cpu': 0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 110}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_100xSlower',
'measurements': [{'time': 99.0000, 'cpu': 99.0000, 'real_time': 100, 'real_time_other': 10000, 'cpu_time': 100, 'cpu_time_other': 10000}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_100xFaster',
'measurements': [{'time': -0.9900, 'cpu': -0.9900, 'real_time': 10000, 'real_time_other': 100, 'cpu_time': 10000, 'cpu_time_other': 100}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_10PercentCPUToTime',
'measurements': [{'time': 0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 90}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_ThirdFaster',
'measurements': [{'time': -0.3333, 'cpu': -0.3334, 'real_time': 100, 'real_time_other': 67, 'cpu_time': 100, 'cpu_time_other': 67}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_NotBadTimeUnit',
'measurements': [{'time': -0.9000, 'cpu': 0.2000, 'real_time': 0.4, 'real_time_other': 0.04, 'cpu_time': 0.5, 'cpu_time_other': 0.6}],
'time_unit': 's',
'utest': {}
},
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(
self.json_diff_report, expected_output):
self.assertEqual(out['name'], expected['name'])
self.assertEqual(out['time_unit'], expected['time_unit'])
assert_utest(self, out, expected)
assert_measurements(self, out, expected)
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
@classmethod
def setUpClass(cls):
def load_result():
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput = os.path.join(testInputs, 'test2_run.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
json = load_result()
json1 = filter_benchmark(json, "BM_Z.ro", ".")
json2 = filter_benchmark(json, "BM_O.e", ".")
cls.json_diff_report = get_difference_report(json1, json2)
def test_json_diff_report_pretty_printing(self):
expect_lines = [
['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
]
output_lines_with_header = print_difference_report(
self.json_diff_report, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report(self):
expected_output = [
{
'name': u'.',
'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}],
'time_unit': 'ns',
'utest': {}
},
{
'name': u'./4',
'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}],
'time_unit': 'ns',
'utest': {},
},
{
'name': u'Prefix/.',
'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}],
'time_unit': 'ns',
'utest': {}
},
{
'name': u'Prefix/./3',
'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}],
'time_unit': 'ns',
'utest': {}
}
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(
self.json_diff_report, expected_output):
self.assertEqual(out['name'], expected['name'])
self.assertEqual(out['time_unit'], expected['time_unit'])
assert_utest(self, out, expected)
assert_measurements(self, out, expected)
class TestReportDifferenceWithUTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
def load_results():
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test3_run0.json')
testOutput2 = os.path.join(testInputs, 'test3_run1.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
json1, json2 = load_results()
cls.json_diff_report = get_difference_report(
json1, json2, utest=True)
def test_json_diff_report_pretty_printing(self):
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
'0.6985',
'0.6985',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'2.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
'0.1489',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'3.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
]
output_lines_with_header = print_difference_report(
self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report_pretty_printing_aggregates_only(self):
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two_pvalue',
'0.6985',
'0.6985',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'2.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
'0.1489',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'3.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
]
output_lines_with_header = print_difference_report(
self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report(self):
expected_output = [
{
'name': u'BM_One',
'measurements': [
{'time': -0.1,
'cpu': 0.1,
'real_time': 10,
'real_time_other': 9,
'cpu_time': 100,
'cpu_time_other': 110}
],
'time_unit': 'ns',
'utest': {}
},
{
'name': u'BM_Two',
'measurements': [
{'time': 0.1111111111111111,
'cpu': -0.011111111111111112,
'real_time': 9,
'real_time_other': 10,
'cpu_time': 90,
'cpu_time_other': 89},
{'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
],
'time_unit': 'ns',
'utest': {
'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
}
},
{
'name': u'short',
'measurements': [
{'time': -0.125,
'cpu': -0.0625,
'real_time': 8,
'real_time_other': 7,
'cpu_time': 80,
'cpu_time_other': 75},
{'time': -0.4325,
'cpu': -0.13506493506493514,
'real_time': 8,
'real_time_other': 4.54,
'cpu_time': 77,
'cpu_time_other': 66.6}
],
'time_unit': 'ns',
'utest': {
'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
}
},
{
'name': u'medium',
'measurements': [
{'time': -0.375,
'cpu': -0.3375,
'real_time': 8,
'real_time_other': 5,
'cpu_time': 80,
'cpu_time_other': 53}
],
'time_unit': 'ns',
'utest': {}
}
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(
self.json_diff_report, expected_output):
self.assertEqual(out['name'], expected['name'])
self.assertEqual(out['time_unit'], expected['time_unit'])
assert_utest(self, out, expected)
assert_measurements(self, out, expected)
class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
unittest.TestCase):
@classmethod
def setUpClass(cls):
def load_results():
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test3_run0.json')
testOutput2 = os.path.join(testInputs, 'test3_run1.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
json1, json2 = load_results()
cls.json_diff_report = get_difference_report(
json1, json2, utest=True)
def test_json_diff_report_pretty_printing(self):
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
'0.6985',
'0.6985',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'2.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
'0.1489',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'3.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['medium', '-0.3750', '-0.3375', '8', '5', '80', '53']
]
output_lines_with_header = print_difference_report(
self.json_diff_report,
utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
def test_json_diff_report(self):
expected_output = [
{
'name': u'BM_One',
'measurements': [
{'time': -0.1,
'cpu': 0.1,
'real_time': 10,
'real_time_other': 9,
'cpu_time': 100,
'cpu_time_other': 110}
],
'time_unit': 'ns',
'utest': {}
},
{
'name': u'BM_Two',
'measurements': [
{'time': 0.1111111111111111,
'cpu': -0.011111111111111112,
'real_time': 9,
'real_time_other': 10,
'cpu_time': 90,
'cpu_time_other': 89},
{'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
],
'time_unit': 'ns',
'utest': {
'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
}
},
{
'name': u'short',
'measurements': [
{'time': -0.125,
'cpu': -0.0625,
'real_time': 8,
'real_time_other': 7,
'cpu_time': 80,
'cpu_time_other': 75},
{'time': -0.4325,
'cpu': -0.13506493506493514,
'real_time': 8,
'real_time_other': 4.54,
'cpu_time': 77,
'cpu_time_other': 66.6}
],
'time_unit': 'ns',
'utest': {
'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
}
},
{
'name': u'medium',
'measurements': [
{'real_time_other': 5,
'cpu_time': 80,
'time': -0.375,
'real_time': 8,
'cpu_time_other': 53,
'cpu': -0.3375
}
],
'utest': {},
'time_unit': u'ns',
'aggregate_name': ''
}
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(
self.json_diff_report, expected_output):
self.assertEqual(out['name'], expected['name'])
self.assertEqual(out['time_unit'], expected['time_unit'])
assert_utest(self, out, expected)
assert_measurements(self, out, expected)
class TestReportSorting(unittest.TestCase):
@classmethod
def setUpClass(cls):
def load_result():
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput = os.path.join(testInputs, 'test4_run.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
cls.json = load_result()
def test_json_diff_report_pretty_printing(self):
import util
expected_names = [
"99 family 0 instance 0 repetition 0",
"98 family 0 instance 0 repetition 1",
"97 family 0 instance 0 aggregate",
"96 family 0 instance 1 repetition 0",
"95 family 0 instance 1 repetition 1",
"94 family 0 instance 1 aggregate",
"93 family 1 instance 0 repetition 0",
"92 family 1 instance 0 repetition 1",
"91 family 1 instance 0 aggregate",
"90 family 1 instance 1 repetition 0",
"89 family 1 instance 1 repetition 1",
"88 family 1 instance 1 aggregate"
]
for n in range(len(self.json['benchmarks']) ** 2):
random.shuffle(self.json['benchmarks'])
sorted_benchmarks = util.sort_benchmark_results(self.json)[
'benchmarks']
self.assertEqual(len(expected_names), len(sorted_benchmarks))
for out, expected in zip(sorted_benchmarks, expected_names):
self.assertEqual(out['name'], expected)
def assert_utest(unittest_instance, lhs, rhs):
if lhs['utest']:
unittest_instance.assertAlmostEqual(
lhs['utest']['cpu_pvalue'],
rhs['utest']['cpu_pvalue'])
unittest_instance.assertAlmostEqual(
lhs['utest']['time_pvalue'],
rhs['utest']['time_pvalue'])
unittest_instance.assertEqual(
lhs['utest']['have_optimal_repetitions'],
rhs['utest']['have_optimal_repetitions'])
else:
# lhs is empty. assert if rhs is not.
unittest_instance.assertEqual(lhs['utest'], rhs['utest'])
def assert_measurements(unittest_instance, lhs, rhs):
for m1, m2 in zip(lhs['measurements'], rhs['measurements']):
unittest_instance.assertEqual(m1['real_time'], m2['real_time'])
unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time'])
# m1['time'] and m1['cpu'] hold values which are being calculated,
# and therefore we must use almost-equal pattern.
unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4)
unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4)
if __name__ == '__main__':
unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
07070100000099000081A400000000000000000000000160C0813C0000172E000000000000000000000000000000000000002500000000benchmark-1.5.5/tools/gbench/util.py"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import tempfile
import subprocess
import sys
import functools
# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
def is_executable_file(filename):
"""
Return 'True' if 'filename' names a valid file which is likely
an executable. A file is considered an executable if it starts with the
magic bytes for a EXE, Mach O, or ELF file.
"""
if not os.path.isfile(filename):
return False
with open(filename, mode='rb') as f:
magic_bytes = f.read(_num_magic_bytes)
if sys.platform == 'darwin':
return magic_bytes in [
b'\xfe\xed\xfa\xce', # MH_MAGIC
b'\xce\xfa\xed\xfe', # MH_CIGAM
b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
b'\xca\xfe\xba\xbe', # FAT_MAGIC
b'\xbe\xba\xfe\xca' # FAT_CIGAM
]
elif sys.platform.startswith('win'):
return magic_bytes == b'MZ'
else:
return magic_bytes == b'\x7FELF'
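# Illustrative behavior (a sketch, not part of the tool's API surface): on
# Linux this returns True for any regular file whose first four bytes are
# b'\x7fELF', such as a compiled benchmark binary, and False for plain-text
# files such as JSON output.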
def is_json_file(filename):
"""
Returns 'True' if 'filename' names a valid JSON output file.
'False' otherwise.
"""
    try:
        with open(filename, 'r') as f:
            json.load(f)
        return True
    except (ValueError, OSError):
        # Not valid JSON, or the file could not be opened/decoded.
        pass
    return False
def classify_input_file(filename):
"""
Return a tuple (type, msg) where 'type' specifies the classified type
of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
string represeting the error.
"""
ftype = IT_Invalid
err_msg = None
if not os.path.exists(filename):
err_msg = "'%s' does not exist" % filename
elif not os.path.isfile(filename):
err_msg = "'%s' does not name a file" % filename
elif is_executable_file(filename):
ftype = IT_Executable
elif is_json_file(filename):
ftype = IT_JSON
else:
err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
return ftype, err_msg
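# Illustrative usage (a sketch; 'no_such_file' is a hypothetical path):
#   classify_input_file('no_such_file')
# returns (IT_Invalid, "'no_such_file' does not exist"), while an ELF
# benchmark binary classifies as (IT_Executable, None) and a JSON results
# dump as (IT_JSON, None).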
def check_input_file(filename):
"""
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
"""
ftype, msg = classify_input_file(filename)
if ftype == IT_Invalid:
print("Invalid input file: %s" % msg)
sys.exit(1)
return ftype
def find_benchmark_flag(prefix, benchmark_flags):
"""
Search the specified list of flags for a flag matching `<prefix><arg>` and
if it is found return the arg it specifies. If specified more than once the
last value is returned. If the flag is not found None is returned.
"""
assert prefix.startswith('--') and prefix.endswith('=')
result = None
for f in benchmark_flags:
if f.startswith(prefix):
result = f[len(prefix):]
return result
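# Illustrative usage (a sketch with made-up flag values):
#   find_benchmark_flag('--benchmark_out=',
#                       ['--benchmark_out=a.json', '--benchmark_out=b.json'])
# returns 'b.json' (the last occurrence wins), and returns None when no flag
# starts with the given prefix.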
def remove_benchmark_flags(prefix, benchmark_flags):
"""
Return a new list containing the specified benchmark_flags except those
with the specified prefix.
"""
assert prefix.startswith('--') and prefix.endswith('=')
return [f for f in benchmark_flags if not f.startswith(prefix)]
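# Illustrative usage (a sketch with made-up flag values):
#   remove_benchmark_flags('--benchmark_out=',
#                          ['--benchmark_out=a.json',
#                           '--benchmark_repetitions=3'])
# returns ['--benchmark_repetitions=3'].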
def load_benchmark_results(fname):
"""
Read benchmark output from a file and return the JSON object.
REQUIRES: 'fname' names a file containing JSON benchmark output.
"""
with open(fname, 'r') as f:
return json.load(f)
def sort_benchmark_results(result):
    benchmarks = result['benchmarks']
    # Python's sort is stable, so sorting repeatedly from the innermost key
    # to the outermost key yields a lexicographic order over (family_index,
    # per_family_instance_index, run_type, repetition_index).
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark.get('repetition_index', -1))
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: 1 if benchmark.get('run_type') == "aggregate" else 0)
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark.get('per_family_instance_index', -1))
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark.get('family_index', -1))
    result['benchmarks'] = benchmarks
    return result
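# Illustrative ordering (a sketch mirroring the expectations in the unit
# tests above): given shuffled entries, the result lists family 0 before
# family 1, instance 0 before instance 1 within a family, per-repetition
# runs before their aggregates, and repetition 0 before repetition 1.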
def run_benchmark(exe_name, benchmark_flags):
"""
Run a benchmark specified by 'exe_name' with the specified
'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
real time console output.
RETURNS: A JSON object representing the benchmark output
"""
output_name = find_benchmark_flag('--benchmark_out=',
benchmark_flags)
is_temp_output = False
if output_name is None:
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + \
['--benchmark_out=%s' % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:
print('TEST FAILED...')
sys.exit(exitCode)
json_res = load_benchmark_results(output_name)
if is_temp_output:
os.unlink(output_name)
return json_res
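# Illustrative usage (a sketch; './mybench' is a hypothetical binary):
#   run_benchmark('./mybench', ['--benchmark_filter=BM_Foo'])
# appends a temporary --benchmark_out flag, runs the binary as a subprocess,
# and returns the parsed JSON results, removing the temporary file afterward.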
def run_or_load_benchmark(filename, benchmark_flags):
"""
Get the results for a specified benchmark. If 'filename' specifies
an executable benchmark then the results are generated by running the
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
if ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
raise ValueError('Unknown file type %s' % ftype)
0707010000009A000081A400000000000000000000000160C0813C0000000C000000000000000000000000000000000000002700000000benchmark-1.5.5/tools/requirements.txt
scipy>=1.5.0
0707010000009B000081ED00000000000000000000000160C0813C00001142000000000000000000000000000000000000002300000000benchmark-1.5.5/tools/strip_asm.py
#!/usr/bin/env python
"""
strip_asm.py - Cleanup ASM output for the specified file
"""
from argparse import ArgumentParser
import sys
import os
import re
def find_used_labels(asm):
found = set()
    label_re = re.compile(r"\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
for l in asm.splitlines():
m = label_re.match(l)
if m:
found.add('.L%s' % m.group(1))
return found
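# Illustrative usage (a sketch): for the input "  jne .L3\n  jmp .L4\n",
# find_used_labels returns the set {'.L3', '.L4'}; lines that are not jump
# instructions are ignored.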
def normalize_labels(asm):
decls = set()
label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if m:
decls.add(m.group(0))
if len(decls) == 0:
return asm
needs_dot = next(iter(decls))[0] != '.'
if not needs_dot:
return asm
for ld in decls:
        asm = re.sub(r"(^|\s+)" + ld + r"(?=:|\s)", r'\1.' + ld, asm)
return asm
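# Illustrative usage (a sketch): normalize_labels("L0:\n  jmp L0\n") rewrites
# the dot-less Mach-O-style labels to ".L0:\n  jmp .L0\n"; input whose labels
# already start with '.' is returned unchanged.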
def transform_labels(asm):
asm = normalize_labels(asm)
used_decls = find_used_labels(asm)
new_asm = ''
    label_decl = re.compile(r"^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if not m or m.group(0) in used_decls:
new_asm += l
new_asm += '\n'
return new_asm
def is_identifier(tk):
if len(tk) == 0:
return False
first = tk[0]
if not first.isalpha() and first != '_':
return False
for i in range(1, len(tk)):
c = tk[i]
if not c.isalnum() and c != '_':
return False
return True
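# Illustrative usage (a sketch):
#   is_identifier('_ZN3foo3barEv')  -> True
#   is_identifier('42')             -> False (must not start with a digit)
#   is_identifier('')               -> False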
def process_identifiers(l):
"""
process_identifiers - process all identifiers and modify them to have
consistent names across all platforms; specifically across ELF and MachO.
For example, MachO inserts an additional understore at the beginning of
names. This function removes that.
"""
parts = re.split(r'([a-zA-Z0-9_]+)', l)
new_line = ''
for tk in parts:
if is_identifier(tk):
if tk.startswith('__Z'):
tk = tk[1:]
elif tk.startswith('_') and len(tk) > 1 and \
tk[1].isalpha() and tk[1] != 'Z':
tk = tk[1:]
new_line += tk
return new_line
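# Illustrative usage (a sketch): process_identifiers('callq __ZN3foo3barEv')
# returns 'callq _ZN3foo3barEv' (the extra Mach-O underscore is stripped),
# and process_identifiers('_main:') returns 'main:'.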
def process_asm(asm):
"""
Strip the ASM of unwanted directives and lines
"""
new_contents = ''
asm = transform_labels(asm)
# TODO: Add more things we want to remove
    discard_regexes = [
        re.compile(r"\s+\..*$"),  # directive
        re.compile(r"\s*#(NO_APP|APP)$"),  # inline ASM
        re.compile(r"\s*#.*$"),  # comment line
        re.compile(r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"),  # global directive
        re.compile(r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
keep_regexes = [
]
fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
for l in asm.splitlines():
# Remove Mach-O attribute
l = l.replace('@GOTPCREL', '')
add_line = True
for reg in discard_regexes:
if reg.match(l) is not None:
add_line = False
break
for reg in keep_regexes:
if reg.match(l) is not None:
add_line = True
break
if add_line:
if fn_label_def.match(l) and len(new_contents) != 0:
new_contents += '\n'
l = process_identifiers(l)
new_contents += l
new_contents += '\n'
return new_contents
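# Illustrative behavior (a sketch; assumes the compiler indents directives,
# as GCC and Clang do): process_asm("\t.text\nmain:\n\tret\n") drops the
# "\t.text" directive line and returns "main:\n\tret\n".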
def main():
parser = ArgumentParser(
description='generate a stripped assembly file')
parser.add_argument(
'input', metavar='input', type=str, nargs=1,
help='An input assembly file')
parser.add_argument(
'out', metavar='output', type=str, nargs=1,
help='The output file')
    args, unknown_args = parser.parse_known_args()
    # 'input_file' avoids shadowing the 'input' builtin.
    input_file = args.input[0]
    output = args.out[0]
    if not os.path.isfile(input_file):
        print("ERROR: input file '%s' does not exist" % input_file)
        sys.exit(1)
    contents = None
    with open(input_file, 'r') as f:
        contents = f.read()
new_contents = process_asm(contents)
with open(output, 'w') as f:
f.write(new_contents)
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!1517 blocks