30 #ifndef ANKERL_NANOBENCH_H_INCLUDED
31 #define ANKERL_NANOBENCH_H_INCLUDED
34 #define ANKERL_NANOBENCH_VERSION_MAJOR 4
35 #define ANKERL_NANOBENCH_VERSION_MINOR 3
36 #define ANKERL_NANOBENCH_VERSION_PATCH 10
46 #include <unordered_map>
49 #define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
51 #define ANKERL_NANOBENCH_PRIVATE_CXX() __cplusplus
52 #define ANKERL_NANOBENCH_PRIVATE_CXX98() 199711L
53 #define ANKERL_NANOBENCH_PRIVATE_CXX11() 201103L
54 #define ANKERL_NANOBENCH_PRIVATE_CXX14() 201402L
55 #define ANKERL_NANOBENCH_PRIVATE_CXX17() 201703L
57 #if ANKERL_NANOBENCH(CXX) >= ANKERL_NANOBENCH(CXX17)
58 # define ANKERL_NANOBENCH_PRIVATE_NODISCARD() [[nodiscard]]
60 # define ANKERL_NANOBENCH_PRIVATE_NODISCARD()
// Locally silence clang's -Wpadded; other compilers get empty macros.
#if defined(__clang__)
#    define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() \
        _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wpadded\"")
#    define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() _Pragma("clang diagnostic pop")
#else
#    define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH()
#    define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP()
#endif
73 # define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Weffc++\"")
74 # define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() _Pragma("GCC diagnostic pop")
76 # define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH()
77 # define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP()
80 #if defined(ANKERL_NANOBENCH_LOG_ENABLED)
82 # define ANKERL_NANOBENCH_LOG(x) \
84 std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl; \
87 # define ANKERL_NANOBENCH_LOG(x) \
92 #define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0
93 #if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS)
94 # include <linux/version.h>
95 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
98 # undef ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS
99 # define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 1
// no_sanitize attribute is clang-only; expand to nothing elsewhere.
#if defined(__clang__)
#    define ANKERL_NANOBENCH_NO_SANITIZE(...) __attribute__((no_sanitize(__VA_ARGS__)))
#else
#    define ANKERL_NANOBENCH_NO_SANITIZE(...)
#endif
// Compiler-specific "never inline" spelling: __declspec on MSVC, GCC-style attribute otherwise.
#if defined(_MSC_VER)
#    define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __declspec(noinline)
#else
#    define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __attribute__((noinline))
#endif
// GCC before version 5 does not implement std::is_trivially_copyable;
// fall back to the compiler intrinsic there.
#if defined(__GNUC__) && __GNUC__ < 5
#    define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
#else
#    define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
#endif
126 namespace nanobench {
128 using Clock = std::conditional<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock,
129 std::chrono::steady_clock>::type;
286 void render(
char const* mustacheTemplate,
Bench const& bench, std::ostream& out);
287 void render(std::string
const& mustacheTemplate,
Bench const& bench, std::ostream& out);
297 void render(
char const* mustacheTemplate, std::vector<Result>
const& results, std::ostream& out);
298 void render(std::string
const& mustacheTemplate, std::vector<Result>
const& results, std::ostream& out);
301 namespace templates {
312 char const*
csv() noexcept;
349 template <
typename T>
355 #if ANKERL_NANOBENCH(PERF_COUNTERS)
356 class LinuxPerformanceCounters;
366 namespace nanobench {
369 template <
typename T>
384 std::string mBenchmarkTitle =
"benchmark";
385 std::string mBenchmarkName =
"noname";
386 std::string mUnit =
"op";
388 double mComplexityN = -1.0;
389 size_t mNumEpochs = 11;
390 size_t mClockResolutionMultiple =
static_cast<size_t>(1000);
391 std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100);
392 std::chrono::nanoseconds mMinEpochTime = std::chrono::milliseconds(1);
393 uint64_t mMinEpochIterations{1};
395 uint64_t mEpochIterations{0};
396 uint64_t mWarmup = 0;
397 std::ostream* mOut =
nullptr;
398 std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1};
399 std::string mTimeUnitName =
"ns";
400 bool mShowPerformanceCounters =
true;
401 bool mIsRelative =
false;
402 std::unordered_map<std::string, std::string> mContext{};
463 std::vector<std::vector<double>> mNameToMeasurements{};
491 static constexpr uint64_t(min)();
492 static constexpr uint64_t(max)();
508 Rng& operator=(
Rng&&) noexcept = default;
509 ~
Rng() noexcept = default;
536 explicit
Rng(uint64_t seed) noexcept;
537 Rng(uint64_t x, uint64_t y) noexcept;
538 explicit
Rng(
std::vector<uint64_t> const& data);
552 inline uint64_t operator()() noexcept;
570 inline uint32_t bounded(uint32_t range) noexcept;
581 inline
double uniform01() noexcept;
590 template <typename Container>
591 void shuffle(Container& container) noexcept;
599 std::vector<uint64_t> state() const;
602 static constexpr uint64_t
rotl(uint64_t x,
unsigned k) noexcept;
654 template <typename Op>
656 Bench& run(
char const* benchmarkName, Op&& op);
658 template <typename Op>
660 Bench& run(
std::
string const& benchmarkName, Op&& op);
666 template <typename Op>
675 Bench& title(
char const* benchmarkTitle);
722 template <typename T>
723 Bench& batch(T b) noexcept;
747 Bench& timeUnit(
std::chrono::duration<
double> const& tu,
std::
string const& tuName);
749 ANKERL_NANOBENCH(NODISCARD)
std::chrono::duration<
double> const& timeUnit() const noexcept;
781 Bench& clockResolutionMultiple(
size_t multiple) noexcept;
799 Bench& epochs(
size_t numEpochs) noexcept;
812 Bench& maxEpochTime(
std::chrono::nanoseconds t) noexcept;
825 Bench& minEpochTime(
std::chrono::nanoseconds t) noexcept;
838 Bench& minEpochIterations(uint64_t numIters) noexcept;
847 Bench& epochIterations(uint64_t numIters) noexcept;
859 Bench& warmup(uint64_t numWarmupIters) noexcept;
879 Bench& relative(
bool isRelativeEnabled) noexcept;
910 template <typename Arg>
927 template <typename T>
928 Bench& complexityN(T n) noexcept;
987 template <typename Op>
988 BigO complexityBigO(
char const*
name, Op op) const;
990 template <typename Op>
991 BigO complexityBigO(
std::
string const&
name, Op op) const;
1008 std::vector<Result> mResults{};
1018 template <
typename Arg>
1023 #if defined(_MSC_VER)
1024 void doNotOptimizeAwaySink(
void const*);
1026 template <
typename T>
1034 template <
typename T>
1037 asm volatile(
"" : :
"r,m"(val) :
"memory");
1040 template <
typename T>
1042 # if defined(__clang__)
1044 asm volatile(
"" :
"+r,m"(val) : :
"memory");
1047 asm volatile(
"" :
"+m,r"(val) : :
"memory");
1094 #if ANKERL_NANOBENCH(PERF_COUNTERS)
1095 LinuxPerformanceCounters* mPc =
nullptr;
1111 template <
typename Op>
1113 for (
auto& rangeMeasure : data) {
1114 rangeMeasure.first = op(rangeMeasure.first);
1121 template <
typename Op>
1123 :
BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
1125 template <
typename Op>
1127 :
BigO(
std::move(bigOName), mapRangeMeasure(rangeMeasure, rangeToN)) {}
1139 double mNormalizedRootMeanSquare{};
1142 std::ostream&
operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO>
const& bigOs);
1150 namespace nanobench {
1157 return (std::numeric_limits<uint64_t>::max)();
1161 uint64_t
Rng::operator()() noexcept {
1164 mX = UINT64_C(15241094284759029579) * mY;
1165 mY =
rotl(mY - x, 27);
1171 uint32_t
Rng::bounded(uint32_t range) noexcept {
1172 uint64_t
const r32 =
static_cast<uint32_t
>(operator()());
1173 auto multiresult = r32 * range;
1174 return static_cast<uint32_t
>(multiresult >> 32U);
1178 auto i = (UINT64_C(0x3ff) << 52U) | (
operator()() >> 12U);
1182 std::memcpy(&d, &i,
sizeof(
double));
1186 template <
typename Container>
1188 auto i = container.size();
1191 auto n = operator()();
1193 auto b1 =
static_cast<decltype(i)
>((
static_cast<uint32_t
>(n) *
static_cast<uint64_t
>(i)) >> 32U);
1194 swap(container[--i], container[b1]);
1196 auto b2 =
static_cast<decltype(i)
>(((n >> 32U) *
static_cast<uint64_t
>(i)) >> 32U);
1197 swap(container[--i], container[b2]);
1202 constexpr uint64_t
Rng::
rotl(uint64_t x,
unsigned k) noexcept {
1203 return (x <<
k) | (x >> (64U -
k));
1206 template <
typename Op>
1213 while (
auto n = iterationLogic.numIters()) {
1215 Clock::time_point
const before = Clock::now();
1219 Clock::time_point
const after = Clock::now();
1221 pc.updateResults(iterationLogic.numIters());
1222 iterationLogic.
add(after - before, pc);
1229 template <
typename Op>
1231 name(benchmarkName);
1232 return run(std::forward<Op>(op));
1235 template <
typename Op>
1237 name(benchmarkName);
1238 return run(std::forward<Op>(op));
1241 template <
typename Op>
1246 template <
typename Op>
1253 template <
typename T>
1255 mConfig.mBatch =
static_cast<double>(b);
1260 template <
typename T>
1262 mConfig.mComplexityN =
static_cast<double>(n);
1267 template <
typename Arg>
1274 template <
typename Arg>
1281 #if defined(_MSC_VER)
1282 template <
typename T>
1284 doNotOptimizeAwaySink(&val);
1293 #if defined(ANKERL_NANOBENCH_IMPLEMENT)
1299 # include <algorithm>
1305 # include <iostream>
1309 # include <stdexcept>
1311 # if defined(__linux__)
1312 # include <unistd.h>
1314 # if ANKERL_NANOBENCH(PERF_COUNTERS)
1317 # include <linux/perf_event.h>
1318 # include <sys/ioctl.h>
1319 # include <sys/syscall.h>
1325 namespace nanobench {
1336 class StreamStateRestorer;
1338 class MarkDownColumn;
1349 namespace nanobench {
1351 uint64_t splitMix64(uint64_t& state) noexcept;
// Shorthand helper: cast any numeric value to double.
template <typename T>
inline double d(T t) noexcept {
    return static_cast<double>(t);
}
1360 inline double d(Clock::duration duration) noexcept {
1361 return std::chrono::duration_cast<std::chrono::duration<double>>(duration).
count();
1365 inline Clock::duration clockResolution() noexcept;
1369 namespace templates {
1371 char const*
csv() noexcept {
1372 return R
"DELIM("title";"name";"unit";"batch";"elapsed";"error %";"instructions";"branches";"branch misses";"total"
1373 {{#result}}"{{title}}";"{{name}}";"{{unit}}";{{batch}};{{median(elapsed)}};{{medianAbsolutePercentError(elapsed)}};{{median(instructions)}};{{median(branchinstructions)}};{{median(branchmisses)}};{{sumProduct(iterations, elapsed)}}
1378 return R
"DELIM(<html>
1381 <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
1385 <div id="myDiv"></div>
1390 y: [{{#measurement}}{{elapsed}}{{^-last}}, {{/last}}{{/measurement}}],
1394 var title = '{{title}}';
1396 data = data.map(a => Object.assign(a, { boxpoints: 'all', pointpos: 0, type: 'box' }));
1397 var layout = { title: { text: title }, showlegend: false, yaxis: { title: 'time per unit', rangemode: 'tozero', autorange: true } }; Plotly.newPlot('myDiv', data, layout, {responsive: true});
1404 char const*
pyperf() noexcept {
1411 {{#measurement}} {{elapsed}}{{^-last}},
1412 {{/last}}{{/measurement}}
1419 "loops": {{sum(iterations)}},
1420 "inner_loops": {{batch}},
1421 "name": "{{title}}",
1428 char const*
json() noexcept {
1432 "title": "{{title}}",
1436 "complexityN": {{complexityN}},
1437 "epochs": {{epochs}},
1438 "clockResolution": {{clockResolution}},
1439 "clockResolutionMultiple": {{clockResolutionMultiple}},
1440 "maxEpochTime": {{maxEpochTime}},
1441 "minEpochTime": {{minEpochTime}},
1442 "minEpochIterations": {{minEpochIterations}},
1443 "epochIterations": {{epochIterations}},
1444 "warmup": {{warmup}},
1445 "relative": {{relative}},
1446 "median(elapsed)": {{median(elapsed)}},
1447 "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}},
1448 "median(instructions)": {{median(instructions)}},
1449 "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}},
1450 "median(cpucycles)": {{median(cpucycles)}},
1451 "median(contextswitches)": {{median(contextswitches)}},
1452 "median(pagefaults)": {{median(pagefaults)}},
1453 "median(branchinstructions)": {{median(branchinstructions)}},
1454 "median(branchmisses)": {{median(branchmisses)}},
1455 "totalTime": {{sumProduct(iterations, elapsed)}},
1458 "iterations": {{iterations}},
1459 "elapsed": {{elapsed}},
1460 "pagefaults": {{pagefaults}},
1461 "cpucycles": {{cpucycles}},
1462 "contextswitches": {{contextswitches}},
1463 "instructions": {{instructions}},
1464 "branchinstructions": {{branchinstructions}},
1465 "branchmisses": {{branchmisses}}
1466 }{{^-last}},{{/-last}}
1468 }{{^-last}},{{/-last}}
1475 enum class Type { tag, content, section, inverted_section };
1479 std::vector<Node> children;
1484 bool operator==(
char const (&str)[N])
const noexcept {
1486 return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1);
1492 static std::vector<Node> parseMustacheTemplate(
char const** tpl) {
1493 std::vector<Node> nodes;
1496 auto const* begin = std::strstr(*tpl,
"{{");
1497 auto const* end = begin;
1498 if (begin !=
nullptr) {
1501 end = std::strstr(begin,
"}}");
1504 if (begin ==
nullptr || end ==
nullptr) {
1507 nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content});
1512 nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content});
1524 nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section});
1529 nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section});
1533 nodes.emplace_back(Node{begin, end, std::vector<Node>{}, Node::Type::tag});
1539 static bool generateFirstLast(Node
const& n,
size_t idx,
size_t size, std::ostream& out) {
1541 bool const matchFirst = n ==
"-first";
1542 bool const matchLast = n ==
"-last";
1543 if (!matchFirst && !matchLast) {
1547 bool doWrite =
false;
1548 if (n.type == Node::Type::section) {
1549 doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1);
1550 }
else if (n.type == Node::Type::inverted_section) {
1551 doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1);
1555 for (
auto const& child : n.children) {
1556 if (child.type == Node::Type::content) {
1557 out.write(child.begin, std::distance(child.begin, child.end));
// Parses a command of the form "name(arg1, arg2, ...)": matchResult[0] gets
// the command name, the remaining entries the comma-separated arguments with
// whitespace stripped. Returns false when no closing ')' is found.
static bool matchCmdArgs(std::string const& str, std::vector<std::string>& matchResult) {
    matchResult.clear();
    auto idxOpen = str.find('(');
    auto idxClose = str.find(')', idxOpen);
    if (idxClose == std::string::npos) {
        return false;
    }

    // everything before '(' is the command name
    matchResult.emplace_back(str.substr(0, idxOpen));

    // no argument is represented as one single empty parameter
    matchResult.emplace_back();
    for (size_t i = idxOpen + 1; i != idxClose; ++i) {
        if (str[i] == ' ' || str[i] == '\t') {
            // skip whitespace
            continue;
        }
        if (str[i] == ',') {
            // got a comma => new parameter
            matchResult.emplace_back();
            continue;
        }
        // neither whitespace nor comma: part of the current argument
        matchResult.back() += str[i];
    }
    return true;
}
1592 static bool generateConfigTag(Node
const& n, Config
const& config, std::ostream& out) {
1596 out << config.mBenchmarkTitle;
1600 out << config.mBenchmarkName;
1604 out << config.mUnit;
1608 out << config.mBatch;
1611 if (n ==
"complexityN") {
1612 out << config.mComplexityN;
1615 if (n ==
"epochs") {
1616 out << config.mNumEpochs;
1619 if (n ==
"clockResolution") {
1620 out << d(detail::clockResolution());
1623 if (n ==
"clockResolutionMultiple") {
1624 out << config.mClockResolutionMultiple;
1627 if (n ==
"maxEpochTime") {
1628 out << d(config.mMaxEpochTime);
1631 if (n ==
"minEpochTime") {
1632 out << d(config.mMinEpochTime);
1635 if (n ==
"minEpochIterations") {
1636 out << config.mMinEpochIterations;
1639 if (n ==
"epochIterations") {
1640 out << config.mEpochIterations;
1643 if (n ==
"warmup") {
1644 out << config.mWarmup;
1647 if (n ==
"relative") {
1648 out << config.mIsRelative;
1655 static std::ostream& generateResultTag(Node
const& n,
Result const& r, std::ostream& out) {
1656 if (generateConfigTag(n, r.config(), out)) {
1664 std::vector<std::string> matchResult;
1665 if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
1666 if (matchResult.size() == 2) {
1667 if (matchResult[0] ==
"context") {
1668 return out << r.context(matchResult[1]);
1676 if (matchResult[0] ==
"median") {
1677 return out << r.median(m);
1679 if (matchResult[0] ==
"average") {
1680 return out << r.average(m);
1682 if (matchResult[0] ==
"medianAbsolutePercentError") {
1683 return out << r.medianAbsolutePercentError(m);
1685 if (matchResult[0] ==
"sum") {
1686 return out << r.sum(m);
1688 if (matchResult[0] ==
"minimum") {
1689 return out << r.minimum(m);
1691 if (matchResult[0] ==
"maximum") {
1692 return out << r.maximum(m);
1694 }
else if (matchResult.size() == 3) {
1701 if (matchResult[0] ==
"sumProduct") {
1702 return out << r.sumProduct(m1, m2);
1711 throw std::runtime_error(
"command '" + std::string(n.begin, n.end) +
"' not understood");
1714 static void generateResultMeasurement(std::vector<Node>
const& nodes,
size_t idx,
Result const& r, std::ostream& out) {
1715 for (
auto const& n : nodes) {
1716 if (!generateFirstLast(n, idx, r.size(), out)) {
1719 case Node::Type::content:
1720 out.write(n.begin, std::distance(n.begin, n.end));
1723 case Node::Type::inverted_section:
1724 throw std::runtime_error(
"got a inverted section inside measurement");
1726 case Node::Type::section:
1727 throw std::runtime_error(
"got a section inside measurement");
1729 case Node::Type::tag: {
1734 out << r.get(idx, m);
1743 static void generateResult(std::vector<Node>
const& nodes,
size_t idx, std::vector<Result>
const& results, std::ostream& out) {
1744 auto const& r = results[idx];
1745 for (
auto const& n : nodes) {
1746 if (!generateFirstLast(n, idx, results.size(), out)) {
1749 case Node::Type::content:
1750 out.write(n.begin, std::distance(n.begin, n.end));
1753 case Node::Type::inverted_section:
1754 throw std::runtime_error(
"got a inverted section inside result");
1756 case Node::Type::section:
1757 if (n ==
"measurement") {
1758 for (
size_t i = 0; i < r.size(); ++i) {
1759 generateResultMeasurement(n.children, i, r, out);
1762 throw std::runtime_error(
"got a section inside result");
1766 case Node::Type::tag:
1767 generateResultTag(n, r, out);
1779 char const* getEnv(
char const*
name);
1780 bool isEndlessRunning(std::string
const&
name);
1781 bool isWarningsEnabled();
1783 template <
typename T>
1784 T parseFile(std::string
const& filename);
1786 void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations);
1787 void printStabilityInformationOnce(std::ostream* outStream);
1790 uint64_t& singletonHeaderHash() noexcept;
1793 Clock::duration calcClockResolution(
size_t numEvaluations) noexcept;
1800 class NumSep :
public std::numpunct<char> {
1802 explicit NumSep(
char sep);
1803 char do_thousands_sep()
const override;
1804 std::string do_grouping()
const override;
1813 class StreamStateRestorer {
1815 explicit StreamStateRestorer(std::ostream& s);
1816 ~StreamStateRestorer();
1822 StreamStateRestorer(StreamStateRestorer
const&) =
delete;
1823 StreamStateRestorer& operator=(StreamStateRestorer
const&) =
delete;
1824 StreamStateRestorer(StreamStateRestorer&&) =
delete;
1825 StreamStateRestorer& operator=(StreamStateRestorer&&) =
delete;
1828 std::ostream& mStream;
1829 std::locale mLocale;
1830 std::streamsize
const mPrecision;
1831 std::streamsize
const mWidth;
1832 std::ostream::char_type
const mFill;
1833 std::ostream::fmtflags
const mFmtFlags;
1840 Number(
int width,
int precision,
double value);
1841 Number(
int width,
int precision, int64_t value);
1842 std::string to_s()
const;
1845 friend std::ostream&
operator<<(std::ostream& os, Number
const& n);
1846 std::ostream& write(std::ostream& os)
const;
1854 std::string to_s(uint64_t n);
1856 std::ostream&
operator<<(std::ostream& os, Number
const& n);
1858 class MarkDownColumn {
1860 MarkDownColumn(
int w,
int prec, std::string tit, std::string suff,
double val);
1861 std::string title()
const;
1862 std::string separator()
const;
1863 std::string invalid()
const;
1864 std::string value()
const;
1870 std::string mSuffix;
1875 class MarkDownCode {
1877 explicit MarkDownCode(std::string
const& what);
1880 friend std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode);
1881 std::ostream& write(std::ostream& os)
const;
1883 std::string mWhat{};
1886 std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode);
1896 namespace nanobench {
1899 void render(
char const* mustacheTemplate, std::vector<Result>
const& results, std::ostream& out) {
1900 detail::fmt::StreamStateRestorer
const restorer(out);
1902 out.precision(std::numeric_limits<double>::digits10);
1903 auto nodes = templates::parseMustacheTemplate(&mustacheTemplate);
1905 for (
auto const& n : nodes) {
1908 case templates::Node::Type::content:
1909 out.write(n.begin, std::distance(n.begin, n.end));
1912 case templates::Node::Type::inverted_section:
1913 throw std::runtime_error(
"unknown list '" + std::string(n.begin, n.end) +
"'");
1915 case templates::Node::Type::section:
1916 if (n ==
"result") {
1917 const size_t nbResults = results.size();
1918 for (
size_t i = 0; i < nbResults; ++i) {
1919 generateResult(n.children, i, results, out);
1921 }
else if (n ==
"measurement") {
1922 if (results.size() != 1) {
1923 throw std::runtime_error(
1924 "render: can only use section 'measurement' here if there is a single result, but there are " +
1925 detail::fmt::to_s(results.size()));
1928 auto const& r = results.front();
1929 for (
size_t i = 0; i < r.size(); ++i) {
1930 generateResultMeasurement(n.children, i, r, out);
1933 throw std::runtime_error(
"render: unknown section '" + std::string(n.begin, n.end) +
"'");
1937 case templates::Node::Type::tag:
1938 if (results.size() == 1) {
1940 generateResultTag(n, results.front(), out);
1943 if (!generateConfigTag(n, results.back().config(), out)) {
1944 throw std::runtime_error(
"unknown tag '" + std::string(n.begin, n.end) +
"'");
1952 void render(std::string
const& mustacheTemplate, std::vector<Result>
const& results, std::ostream& out) {
1953 render(mustacheTemplate.c_str(), results, out);
1956 void render(
char const* mustacheTemplate,
const Bench& bench, std::ostream& out) {
1957 render(mustacheTemplate, bench.results(), out);
1960 void render(std::string
const& mustacheTemplate,
const Bench& bench, std::ostream& out) {
1961 render(mustacheTemplate.c_str(), bench.results(), out);
1967 # if defined(__clang__)
1968 # pragma clang diagnostic push
1969 # pragma clang diagnostic ignored "-Wexit-time-destructors"
1971 static PerformanceCounters pc;
1972 # if defined(__clang__)
1973 # pragma clang diagnostic pop
1982 # if defined(_MSC_VER)
1983 # pragma optimize("", off)
1984 void doNotOptimizeAwaySink(
void const*) {}
1985 # pragma optimize("", on)
// Reads a single whitespace-delimited value of type T from the given file.
// Returns a value-initialized T when the file cannot be opened or parsed
// (best effort; used for /sys pseudo-files that may be absent).
template <typename T>
T parseFile(std::string const& filename) {
    std::ifstream fin(filename);
    T num{};
    fin >> num;
    return num;
}
// Wrapper around std::getenv. MSVC flags getenv as unsafe (C4996), so the
// warning is disabled locally around the call.
char const* getEnv(char const* name) {
#    if defined(_MSC_VER)
#        pragma warning(push)
#        pragma warning(disable : 4996) // getenv may be unsafe
#    endif
    return std::getenv(name);
#    if defined(_MSC_VER)
#        pragma warning(pop)
#    endif
}
2002 # if defined(_MSC_VER)
2003 # pragma warning(pop)
2007 bool isEndlessRunning(std::string
const&
name) {
2008 auto const*
const endless = getEnv(
"NANOBENCH_ENDLESS");
2009 return nullptr != endless && endless ==
name;
2013 bool isWarningsEnabled() {
2014 auto const*
const suppression = getEnv(
"NANOBENCH_SUPPRESS_WARNINGS");
2015 return nullptr == suppression || suppression == std::string(
"0");
2018 void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations) {
2020 recommendations.clear();
2023 warnings.emplace_back(
"DEBUG defined");
2024 bool const recommendCheckFlags =
true;
2026 bool const recommendCheckFlags =
false;
2029 bool recommendPyPerf =
false;
2030 # if defined(__linux__)
2031 auto nprocs = sysconf(_SC_NPROCESSORS_CONF);
2033 warnings.emplace_back(
"couldn't figure out number of processors - no governor, turbo check possible");
2037 for (
long id = 0;
id < nprocs; ++id) {
2038 auto idStr = detail::fmt::to_s(
static_cast<uint64_t
>(
id));
2039 auto sysCpu =
"/sys/devices/system/cpu/cpu" + idStr;
2040 auto minFreq = parseFile<int64_t>(sysCpu +
"/cpufreq/scaling_min_freq");
2041 auto maxFreq = parseFile<int64_t>(sysCpu +
"/cpufreq/scaling_max_freq");
2042 if (minFreq != maxFreq) {
2043 auto minMHz =
static_cast<double>(minFreq) / 1000.0;
2044 auto maxMHz =
static_cast<double>(maxFreq) / 1000.0;
2045 warnings.emplace_back(
"CPU frequency scaling enabled: CPU " + idStr +
" between " +
2046 detail::fmt::Number(1, 1, minMHz).to_s() +
" and " + detail::fmt::Number(1, 1, maxMHz).to_s() +
2048 recommendPyPerf =
true;
2053 auto currentGovernor = parseFile<std::string>(
"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
2054 if (
"performance" != currentGovernor) {
2055 warnings.emplace_back(
"CPU governor is '" + currentGovernor +
"' but should be 'performance'");
2056 recommendPyPerf =
true;
2059 if (0 == parseFile<int>(
"/sys/devices/system/cpu/intel_pstate/no_turbo")) {
2060 warnings.emplace_back(
"Turbo is enabled, CPU frequency will fluctuate");
2061 recommendPyPerf =
true;
2066 if (recommendCheckFlags) {
2067 recommendations.emplace_back(
"Make sure you compile for Release");
2069 if (recommendPyPerf) {
2070 recommendations.emplace_back(
"Use 'pyperf system tune' before benchmarking. See https://github.com/psf/pyperf");
2074 void printStabilityInformationOnce(std::ostream* outStream) {
2075 static bool shouldPrint =
true;
2076 if (shouldPrint && (
nullptr != outStream) && isWarningsEnabled()) {
2077 auto& os = *outStream;
2078 shouldPrint =
false;
2079 std::vector<std::string> warnings;
2080 std::vector<std::string> recommendations;
2081 gatherStabilityInformation(warnings, recommendations);
2082 if (warnings.empty()) {
2086 os <<
"Warning, results might be unstable:" << std::endl;
2087 for (
auto const& w : warnings) {
2088 os <<
"* " << w << std::endl;
2091 os << std::endl <<
"Recommendations" << std::endl;
2092 for (
auto const& r : recommendations) {
2093 os <<
"* " << r << std::endl;
2099 uint64_t& singletonHeaderHash() noexcept {
2100 static uint64_t sHeaderHash{};
// Folds a value into a running hash, boost::hash_combine style: the
// golden-ratio constant plus shifts spreads the bits of the seed.
inline uint64_t hash_combine(uint64_t seed, uint64_t val) {
    return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U));
}
2110 Clock::duration calcClockResolution(
size_t numEvaluations) noexcept {
2111 auto bestDuration = Clock::duration::max();
2112 Clock::time_point tBegin;
2113 Clock::time_point tEnd;
2114 for (
size_t i = 0; i < numEvaluations; ++i) {
2115 tBegin = Clock::now();
2117 tEnd = Clock::now();
2118 }
while (tBegin == tEnd);
2119 bestDuration = (std::min)(bestDuration, tEnd - tBegin);
2121 return bestDuration;
2125 Clock::duration clockResolution() noexcept {
2126 static Clock::duration
const sResolution = calcClockResolution(20);
2131 struct IterationLogic::Impl {
2132 enum class State { warmup, upscaling_runtime, measuring, endless };
2134 explicit Impl(Bench
const& bench)
2136 , mResult(bench.config()) {
2137 printStabilityInformationOnce(mBench.output());
2140 mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple();
2141 if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) {
2142 mTargetRuntimePerEpoch = mBench.maxEpochTime();
2144 if (mTargetRuntimePerEpoch < mBench.minEpochTime()) {
2145 mTargetRuntimePerEpoch = mBench.minEpochTime();
2148 if (isEndlessRunning(mBench.name())) {
2149 std::cerr <<
"NANOBENCH_ENDLESS set: running '" << mBench.name() <<
"' endlessly" << std::endl;
2150 mNumIters = (std::numeric_limits<uint64_t>::max)();
2151 mState = State::endless;
2152 }
else if (0 != mBench.warmup()) {
2153 mNumIters = mBench.warmup();
2154 mState = State::warmup;
2155 }
else if (0 != mBench.epochIterations()) {
2157 mNumIters = mBench.epochIterations();
2158 mState = State::measuring;
2160 mNumIters = mBench.minEpochIterations();
2161 mState = State::upscaling_runtime;
2166 ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters) noexcept {
2167 auto doubleElapsed = d(elapsed);
2168 auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch);
2169 auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters);
2171 auto doubleMinEpochIters = d(mBench.minEpochIterations());
2172 if (doubleNewIters < doubleMinEpochIters) {
2173 doubleNewIters = doubleMinEpochIters;
2175 doubleNewIters *= 1.0 + 0.2 * mRng.uniform01();
2179 return static_cast<uint64_t
>(doubleNewIters + 0.5);
2183 if (elapsed * 10 < mTargetRuntimePerEpoch) {
2185 if (mNumIters * 10 < mNumIters) {
2187 showResult(
"iterations overflow. Maybe your code got optimized away?");
2193 mNumIters = calcBestNumIters(elapsed, mNumIters);
2197 void add(std::chrono::nanoseconds elapsed, PerformanceCounters
const& pc) noexcept {
2198 # if defined(ANKERL_NANOBENCH_LOG_ENABLED)
2199 auto oldIters = mNumIters;
2204 if (isCloseEnoughForMeasurements(elapsed)) {
2207 mState = State::measuring;
2208 mNumIters = calcBestNumIters(elapsed, mNumIters);
2211 mState = State::upscaling_runtime;
2216 case State::upscaling_runtime:
2217 if (isCloseEnoughForMeasurements(elapsed)) {
2219 mState = State::measuring;
2220 mTotalElapsed += elapsed;
2221 mTotalNumIters += mNumIters;
2222 mResult.add(elapsed, mNumIters, pc);
2223 mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
2229 case State::measuring:
2232 mTotalElapsed += elapsed;
2233 mTotalNumIters += mNumIters;
2234 mResult.add(elapsed, mNumIters, pc);
2235 if (0 != mBench.epochIterations()) {
2236 mNumIters = mBench.epochIterations();
2238 mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
2242 case State::endless:
2243 mNumIters = (std::numeric_limits<uint64_t>::max)();
2247 if (
static_cast<uint64_t
>(mResult.size()) == mBench.epochs()) {
2253 ANKERL_NANOBENCH_LOG(mBench.name() <<
": " << detail::fmt::Number(20, 3,
static_cast<double>(elapsed.count())) <<
" elapsed, "
2254 << detail::fmt::Number(20, 3,
static_cast<double>(mTargetRuntimePerEpoch.count()))
2255 <<
" target. oldIters=" << oldIters <<
", mNumIters=" << mNumIters
2256 <<
", mState=" <<
static_cast<int>(mState));
2260 void showResult(std::string
const& errorMessage)
const {
2263 if (mBench.output() !=
nullptr) {
2265 std::vector<fmt::MarkDownColumn> columns;
2269 if (mBench.relative()) {
2271 if (!mBench.results().empty()) {
2274 columns.emplace_back(11, 1,
"relative",
"%", d);
2277 if (mBench.complexityN() > 0) {
2278 columns.emplace_back(14, 0,
"complexityN",
"", mBench.complexityN());
2281 columns.emplace_back(22, 2, mBench.timeUnitName() +
"/" + mBench.unit(),
"",
2282 rMedian / (mBench.timeUnit().count() * mBench.batch()));
2283 columns.emplace_back(22, 2, mBench.unit() +
"/s",
"", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian);
2286 columns.emplace_back(10, 1,
"err%",
"%", rErrorMedian * 100.0);
2288 double rInsMedian = -1.0;
2291 columns.emplace_back(18, 2,
"ins/" + mBench.unit(),
"", rInsMedian / mBench.batch());
2294 double rCycMedian = -1.0;
2297 columns.emplace_back(18, 2,
"cyc/" + mBench.unit(),
"", rCycMedian / mBench.batch());
2299 if (rInsMedian > 0.0 && rCycMedian > 0.0) {
2300 columns.emplace_back(9, 3,
"IPC",
"", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian);
2304 columns.emplace_back(17, 2,
"bra/" + mBench.unit(),
"", rBraMedian / mBench.batch());
2307 if (rBraMedian >= 1e-9) {
2310 columns.emplace_back(10, 1,
"miss%",
"%", p);
2317 auto& os = *mBench.output();
2321 hash = hash_combine(std::hash<std::string>{}(mBench.unit()), hash);
2322 hash = hash_combine(std::hash<std::string>{}(mBench.title()), hash);
2323 hash = hash_combine(std::hash<std::string>{}(mBench.timeUnitName()), hash);
2324 hash = hash_combine(std::hash<double>{}(mBench.timeUnit().
count()), hash);
2325 hash = hash_combine(std::hash<bool>{}(mBench.relative()), hash);
2326 hash = hash_combine(std::hash<bool>{}(mBench.performanceCounters()), hash);
2328 if (hash != singletonHeaderHash()) {
2329 singletonHeaderHash() = hash;
2333 for (
auto const& col : columns) {
2336 os <<
"| " << mBench.title() << std::endl;
2338 for (
auto const& col : columns) {
2339 os << col.separator();
2341 os <<
"|:" << std::string(mBench.title().size() + 1U,
'-') << std::endl;
2344 if (!errorMessage.empty()) {
2345 for (
auto const& col : columns) {
2346 os << col.invalid();
2348 os <<
"| :boom: " << fmt::MarkDownCode(mBench.name()) <<
" (" << errorMessage <<
')' << std::endl;
2350 for (
auto const& col : columns) {
2354 auto showUnstable = isWarningsEnabled() && rErrorMedian >= 0.05;
2356 os <<
":wavy_dash: ";
2358 os << fmt::MarkDownCode(mBench.name());
2360 auto avgIters =
static_cast<double>(mTotalNumIters) /
static_cast<double>(mBench.epochs());
2362 auto suggestedIters =
static_cast<uint64_t
>(avgIters * 10 + 0.5);
2364 os <<
" (Unstable with ~" << detail::fmt::Number(1, 1, avgIters)
2365 <<
" iters. Increase `minEpochIterations` to e.g. " << suggestedIters <<
")";
2372 ANKERL_NANOBENCH(NODISCARD)
bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed)
const noexcept {
2373 return elapsed * 3 >= mTargetRuntimePerEpoch * 2;
2376 uint64_t mNumIters = 1;
2377 Bench
const& mBench;
2378 std::chrono::nanoseconds mTargetRuntimePerEpoch{};
2381 std::chrono::nanoseconds mTotalElapsed{};
2382 uint64_t mTotalNumIters = 0;
2383 State mState = State::upscaling_runtime;
2387 IterationLogic::IterationLogic(Bench
const& bench)
2388 : mPimpl(new Impl(bench)) {}
2390 IterationLogic::~IterationLogic() {
2394 uint64_t IterationLogic::numIters() const noexcept {
2396 return mPimpl->mNumIters;
2399 void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters
const& pc) noexcept {
2400 mPimpl->add(elapsed, pc);
2403 void IterationLogic::moveResultTo(std::vector<Result>& results) noexcept {
2404 results.emplace_back(std::move(mPimpl->mResult));
2407 # if ANKERL_NANOBENCH(PERF_COUNTERS)
2410 class LinuxPerformanceCounters {
2413 Target(uint64_t* targetValue_,
bool correctMeasuringOverhead_,
bool correctLoopOverhead_)
2414 : targetValue(targetValue_)
2415 , correctMeasuringOverhead(correctMeasuringOverhead_)
2416 , correctLoopOverhead(correctLoopOverhead_) {}
2418 uint64_t* targetValue{};
2419 bool correctMeasuringOverhead{};
2420 bool correctLoopOverhead{};
2423 LinuxPerformanceCounters() =
default;
2424 LinuxPerformanceCounters(LinuxPerformanceCounters
const&) =
delete;
2425 LinuxPerformanceCounters(LinuxPerformanceCounters&&) =
delete;
2426 LinuxPerformanceCounters& operator=(LinuxPerformanceCounters
const&) =
delete;
2427 LinuxPerformanceCounters& operator=(LinuxPerformanceCounters&&) =
delete;
2428 ~LinuxPerformanceCounters();
2431 inline void start() {}
2433 inline void stop() {}
2435 bool monitor(perf_sw_ids swId, Target target);
2436 bool monitor(perf_hw_id hwId, Target target);
2438 bool hasError() const noexcept {
2444 inline void beginMeasure() {
2450 mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
2456 mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
2459 inline void endMeasure() {
2465 mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP));
2470 auto const numBytes =
sizeof(uint64_t) * mCounters.size();
2471 auto ret = read(mFd, mCounters.data(), numBytes);
2472 mHasError =
ret !=
static_cast<ssize_t
>(numBytes);
2475 void updateResults(uint64_t numIters);
2478 template <
typename T>
2479 static inline T divRounded(T a, T divisor) {
2480 return (a + divisor / 2) / divisor;
2484 static inline uint32_t mix(uint32_t x) noexcept {
2491 template <
typename Op>
2493 void calibrate(Op&& op) {
2495 for (
auto& v : mCalibratedOverhead) {
2500 auto newCalibration = mCalibratedOverhead;
2501 for (
auto& v : newCalibration) {
2502 v = (std::numeric_limits<uint64_t>::max)();
2504 for (
size_t iter = 0; iter < 100; ++iter) {
2512 for (
size_t i = 0; i < newCalibration.size(); ++i) {
2513 auto diff = mCounters[i];
2514 if (newCalibration[i] > diff) {
2515 newCalibration[i] = diff;
2520 mCalibratedOverhead = std::move(newCalibration);
2527 uint64_t
const numIters = 100000U + (std::random_device{}() & 3U);
2528 uint64_t n = numIters;
2529 uint32_t x = 1234567;
2537 auto measure1 = mCounters;
2548 auto measure2 = mCounters;
2550 for (
size_t i = 0; i < mCounters.size(); ++i) {
2552 auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0;
2553 auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0;
2554 auto overhead = m1 * 2 > m2 ? m1 * 2 - m2 : 0;
2556 mLoopOverhead[i] = divRounded(overhead, numIters);
2562 bool monitor(uint32_t type, uint64_t eventid, Target target);
2564 std::map<uint64_t, Target> mIdToTarget{};
2567 std::vector<uint64_t> mCounters{3};
2568 std::vector<uint64_t> mCalibratedOverhead{3};
2569 std::vector<uint64_t> mLoopOverhead{3};
2571 uint64_t mTimeEnabledNanos = 0;
2572 uint64_t mTimeRunningNanos = 0;
2574 bool mHasError =
false;
2578 LinuxPerformanceCounters::~LinuxPerformanceCounters() {
2584 bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) {
2585 return monitor(PERF_TYPE_SOFTWARE, swId, target);
2588 bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) {
2589 return monitor(PERF_TYPE_HARDWARE, hwId, target);
2594 void LinuxPerformanceCounters::updateResults(uint64_t numIters) {
2596 for (
auto& id_value : mIdToTarget) {
2597 *id_value.second.targetValue = UINT64_C(0);
2604 mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1];
2605 mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2];
2607 for (uint64_t i = 0; i < mCounters[0]; ++i) {
2608 auto idx =
static_cast<size_t>(3 + i * 2 + 0);
2609 auto id = mCounters[idx + 1U];
2611 auto it = mIdToTarget.find(
id);
2612 if (it != mIdToTarget.end()) {
2614 auto& tgt = it->second;
2615 *tgt.targetValue = mCounters[idx];
2616 if (tgt.correctMeasuringOverhead) {
2617 if (*tgt.targetValue >= mCalibratedOverhead[idx]) {
2618 *tgt.targetValue -= mCalibratedOverhead[idx];
2620 *tgt.targetValue = 0U;
2623 if (tgt.correctLoopOverhead) {
2624 auto correctionVal = mLoopOverhead[idx] * numIters;
2625 if (*tgt.targetValue >= correctionVal) {
2626 *tgt.targetValue -= correctionVal;
2628 *tgt.targetValue = 0U;
2635 bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) {
2636 *target.targetValue = (std::numeric_limits<uint64_t>::max)();
2641 auto pea = perf_event_attr();
2642 std::memset(&pea, 0,
sizeof(perf_event_attr));
2644 pea.size =
sizeof(perf_event_attr);
2645 pea.config = eventid;
2647 pea.exclude_kernel = 1;
2651 pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
2655 # if defined(PERF_FLAG_FD_CLOEXEC)
2656 const unsigned long flags = PERF_FLAG_FD_CLOEXEC;
2658 const unsigned long flags = 0;
2662 auto fd =
static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd,
flags));
2672 if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &
id)) {
2678 mIdToTarget.emplace(
id, target);
2681 auto size = 3 + 2 * mIdToTarget.size();
2682 mCounters.resize(size);
2683 mCalibratedOverhead.resize(size);
2684 mLoopOverhead.resize(size);
2689 PerformanceCounters::PerformanceCounters()
2690 : mPc(new LinuxPerformanceCounters())
2694 mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults,
true,
false));
2695 mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles,
true,
false));
2696 mHas.contextSwitches =
2697 mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches,
true,
false));
2698 mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions,
true,
true));
2699 mHas.branchInstructions =
2700 mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions,
true,
false));
2701 mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses,
true,
false));
2706 auto before = ankerl::nanobench::Clock::now();
2707 auto after = ankerl::nanobench::Clock::now();
2712 if (mPc->hasError()) {
2714 mHas = PerfCountSet<bool>{};
2718 PerformanceCounters::~PerformanceCounters() {
2723 void PerformanceCounters::beginMeasure() {
2724 mPc->beginMeasure();
2727 void PerformanceCounters::endMeasure() {
2731 void PerformanceCounters::updateResults(uint64_t numIters) {
2732 mPc->updateResults(numIters);
2737 PerformanceCounters::PerformanceCounters() =
default;
2738 PerformanceCounters::~PerformanceCounters() =
default;
2739 void PerformanceCounters::beginMeasure() {}
2740 void PerformanceCounters::endMeasure() {}
2741 void PerformanceCounters::updateResults(uint64_t) {}
2745 ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t>
const& PerformanceCounters::val() const noexcept {
2748 ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool>
const& PerformanceCounters::has() const noexcept {
2756 NumSep::NumSep(
char sep)
2759 char NumSep::do_thousands_sep()
const {
2763 std::string NumSep::do_grouping()
const {
2768 StreamStateRestorer::StreamStateRestorer(std::ostream& s)
2770 , mLocale(s.getloc())
2771 , mPrecision(s.precision())
2774 , mFmtFlags(s.
flags()) {}
2776 StreamStateRestorer::~StreamStateRestorer() {
2781 void StreamStateRestorer::restore() {
2782 mStream.imbue(mLocale);
2783 mStream.precision(mPrecision);
2784 mStream.width(mWidth);
2785 mStream.fill(mFill);
2786 mStream.flags(mFmtFlags);
2789 Number::Number(
int width,
int precision, int64_t value)
2791 , mPrecision(precision)
2792 , mValue(static_cast<double>(value)) {}
2794 Number::Number(
int width,
int precision,
double value)
2796 , mPrecision(precision)
2799 std::ostream& Number::write(std::ostream& os)
const {
2800 StreamStateRestorer
const restorer(os);
2801 os.imbue(std::locale(os.getloc(),
new NumSep(
',')));
2802 os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue;
2806 std::string Number::to_s()
const {
2807 std::stringstream ss;
2812 std::string to_s(uint64_t n) {
2815 str +=
static_cast<char>(
'0' +
static_cast<char>(n % 10));
2818 std::reverse(str.begin(), str.end());
2822 std::ostream&
operator<<(std::ostream& os, Number
const& n) {
2826 MarkDownColumn::MarkDownColumn(
int w,
int prec, std::string tit, std::string suff,
double val)
2829 , mTitle(
std::move(tit))
2830 , mSuffix(
std::move(suff))
2833 std::string MarkDownColumn::title()
const {
2834 std::stringstream ss;
2835 ss <<
'|' << std::setw(mWidth - 2) << std::right << mTitle <<
' ';
2839 std::string MarkDownColumn::separator()
const {
2840 std::string sep(
static_cast<size_t>(mWidth),
'-');
2846 std::string MarkDownColumn::invalid()
const {
2847 std::string sep(
static_cast<size_t>(mWidth),
' ');
2849 sep[sep.size() - 2] =
'-';
2853 std::string MarkDownColumn::value()
const {
2854 std::stringstream ss;
2855 auto width = mWidth - 2 -
static_cast<int>(mSuffix.size());
2856 ss <<
'|' << Number(width, mPrecision, mValue) << mSuffix <<
' ';
2861 MarkDownCode::MarkDownCode(std::string
const& what) {
2862 mWhat.reserve(what.size() + 2);
2863 mWhat.push_back(
'`');
2864 for (
char const c : what) {
2867 mWhat.push_back(
'`');
2870 mWhat.push_back(
'`');
2873 std::ostream& MarkDownCode::write(std::ostream& os)
const {
2877 std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode) {
2878 return mdCode.write(os);
2884 Config::Config() =
default;
2885 Config::~Config() =
default;
2886 Config& Config::operator=(Config
const&) =
default;
2887 Config& Config::operator=(Config&&) noexcept = default;
2888 Config::Config(Config const&) = default;
2889 Config::Config(Config&&) noexcept = default;
2899 template <
typename T>
2900 inline constexpr
typename std::underlying_type<T>::type u(T val) noexcept {
2901 return static_cast<typename std::underlying_type<T>::type
>(val);
2907 : mConfig(
std::move(benchmarkConfig))
2908 , mNameToMeasurements{detail::u(
Result::Measure::_size)} {}
2910 void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters
const& pc) {
2914 double const dIters = d(iters);
2915 mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters);
2917 mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters);
2918 if (pc.has().pageFaults) {
2919 mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters);
2921 if (pc.has().cpuCycles) {
2922 mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters);
2924 if (pc.has().contextSwitches) {
2925 mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters);
2927 if (pc.has().instructions) {
2928 mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters);
2930 if (pc.has().branchInstructions) {
2931 double branchInstructions = 0.0;
2933 if (pc.val().branchInstructions > iters + 1U) {
2934 branchInstructions = d(pc.val().branchInstructions - (iters + 1U));
2936 mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters);
2938 if (pc.has().branchMisses) {
2940 double branchMisses = d(pc.val().branchMisses);
2941 if (branchMisses > branchInstructions) {
2943 branchMisses = branchInstructions;
2947 branchMisses -= 1.0;
2948 if (branchMisses < 1.0) {
2951 mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters);
2956 Config
const& Result::config() const noexcept {
// Computes the median of the given measurements. The input vector is
// sorted in place as a side effect. An empty input returns 0.0 rather
// than indexing out of range (the original read data[midIdx - 1U] with
// size 0, which is undefined behavior).
inline double calcMedian(std::vector<double>& data) {
    if (data.empty()) {
        return 0.0;
    }
    std::sort(data.begin(), data.end());

    auto midIdx = data.size() / 2U;
    if (1U == (data.size() & 1U)) {
        // odd number of elements: the middle element is the median
        return data[midIdx];
    }
    // even number of elements: average the two middle elements
    return (data[midIdx - 1U] + data[midIdx]) / 2U;
}
2973 double Result::median(Measure m)
const {
2975 auto data = mNameToMeasurements[detail::u(m)];
2976 return calcMedian(data);
2979 double Result::average(Measure m)
const {
2981 auto const& data = mNameToMeasurements[detail::u(m)];
2987 return sum(m) / d(data.size());
2990 double Result::medianAbsolutePercentError(Measure m)
const {
2992 auto data = mNameToMeasurements[detail::u(m)];
2996 auto med = calcMedian(data);
2999 for (
auto& x : data) {
3005 return calcMedian(data);
3009 auto const& data = mNameToMeasurements[detail::u(m)];
3010 return std::accumulate(data.begin(), data.end(), 0.0);
3013 double Result::sumProduct(Measure m1, Measure m2)
const noexcept {
3014 auto const& data1 = mNameToMeasurements[detail::u(m1)];
3015 auto const& data2 = mNameToMeasurements[detail::u(m2)];
3017 if (data1.size() != data2.size()) {
3021 double result = 0.0;
3022 for (
size_t i = 0, s = data1.size(); i != s; ++i) {
3023 result += data1[i] * data2[i];
3028 bool Result::has(Measure m)
const noexcept {
3029 return !mNameToMeasurements[detail::u(m)].empty();
3032 double Result::get(
size_t idx, Measure m)
const {
3033 auto const& data = mNameToMeasurements[detail::u(m)];
3034 return data.at(idx);
3037 bool Result::empty() const noexcept {
3038 return 0U == size();
3041 size_t Result::size() const noexcept {
3042 auto const& data = mNameToMeasurements[detail::u(Measure::elapsed)];
3046 double Result::minimum(Measure m)
const noexcept {
3047 auto const& data = mNameToMeasurements[detail::u(m)];
3053 return *std::min_element(data.begin(), data.end());
3056 double Result::maximum(Measure m)
const noexcept {
3057 auto const& data = mNameToMeasurements[detail::u(m)];
3063 return *std::max_element(data.begin(), data.end());
3067 return mConfig.mContext.at(variableName);
3070 std::string
const&
Result::context(std::string
const& variableName)
const {
3071 return mConfig.mContext.at(variableName);
3074 Result::Measure Result::fromString(std::string
const& str) {
3075 if (str ==
"elapsed") {
3076 return Measure::elapsed;
3078 if (str ==
"iterations") {
3079 return Measure::iterations;
3081 if (str ==
"pagefaults") {
3082 return Measure::pagefaults;
3084 if (str ==
"cpucycles") {
3085 return Measure::cpucycles;
3087 if (str ==
"contextswitches") {
3088 return Measure::contextswitches;
3090 if (str ==
"instructions") {
3091 return Measure::instructions;
3093 if (str ==
"branchinstructions") {
3094 return Measure::branchinstructions;
3096 if (str ==
"branchmisses") {
3097 return Measure::branchmisses;
3100 return Measure::_size;
3105 mConfig.mOut = &std::cout;
3108 Bench::Bench(Bench&&) noexcept = default;
3109 Bench& Bench::operator=(Bench&&) noexcept = default;
3110 Bench::Bench(Bench const&) = default;
3111 Bench& Bench::operator=(Bench const&) = default;
3112 Bench::~Bench() noexcept = default;
3114 double Bench::batch() const noexcept {
3115 return mConfig.mBatch;
3118 double Bench::complexityN() const noexcept {
3119 return mConfig.mComplexityN;
3124 Bench& Bench::relative(
bool isRelativeEnabled) noexcept {
3125 mConfig.mIsRelative = isRelativeEnabled;
3128 bool Bench::relative() const noexcept {
3129 return mConfig.mIsRelative;
3133 mConfig.mShowPerformanceCounters = showPerformanceCounters;
3137 return mConfig.mShowPerformanceCounters;
3143 Bench& Bench::unit(
char const* u) {
3144 if (u != mConfig.mUnit) {
3151 Bench& Bench::unit(std::string
const& u) {
3152 return unit(u.c_str());
3155 std::string
const& Bench::unit() const noexcept {
3156 return mConfig.mUnit;
3159 Bench& Bench::timeUnit(std::chrono::duration<double>
const& tu, std::string
const& tuName) {
3160 mConfig.mTimeUnit = tu;
3161 mConfig.mTimeUnitName = tuName;
3165 std::string
const& Bench::timeUnitName() const noexcept {
3166 return mConfig.mTimeUnitName;
3169 std::chrono::duration<double>
const& Bench::timeUnit() const noexcept {
3170 return mConfig.mTimeUnit;
3174 Bench& Bench::title(
const char* benchmarkTitle) {
3175 if (benchmarkTitle != mConfig.mBenchmarkTitle) {
3178 mConfig.mBenchmarkTitle = benchmarkTitle;
3181 Bench& Bench::title(std::string
const& benchmarkTitle) {
3182 if (benchmarkTitle != mConfig.mBenchmarkTitle) {
3185 mConfig.mBenchmarkTitle = benchmarkTitle;
3189 std::string
const& Bench::title() const noexcept {
3190 return mConfig.mBenchmarkTitle;
3194 mConfig.mBenchmarkName = benchmarkName;
3198 Bench&
Bench::name(std::string
const& benchmarkName) {
3199 mConfig.mBenchmarkName = benchmarkName;
3204 return mConfig.mBenchmarkName;
3207 Bench&
Bench::context(
char const* variableName,
char const* variableValue) {
3208 mConfig.mContext[variableName] = variableValue;
3212 Bench&
Bench::context(std::string
const& variableName, std::string
const& variableValue) {
3213 mConfig.mContext[variableName] = variableValue;
3217 Bench& Bench::clearContext() {
3218 mConfig.mContext.clear();
3223 Bench& Bench::epochs(
size_t numEpochs) noexcept {
3224 mConfig.mNumEpochs = numEpochs;
3227 size_t Bench::epochs() const noexcept {
3228 return mConfig.mNumEpochs;
3232 Bench& Bench::clockResolutionMultiple(
size_t multiple) noexcept {
3233 mConfig.mClockResolutionMultiple = multiple;
3236 size_t Bench::clockResolutionMultiple() const noexcept {
3237 return mConfig.mClockResolutionMultiple;
3241 Bench& Bench::maxEpochTime(std::chrono::nanoseconds t) noexcept {
3242 mConfig.mMaxEpochTime =
t;
3245 std::chrono::nanoseconds Bench::maxEpochTime() const noexcept {
3246 return mConfig.mMaxEpochTime;
3250 Bench& Bench::minEpochTime(std::chrono::nanoseconds t) noexcept {
3251 mConfig.mMinEpochTime =
t;
3254 std::chrono::nanoseconds Bench::minEpochTime() const noexcept {
3255 return mConfig.mMinEpochTime;
3258 Bench& Bench::minEpochIterations(uint64_t numIters) noexcept {
3259 mConfig.mMinEpochIterations = (numIters == 0) ? 1 : numIters;
3262 uint64_t Bench::minEpochIterations() const noexcept {
3263 return mConfig.mMinEpochIterations;
3266 Bench& Bench::epochIterations(uint64_t numIters) noexcept {
3267 mConfig.mEpochIterations = numIters;
3270 uint64_t Bench::epochIterations() const noexcept {
3271 return mConfig.mEpochIterations;
3274 Bench& Bench::warmup(uint64_t numWarmupIters) noexcept {
3275 mConfig.mWarmup = numWarmupIters;
3278 uint64_t Bench::warmup() const noexcept {
3279 return mConfig.mWarmup;
3282 Bench& Bench::config(Config
const& benchmarkConfig) {
3283 mConfig = benchmarkConfig;
3286 Config
const& Bench::config() const noexcept {
3290 Bench& Bench::output(std::ostream* outstream) noexcept {
3291 mConfig.mOut = outstream;
3296 return mConfig.mOut;
3299 std::vector<Result>
const& Bench::results() const noexcept {
3303 Bench&
Bench::render(
char const* templateContent, std::ostream& os) {
3308 Bench&
Bench::render(std::string
const& templateContent, std::ostream& os) {
3313 std::vector<BigO> Bench::complexityBigO()
const {
3314 std::vector<BigO> bigOs;
3315 auto rangeMeasure = BigO::collectRangeMeasure(mResults);
3316 bigOs.emplace_back(
"O(1)", rangeMeasure, [](
double) {
3319 bigOs.emplace_back(
"O(n)", rangeMeasure, [](
double n) {
3322 bigOs.emplace_back(
"O(log n)", rangeMeasure, [](
double n) {
3323 return std::log2(n);
3325 bigOs.emplace_back(
"O(n log n)", rangeMeasure, [](
double n) {
3326 return n * std::log2(n);
3328 bigOs.emplace_back(
"O(n^2)", rangeMeasure, [](
double n) {
3331 bigOs.emplace_back(
"O(n^3)", rangeMeasure, [](
double n) {
3334 std::sort(bigOs.begin(), bigOs.end());
3341 std::random_device rd;
3342 std::uniform_int_distribution<uint64_t> dist;
3346 }
while (mX == 0 && mY == 0);
// One step of the SplitMix64 generator (Sebastiano Vigna). Advances
// `state` by the golden-gamma increment and returns a well-mixed 64-bit
// value; used here to derive Rng state words from a single seed.
uint64_t splitMix64(uint64_t& state) noexcept {
    state += UINT64_C(0x9e3779b97f4a7c15);
    uint64_t mixed = state;
    mixed = (mixed ^ (mixed >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9);
    mixed = (mixed ^ (mixed >> 27U)) * UINT64_C(0x94d049bb133111eb);
    mixed ^= mixed >> 31U;
    return mixed;
}
3358 Rng::Rng(uint64_t seed) noexcept
3359 : mX(splitMix64(seed))
3360 , mY(splitMix64(seed)) {
3361 for (
size_t i = 0; i < 10; ++i) {
3367 Rng::Rng(uint64_t x, uint64_t y) noexcept
3371 Rng Rng::copy() const noexcept {
3375 Rng::Rng(std::vector<uint64_t>
const& data)
3378 if (data.size() != 2) {
3379 throw std::runtime_error(
"ankerl::nanobench::Rng::Rng: needed exactly 2 entries in data, but got " +
3380 detail::fmt::to_s(data.size()));
3386 std::vector<uint64_t> Rng::state()
const {
3387 std::vector<uint64_t> data(2);
3393 BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result>
const& results) {
3394 BigO::RangeMeasure rangeMeasure;
3395 for (
auto const& result : results) {
3396 if (result.config().mComplexityN > 0.0) {
3397 rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed));
3400 return rangeMeasure;
3403 BigO::BigO(std::string bigOName, RangeMeasure
const& rangeMeasure)
3404 : mName(
std::move(bigOName)) {
3407 double sumRangeMeasure = 0.0;
3408 double sumRangeRange = 0.0;
3410 for (
const auto& rm : rangeMeasure) {
3411 sumRangeMeasure += rm.first * rm.second;
3412 sumRangeRange += rm.first * rm.first;
3414 mConstant = sumRangeMeasure / sumRangeRange;
3418 double sumMeasure = 0.0;
3419 for (
const auto& rm : rangeMeasure) {
3420 auto diff = mConstant * rm.first - rm.second;
3423 sumMeasure += rm.second;
3426 auto n =
static_cast<double>(rangeMeasure.size());
3427 auto mean = sumMeasure / n;
3428 mNormalizedRootMeanSquare = std::sqrt(err / n) / mean;
3431 BigO::BigO(
const char* bigOName, RangeMeasure
const& rangeMeasure)
3432 : BigO(
std::string(bigOName), rangeMeasure) {}
3434 std::string
const&
BigO::name() const noexcept {
3438 double BigO::constant() const noexcept {
3442 double BigO::normalizedRootMeanSquare() const noexcept {
3443 return mNormalizedRootMeanSquare;
3447 return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName);
3450 std::ostream&
operator<<(std::ostream& os, BigO
const& bigO) {
3451 return os << bigO.constant() <<
" * " << bigO.name() <<
", rms=" << bigO.normalizedRootMeanSquare();
3454 std::ostream&
operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO>
const& bigOs) {
3455 detail::fmt::StreamStateRestorer
const restorer(os);
3456 os << std::endl <<
"| coefficient | err% | complexity" << std::endl <<
"|--------------:|-------:|------------" << std::endl;
3457 for (
auto const& bigO : bigOs) {
3458 os <<
"|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() <<
" ";
3459 os <<
"|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) <<
"% ";
3460 os <<
"| " << bigO.name();
Main entry point to nanobench's benchmarking facility.
Bench & operator=(Bench &&other) noexcept
Bench & run(char const *benchmarkName, Op &&op)
Repeatedly calls op() based on the configuration, and performs measurements.
ANKERL_NANOBENCH(NODISCARD) std Bench & doNotOptimizeAway(Arg &&arg)
Retrieves all benchmark results collected by the bench object so far.
Bench & batch(T b) noexcept
Sets the batch size.
Bench()
Creates a new benchmark for configuration and running of benchmarks.
std::vector< BigO > complexityBigO() const
Bench(Bench &&other) noexcept
Bench & operator=(Bench const &other)
Bench(Bench const &other)
Bench & complexityN(T n) noexcept
static RangeMeasure mapRangeMeasure(RangeMeasure data, Op op)
BigO(std::string bigOName, RangeMeasure const &scaledRangeMeasure)
std::vector< std::pair< double, double > > RangeMeasure
BigO(char const *bigOName, RangeMeasure const &rangeMeasure, Op rangeToN)
static RangeMeasure collectRangeMeasure(std::vector< Result > const &results)
BigO(std::string bigOName, RangeMeasure const &rangeMeasure, Op rangeToN)
BigO(char const *bigOName, RangeMeasure const &scaledRangeMeasure)
Result(Config benchmarkConfig)
static Measure fromString(std::string const &str)
void add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const &pc)
Result(Result &&other) noexcept
Result & operator=(Result &&other) noexcept
ANKERL_NANOBENCH(NODISCARD) Config const &config() const noexcept
Result(Result const &other)
Result & operator=(Result const &other)
An extremely fast random generator.
static constexpr uint64_t() min()
Rng(Rng const &)=delete
As a safety precaution, we don't allow copying.
void shuffle(Container &container) noexcept
Shuffles all entries in the given container.
Rng(Rng &&) noexcept=default
Rng & operator=(Rng const &)=delete
Same as Rng(Rng const&), we don't allow assignment.
static constexpr uint64_t() max()
double uniform01() noexcept
Provides a random uniform double value between 0 and 1.
uint64_t result_type
This RNG provides 64bit randomness.
void moveResultTo(std::vector< Result > &results) noexcept
void add(std::chrono::nanoseconds elapsed, PerformanceCounters const &pc) noexcept
IterationLogic(IterationLogic &&)=delete
IterationLogic & operator=(IterationLogic &&)=delete
ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept
IterationLogic(IterationLogic const &)=delete
IterationLogic & operator=(IterationLogic const &)=delete
IterationLogic(Bench const &bench)
#define T(expected, seed, data)
PerformanceCounters & performanceCounters()
void doNotOptimizeAway(T &val)
void doNotOptimizeAway(T const &val)
char const * json() noexcept
Template to generate JSON data.
char const * pyperf() noexcept
Output in pyperf compatible JSON format, which can be used for more analyzation.
char const * csv() noexcept
CSV data for the benchmark results.
char const * htmlBoxplot() noexcept
HTML output that uses plotly to generate an interactive boxplot chart. See the tutorial for an example.
void render(char const *mustacheTemplate, Bench const &bench, std::ostream &out)
Renders output from a mustache-like template and benchmark results.
std::ostream & operator<<(std::ostream &os, std::vector< ankerl::nanobench::BigO > const &bigOs)
std::conditional< std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock, std::chrono::steady_clock >::type Clock
void render(std::string const &mustacheTemplate, std::vector< Result > const &results, std::ostream &out)
void doNotOptimizeAway(Arg &&arg)
Makes sure none of the given arguments are optimized away by the compiler.
std::ostream & operator<<(std::ostream &os, BigO const &bigO)
#define ANKERL_NANOBENCH_LOG(x)
#define ANKERL_NANOBENCH_NO_SANITIZE(...)
#define ANKERL_NANOBENCH(x)
bool operator==(const CNetAddr &a, const CNetAddr &b)
bool operator<(const CNetAddr &a, const CNetAddr &b)
Config & operator=(Config &&other) noexcept
Config(Config const &other)
Config(Config &&other) noexcept
Config & operator=(Config const &other)
static SECP256K1_INLINE uint64_t rotl(const uint64_t x, int k)