// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"

#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"

namespace benchmark {
// Print a list of benchmarks. This option overrides all other options.
BM_DEFINE_bool(benchmark_list_tests, false);

// A regular expression that specifies the set of benchmarks to execute.  If
// this flag is empty, or if this flag is the string "all", all benchmarks
// linked into the binary are run.
BM_DEFINE_string(benchmark_filter, "");

// Specification of how long to run the benchmark.
//
// It can be either an exact number of iterations (specified as `<integer>x`),
// or a minimum number of seconds (specified as `<float>s`). If the latter
// format (i.e., min seconds) is used, the system may run the benchmark longer
// until the results are considered significant.
//
// For backward compatibility, the `s` suffix may be omitted, in which case
// the specified number is interpreted as the number of seconds.
//
// For cpu-time based tests, this is the lower bound
// on the total cpu time used by all threads that make up the test.  For
// real-time based tests, this is the lower bound on the elapsed time of the
// benchmark execution, regardless of the number of threads.
BM_DEFINE_string(benchmark_min_time, kDefaultMinTimeStr);

// Minimum number of seconds a benchmark should be run before results should be
// taken into account. This can be necessary, e.g., for benchmarks of code that
// needs to fill some form of cache before performance is of interest.
// Note: results gathered within this period are discarded and not used for
// the reported result.
BM_DEFINE_double(benchmark_min_warmup_time, 0.0);

// The number of runs of each benchmark. If greater than 1, the mean and
// standard deviation of the runs will be reported.
BM_DEFINE_int32(benchmark_repetitions, 1);

// If set, enable random interleaving of repetitions of all benchmarks.
// See http://github.com/google/benchmark/issues/1051 for details.
BM_DEFINE_bool(benchmark_enable_random_interleaving, false);

// Report the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are reported for
// repeated benchmarks. Affects all reporters.
BM_DEFINE_bool(benchmark_report_aggregates_only, false);

// Display the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are displayed for
// repeated benchmarks. Unlike benchmark_report_aggregates_only, this only
// affects the display reporter, *not* the file reporter, which will still
// contain all the output.
BM_DEFINE_bool(benchmark_display_aggregates_only, false);

// The format to use for console output.
// Valid values are 'console', 'json', or 'csv'.
BM_DEFINE_string(benchmark_format, "console");

// The format to use for file output.
// Valid values are 'console', 'json', or 'csv'.
BM_DEFINE_string(benchmark_out_format, "json");

// The file to write additional output to.
BM_DEFINE_string(benchmark_out, "");

// Whether to use colors in the output.  Valid values:
// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
// the output is being sent to a terminal and the TERM environment variable is
// set to a terminal type that supports colors.
BM_DEFINE_string(benchmark_color, "auto");

// Whether to use tabular format when printing user counters to the console.
// Valid values: 'true'/'yes'/1, 'false'/'no'/0.  Defaults to false.
BM_DEFINE_bool(benchmark_counters_tabular, false);

// List of additional perf counters to collect, in libpfm format. For more
// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
BM_DEFINE_string(benchmark_perf_counters, "");

// Extra context to include in the output, formatted as comma-separated
// key-value pairs. Kept internal as it's only used for parsing from the
// environment/command line.
BM_DEFINE_kvpairs(benchmark_context, {});

// Set the default time unit to use for reports.
// Valid values are 'ns', 'us', 'ms' or 's'.
BM_DEFINE_string(benchmark_time_unit, "");

// The level of verbose logging to output.
BM_DEFINE_int32(v, 0);

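// Illustrative usage (not part of this file): the flags above are normally
// passed on the command line of the benchmark binary, e.g.
//
//   ./mybenchmark --benchmark_filter=BM_Foo/.* \
//                 --benchmark_repetitions=5 \
//                 --benchmark_report_aggregates_only=true \
//                 --benchmark_out=results.json --benchmark_out_format=json
//
// `mybenchmark` and `BM_Foo` are hypothetical names. The BM_DEFINE_* macros
// also pick up defaults from similarly named environment variables (e.g.
// BENCHMARK_FILTER) via the helpers in commandlineflags.
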
namespace internal {

std::map<std::string, std::string>* global_context = nullptr;

BENCHMARK_EXPORT std::map<std::string, std::string>*& GetGlobalContext() {
  return global_context;
}

static void const volatile* volatile global_force_escape_pointer;

// FIXME: Verify if LTO still messes this up?
void UseCharPointer(char const volatile* const v) {
  // We want to escape the pointer `v` so that the compiler cannot eliminate
  // the computations that produced it. To do that, we store it into a
  // volatile variable, since a volatile store is generally not something the
  // compiler is allowed to elide.
  global_force_escape_pointer = reinterpret_cast<void const volatile*>(v);
}
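
// Illustrative sketch (assumption: the exact dispatch lives in benchmark.h):
// user code reaches UseCharPointer through benchmark::DoNotOptimize on
// toolchains without an inline-assembly escape hatch. A typical use:
//
//   static void BM_Sum(benchmark::State& state) {
//     for (auto _ : state) {
//       long sum = 0;
//       for (long i = 0; i < 1000; ++i) sum += i;
//       benchmark::DoNotOptimize(sum);  // keeps `sum` (and the loop) alive
//     }
//   }
//   BENCHMARK(BM_Sum);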

}  // namespace internal

State::State(std::string name, IterationCount max_iters,
             const std::vector<int64_t>& ranges, int thread_i, int n_threads,
             internal::ThreadTimer* timer, internal::ThreadManager* manager,
             internal::PerfCountersMeasurement* perf_counters_measurement,
             ProfilerManager* profiler_manager)
    : total_iterations_(0),
      batch_leftover_(0),
      max_iterations(max_iters),
      started_(false),
      finished_(false),
      skipped_(internal::NotSkipped),
      range_(ranges),
      complexity_n_(0),
      name_(std::move(name)),
      thread_index_(thread_i),
      threads_(n_threads),
      timer_(timer),
      manager_(manager),
      perf_counters_measurement_(perf_counters_measurement),
      profiler_manager_(profiler_manager) {
  BM_CHECK(max_iterations != 0) << "At least one iteration must be run";
  BM_CHECK_LT(thread_index_, threads_)
      << "thread_index must be less than threads";

  // Add counters with the correct flag now.  If added with `counters[name]`
  // in `PauseTiming`, a new `Counter` would be inserted the first time, and
  // it would not have the flag.  Inserting them now also reduces the
  // allocations during the benchmark.
  if (perf_counters_measurement_) {
    for (const std::string& counter_name :
         perf_counters_measurement_->names()) {
      counters[counter_name] = Counter(0.0, Counter::kAvgIterations);
    }
  }

  // Note: The use of offsetof below is technically undefined until C++17
  // because State is not a standard layout type. However, all compilers
  // currently provide well-defined behavior as an extension (which is
  // demonstrated since constexpr evaluation must diagnose all undefined
  // behavior). GCC and Clang also warn about this use of offsetof, which
  // must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
#elif defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
#if defined(__NVCC__)
#pragma nv_diagnostic push
#pragma nv_diag_suppress 1427
#endif
#if defined(__NVCOMPILER)
#pragma diagnostic push
#pragma diag_suppress offset_in_non_POD_nonstandard
#endif
  // Offset tests to ensure commonly accessed data is on the first cache line.
  const int cache_line_size = 64;
  static_assert(
      offsetof(State, skipped_) <= (cache_line_size - sizeof(skipped_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
#if defined(__NVCC__)
#pragma nv_diagnostic pop
#endif
#if defined(__NVCOMPILER)
#pragma diagnostic pop
#endif
}

void State::PauseTiming() {
  // Add in the time accumulated so far.
  BM_CHECK(started_ && !finished_ && !skipped());
  timer_->StopTimer();
  if (perf_counters_measurement_) {
    std::vector<std::pair<std::string, double>> measurements;
    if (!perf_counters_measurement_->Stop(measurements)) {
      BM_CHECK(false) << "Failed to read perf counter values.";
    }
    for (const auto& name_and_measurement : measurements) {
      const std::string& name = name_and_measurement.first;
      const double measurement = name_and_measurement.second;
      // Counter was inserted with the `kAvgIterations` flag by the
      // constructor.
      assert(counters.find(name) != counters.end());
      counters[name].value += measurement;
    }
  }
}

void State::ResumeTiming() {
  BM_CHECK(started_ && !finished_ && !skipped());
  timer_->StartTimer();
  if (perf_counters_measurement_) {
    perf_counters_measurement_->Start();
  }
}

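// Illustrative usage of PauseTiming()/ResumeTiming() from a benchmark body
// (`MakeInput` is a hypothetical helper, not part of the library):
//
//   static void BM_Sort(benchmark::State& state) {
//     for (auto _ : state) {
//       state.PauseTiming();  // exclude setup cost from the measurement
//       std::vector<int> v = MakeInput(state.range(0));
//       state.ResumeTiming();
//       std::sort(v.begin(), v.end());
//     }
//   }
//   BENCHMARK(BM_Sort)->Range(8, 8 << 10);
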
void State::SkipWithMessage(const std::string& msg) {
  skipped_ = internal::SkippedWithMessage;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (internal::NotSkipped == manager_->results.skipped_) {
      manager_->results.skip_message_ = msg;
      manager_->results.skipped_ = skipped_;
    }
  }
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}

void State::SkipWithError(const std::string& msg) {
  skipped_ = internal::SkippedWithError;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (internal::NotSkipped == manager_->results.skipped_) {
      manager_->results.skip_message_ = msg;
      manager_->results.skipped_ = skipped_;
    }
  }
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}

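// Illustrative usage: skip early when a precondition fails ("data.bin" is a
// hypothetical input file). Because skipped() becomes true, the measurement
// loop runs zero iterations; returning right away is the simplest way out:
//
//   static void BM_ReadFile(benchmark::State& state) {
//     std::ifstream f("data.bin", std::ios::binary);
//     if (!f) {
//       state.SkipWithError("could not open data.bin");
//       return;
//     }
//     for (auto _ : state) { /* ... timed reads ... */ }
//   }
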
void State::SetIterationTime(double seconds) {
  timer_->SetIterationTime(seconds);
}

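// Illustrative usage with manual timing: the benchmark measures time itself
// (e.g. around an asynchronous operation) and reports it per iteration.
// `LaunchAndWait` is a hypothetical function returning elapsed seconds:
//
//   static void BM_Async(benchmark::State& state) {
//     for (auto _ : state) {
//       double elapsed_seconds = LaunchAndWait();
//       state.SetIterationTime(elapsed_seconds);
//     }
//   }
//   BENCHMARK(BM_Async)->UseManualTime();
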
void State::SetLabel(const std::string& label) {
  MutexLock l(manager_->GetBenchmarkMutex());
  manager_->results.report_label_ = label;
}

void State::StartKeepRunning() {
  BM_CHECK(!started_ && !finished_);
  started_ = true;
  total_iterations_ = skipped() ? 0 : max_iterations;
  if (BENCHMARK_BUILTIN_EXPECT(profiler_manager_ != nullptr, false))
    profiler_manager_->AfterSetupStart();
  manager_->StartStopBarrier();
  if (!skipped()) ResumeTiming();
}

void State::FinishKeepRunning() {
  BM_CHECK(started_ && (!finished_ || skipped()));
  if (!skipped()) {
    PauseTiming();
  }
  // Total iterations has now wrapped around past 0. Fix this.
  total_iterations_ = 0;
  finished_ = true;
  manager_->StartStopBarrier();
  if (BENCHMARK_BUILTIN_EXPECT(profiler_manager_ != nullptr, false))
    profiler_manager_->BeforeTeardownStop();
}

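// Illustrative: StartKeepRunning()/FinishKeepRunning() are not called directly
// by user code; they bracket the measurement loop that `for (auto _ : state)`
// (or `while (state.KeepRunning())`) expands to, roughly:
//
//   state.StartKeepRunning();     // start timers, hit the start barrier
//   while (/* iterations remain */) { /* ...benchmark body... */ }
//   state.FinishKeepRunning();    // stop timers, hit the stop barrier
//
// The exact expansion lives in benchmark.h (State::begin()/end() and
// State::KeepRunning()).
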
namespace internal {
namespace {

// Flushes streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
void FlushStreams(BenchmarkReporter* reporter) {
  if (!reporter) return;
  std::flush(reporter->GetOutputStream());
  std::flush(reporter->GetErrorStream());
}

// Reports in both display and file reporters.
void Report(BenchmarkReporter* display_reporter,
            BenchmarkReporter* file_reporter, const RunResults& run_results) {
  auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only,
                       const RunResults& results) {
    assert(reporter);
    // If there are no aggregates, output the non-aggregates regardless.
    aggregates_only &= !results.aggregates_only.empty();
    if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
    if (!results.aggregates_only.empty())
      reporter->ReportRuns(results.aggregates_only);
  };

  report_one(display_reporter, run_results.display_report_aggregates_only,
             run_results);
  if (file_reporter)
    report_one(file_reporter, run_results.file_report_aggregates_only,
               run_results);

  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
                   BenchmarkReporter* display_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note: the file_reporter can be null.
  BM_CHECK(display_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  size_t stat_field_width = 0;
  for (const BenchmarkInstance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name().str().size());
    might_have_aggregates |= benchmark.repetitions() > 1;

    for (const auto& Stat : benchmark.statistics())
      stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
  }
  if (might_have_aggregates) name_field_width += 1 + stat_field_width;

  // Print the header here.
  BenchmarkReporter::Context context;
  context.name_field_width = name_field_width;

  // Keep track of running times of all instances of each benchmark family.
  std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports>
      per_family_reports;

  if (display_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    FlushStreams(display_reporter);
    FlushStreams(file_reporter);

    size_t num_repetitions_total = 0;

    // This perfcounters object needs to be created before the runners vector
    // below so it outlasts their lifetime.
    PerfCountersMeasurement perfcounters(
        StrSplit(FLAGS_benchmark_perf_counters, ','));

    // Vector of benchmarks to run.
    std::vector<internal::BenchmarkRunner> runners;
    runners.reserve(benchmarks.size());

    // Count the number of benchmarks with threads to warn the user in case
    // performance counters are used.
    int benchmarks_with_threads = 0;

    // Loop through all benchmarks.
    for (const BenchmarkInstance& benchmark : benchmarks) {
      BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
      if (benchmark.complexity() != oNone)
        reports_for_family = &per_family_reports[benchmark.family_index()];
      benchmarks_with_threads += (benchmark.threads() > 1);
      runners.emplace_back(benchmark, &perfcounters, reports_for_family);
      int num_repeats_of_this_instance = runners.back().GetNumRepeats();
      num_repetitions_total +=
          static_cast<size_t>(num_repeats_of_this_instance);
      if (reports_for_family)
        reports_for_family->num_runs_total += num_repeats_of_this_instance;
    }
    assert(runners.size() == benchmarks.size() && "Unexpected runner count.");

    // The use of performance counters with threads would be unintuitive for
    // the average user, so we need to warn them about this case.
    if ((benchmarks_with_threads > 0) && (perfcounters.num_counters() > 0)) {
      GetErrorLogInstance()
          << "***WARNING*** There are " << benchmarks_with_threads
          << " benchmarks with threads and " << perfcounters.num_counters()
          << " performance counters were requested. Beware: counters will "
             "reflect the combined usage across all "
             "threads.\n";
    }

    std::vector<size_t> repetition_indices;
    repetition_indices.reserve(num_repetitions_total);
    for (size_t runner_index = 0, num_runners = runners.size();
         runner_index != num_runners; ++runner_index) {
      const internal::BenchmarkRunner& runner = runners[runner_index];
      std::fill_n(std::back_inserter(repetition_indices),
                  runner.GetNumRepeats(), runner_index);
    }
    assert(repetition_indices.size() == num_repetitions_total &&
           "Unexpected number of repetition indexes.");

    if (FLAGS_benchmark_enable_random_interleaving) {
      std::random_device rd;
      std::mt19937 g(rd());
      std::shuffle(repetition_indices.begin(), repetition_indices.end(), g);
    }

    for (size_t repetition_index : repetition_indices) {
      internal::BenchmarkRunner& runner = runners[repetition_index];
      runner.DoOneRepetition();
      if (runner.HasRepeatsRemaining()) continue;
      // FIXME: report each repetition separately, not all of them in bulk.

      display_reporter->ReportRunsConfig(
          runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
      if (file_reporter)
        file_reporter->ReportRunsConfig(
            runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());

      RunResults run_results = runner.GetResults();

      // Maybe calculate the complexity report.
      if (const auto* reports_for_family = runner.GetReportsForFamily()) {
        if (reports_for_family->num_runs_done ==
            reports_for_family->num_runs_total) {
          auto additional_run_stats = ComputeBigO(reports_for_family->Runs);
          run_results.aggregates_only.insert(run_results.aggregates_only.end(),
                                             additional_run_stats.begin(),
                                             additional_run_stats.end());
          per_family_reports.erase(
              static_cast<int>(reports_for_family->Runs.front().family_index));
        }
      }

      Report(display_reporter, file_reporter, run_results);
    }
  }
  display_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

// Disable deprecated warnings temporarily because we need to reference
// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations.
BENCHMARK_DISABLE_DEPRECATED_WARNING

std::unique_ptr<BenchmarkReporter> CreateReporter(
    std::string const& name, ConsoleReporter::OutputOptions output_opts) {
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
  if (name == "console") {
    return PtrType(new ConsoleReporter(output_opts));
  }
  if (name == "json") {
    return PtrType(new JSONReporter());
  }
  if (name == "csv") {
    return PtrType(new CSVReporter());
  }
  std::cerr << "Unexpected format: '" << name << "'\n";
  std::exit(1);
}

BENCHMARK_RESTORE_DEPRECATED_WARNING

}  // end namespace

bool IsZero(double n) {
  return std::abs(n) < std::numeric_limits<double>::epsilon();
}

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
  int output_opts = ConsoleReporter::OO_Defaults;
  auto is_benchmark_color = [force_no_color]() -> bool {
    if (force_no_color) {
      return false;
    }
    if (FLAGS_benchmark_color == "auto") {
      return IsColorTerminal();
    }
    return IsTruthyFlagValue(FLAGS_benchmark_color);
  };
  if (is_benchmark_color()) {
    output_opts |= ConsoleReporter::OO_Color;
  } else {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if (FLAGS_benchmark_counters_tabular) {
    output_opts |= ConsoleReporter::OO_Tabular;
  } else {
    output_opts &= ~ConsoleReporter::OO_Tabular;
  }
  return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}

}  // end namespace internal

BenchmarkReporter* CreateDefaultDisplayReporter() {
  static auto default_display_reporter =
      internal::CreateReporter(FLAGS_benchmark_format,
                               internal::GetOutputOptions())
          .release();
  return default_display_reporter;
}

size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(std::string spec) {
  return RunSpecifiedBenchmarks(nullptr, nullptr, std::move(spec));
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr,
                                FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              std::string spec) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr, std::move(spec));
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, file_reporter,
                                FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter,
                              std::string spec) {
  if (spec.empty() || spec == "all")
    spec = ".";  // Regexp that matches all benchmarks

  // Set up the reporters.
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_display_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!display_reporter) {
    default_display_reporter.reset(CreateDefaultDisplayReporter());
    display_reporter = default_display_reporter.get();
  }
  auto& Out = display_reporter->GetOutputStream();
  auto& Err = display_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname.empty() && file_reporter) {
    Err << "A custom file reporter was provided but "
           "--benchmark_out=<file> was not specified."
        << std::endl;
    Out.flush();
    Err.flush();
    std::exit(1);
  }
  if (!fname.empty()) {
    output_file.open(fname);
    if (!output_file.is_open()) {
      Err << "invalid file name: '" << fname << "'" << std::endl;
      Out.flush();
      Err.flush();
      std::exit(1);
    }
    if (!file_reporter) {
      default_file_reporter = internal::CreateReporter(
          FLAGS_benchmark_out_format, FLAGS_benchmark_counters_tabular
                                          ? ConsoleReporter::OO_Tabular
                                          : ConsoleReporter::OO_None);
      file_reporter = default_file_reporter.get();
    }
    file_reporter->SetOutputStream(&output_file);
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::BenchmarkInstance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) {
    Out.flush();
    Err.flush();
    return 0;
  }

  if (benchmarks.empty()) {
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
    Out.flush();
    Err.flush();
    return 0;
  }

  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks)
      Out << benchmark.name().str() << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
  }

  Out.flush();
  Err.flush();
  return benchmarks.size();
}

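// Illustrative: a custom main() wiring a reporter through this overload.
// `MyReporter` is a hypothetical BenchmarkReporter subclass; the standard
// BENCHMARK_MAIN() macro performs the same Initialize/Run/Shutdown sequence
// with the default reporters:
//
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     MyReporter display_reporter;
//     benchmark::RunSpecifiedBenchmarks(&display_reporter);
//     benchmark::Shutdown();
//     return 0;
//   }
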
namespace {
// Stores the time unit benchmarks use by default.
TimeUnit default_time_unit = kNanosecond;
}  // namespace

TimeUnit GetDefaultTimeUnit() { return default_time_unit; }

void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; }

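// Illustrative: code can override the reporting unit globally before the
// benchmarks run, equivalent to passing --benchmark_time_unit=ms:
//
//   benchmark::SetDefaultTimeUnit(benchmark::kMillisecond);
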
std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; }

void SetBenchmarkFilter(std::string value) {
  FLAGS_benchmark_filter = std::move(value);
}

int32_t GetBenchmarkVerbosity() { return FLAGS_v; }

void RegisterMemoryManager(MemoryManager* manager) {
  internal::memory_manager = manager;
}

void RegisterProfilerManager(ProfilerManager* manager) {
  internal::profiler_manager = manager;
}

void AddCustomContext(const std::string& key, const std::string& value) {
  if (internal::global_context == nullptr) {
    internal::global_context = new std::map<std::string, std::string>();
  }
  if (!internal::global_context->emplace(key, value).second) {
    std::cerr << "Failed to add custom context \"" << key << "\" as it already "
              << "exists with value \"" << value << "\"\n";
  }
}

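// Illustrative: attach build metadata to the reported context, equivalent to
// passing --benchmark_context=compiler=clang on the command line (the key and
// value here are arbitrary examples):
//
//   benchmark::AddCustomContext("compiler", "clang");
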
namespace internal {

void (*HelperPrintf)();

void PrintUsageAndExit() {
  HelperPrintf();
  exit(0);
}

void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) {
  if (time_unit_flag == "s") {
    return SetDefaultTimeUnit(kSecond);
  }
  if (time_unit_flag == "ms") {
    return SetDefaultTimeUnit(kMillisecond);
  }
  if (time_unit_flag == "us") {
    return SetDefaultTimeUnit(kMicrosecond);
  }
  if (time_unit_flag == "ns") {
    return SetDefaultTimeUnit(kNanosecond);
  }
  if (!time_unit_flag.empty()) {
    PrintUsageAndExit();
  }
}

void ParseCommandLineFlags(int* argc, char** argv) {
  using namespace benchmark;
  BenchmarkReporter::Context::executable_name =
      (argc && *argc > 0) ? argv[0] : "unknown";
  for (int i = 1; argc && i < *argc; ++i) {
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                      &FLAGS_benchmark_list_tests) ||
        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
        ParseStringFlag(argv[i], "benchmark_min_time",
                        &FLAGS_benchmark_min_time) ||
        ParseDoubleFlag(argv[i], "benchmark_min_warmup_time",
                        &FLAGS_benchmark_min_warmup_time) ||
        ParseInt32Flag(argv[i], "benchmark_repetitions",
                       &FLAGS_benchmark_repetitions) ||
        ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
                      &FLAGS_benchmark_enable_random_interleaving) ||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
                      &FLAGS_benchmark_report_aggregates_only) ||
        ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
                      &FLAGS_benchmark_display_aggregates_only) ||
        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
        ParseStringFlag(argv[i], "benchmark_out_format",
                        &FLAGS_benchmark_out_format) ||
        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
                      &FLAGS_benchmark_counters_tabular) ||
        ParseStringFlag(argv[i], "benchmark_perf_counters",
                        &FLAGS_benchmark_perf_counters) ||
        ParseKeyValueFlag(argv[i], "benchmark_context",
                          &FLAGS_benchmark_context) ||
        ParseStringFlag(argv[i], "benchmark_time_unit",
                        &FLAGS_benchmark_time_unit) ||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
      // Remove the recognized flag from argv and re-examine the new argv[i].
      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];

      --(*argc);
      --i;
    } else if (IsFlag(argv[i], "help")) {
      PrintUsageAndExit();
    }
  }
  for (auto const* flag :
       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) {
    if (*flag != "console" && *flag != "json" && *flag != "csv") {
      PrintUsageAndExit();
    }
  }
  SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit);
  if (FLAGS_benchmark_color.empty()) {
    PrintUsageAndExit();
  }
  for (const auto& kv : FLAGS_benchmark_context) {
    AddCustomContext(kv.first, kv.second);
  }
}

int InitializeStreams() {
  static std::ios_base::Init init;
  return 0;
}

}  // end namespace internal

std::string GetBenchmarkVersion() {
#ifdef BENCHMARK_VERSION
  return {BENCHMARK_VERSION};
#else
  return {""};
#endif
}

void PrintDefaultHelp() {
  fprintf(stdout,
          "benchmark"
          " [--benchmark_list_tests={true|false}]\n"
          "          [--benchmark_filter=<regex>]\n"
          "          [--benchmark_min_time=`<integer>x` OR `<float>s` ]\n"
          "          [--benchmark_min_warmup_time=<min_warmup_time>]\n"
          "          [--benchmark_repetitions=<num_repetitions>]\n"
          "          [--benchmark_enable_random_interleaving={true|false}]\n"
          "          [--benchmark_report_aggregates_only={true|false}]\n"
          "          [--benchmark_display_aggregates_only={true|false}]\n"
          "          [--benchmark_format=<console|json|csv>]\n"
          "          [--benchmark_out=<filename>]\n"
          "          [--benchmark_out_format=<json|console|csv>]\n"
          "          [--benchmark_color={auto|true|false}]\n"
          "          [--benchmark_counters_tabular={true|false}]\n"
#if defined HAVE_LIBPFM
          "          [--benchmark_perf_counters=<counter>,...]\n"
#endif
          "          [--benchmark_context=<key>=<value>,...]\n"
          "          [--benchmark_time_unit={ns|us|ms|s}]\n"
          "          [--v=<verbosity>]\n");
}

void Initialize(int* argc, char** argv, void (*HelperPrintf)()) {
  internal::HelperPrintf = HelperPrintf;
  internal::ParseCommandLineFlags(argc, argv);
  internal::LogLevel() = FLAGS_v;
}

void Shutdown() { delete internal::global_context; }

bool ReportUnrecognizedArguments(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
            argv[i]);
  }
  return argc > 1;
}

}  // end namespace benchmark
819