xref: /aosp_15_r20/external/skia/tools/testrunners/benchmark/target/RasterBenchmarkTarget.cpp (revision c8dee2aa9b3f27cf6c858bd81872bdeb2c07ed17)
1 /*
2  * Copyright 2023 Google LLC
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
#include "bench/Benchmark.h"
#include "tools/flags/CommandLineFlags.h"
#include "tools/testrunners/benchmark/target/BenchmarkTarget.h"
#include "tools/testrunners/common/TestRunner.h"

#include <algorithm>
#include <cmath>
12 
// Command-line knobs for the loop auto-tuning performed in
// RasterBenchmarkTarget::autoTuneLoops() below.
static DEFINE_int(maxCalibrationAttempts,
                  3,
                  "Try up to this many times to guess loops for a benchmark, or skip the "
                  "benchmark.");
static DEFINE_double(overheadGoal,
                     0.0001,
                     "Loop until timer overhead is at most this fraction of our measurements.");
static DEFINE_int(overheadLoops, 100000, "Loops to estimate timer overhead.");

// Pretty-prints a duration in milliseconds. Defined in BazelBenchmarkTestRunner.cpp.
SkString humanize(double ms);
24 
printGlobalStats()25 void BenchmarkTarget::printGlobalStats() {}
26 
27 class RasterBenchmarkTarget : public BenchmarkTarget {
28 public:
RasterBenchmarkTarget(std::unique_ptr<SurfaceManager> surfaceManager,Benchmark * benchmark)29     RasterBenchmarkTarget(std::unique_ptr<SurfaceManager> surfaceManager, Benchmark* benchmark)
30             : BenchmarkTarget(std::move(surfaceManager), benchmark) {}
31 
getBackend() const32     Benchmark::Backend getBackend() const override { return Benchmark::Backend::kRaster; }
33 
34     // Based on nanobench's setup_cpu_bench():
35     // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#466.
autoTuneLoops() const36     std::tuple<int, bool> autoTuneLoops() const override {
37         // Estimate timer overhead. Based on:
38         // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#402.
39         double overhead = 0;
40         for (int i = 0; i < FLAGS_overheadLoops; i++) {
41             double start = nowMs();
42             overhead += nowMs() - start;
43         }
44         overhead /= FLAGS_overheadLoops;
45 
46         // First figure out approximately how many loops of bench it takes to make overhead
47         // negligible.
48         double bench_plus_overhead = 0.0;
49         int round = 0;
50         while (bench_plus_overhead < overhead) {
51             if (round++ == FLAGS_maxCalibrationAttempts) {
52                 TestRunner::Log("Warning: Cannot estimate loops for %s (%s vs. %s); skipping.",
53                                 fBenchmark->getUniqueName(),
54                                 humanize(bench_plus_overhead).c_str(),
55                                 humanize(overhead).c_str());
56                 return std::make_tuple(0, false);
57             }
58             bench_plus_overhead = time(1);
59         }
60 
61         // Later we'll just start and stop the timer once but loop N times.
62         // We'll pick N to make timer overhead negligible:
63         //
64         //          overhead
65         //  -------------------------  < FLAGS_overheadGoal
66         //  overhead + N * Bench Time
67         //
68         // where bench_plus_overhead ~=~ overhead + Bench Time.
69         //
70         // Doing some math, we get:
71         //
72         //  (overhead / FLAGS_overheadGoal) - overhead
73         //  ------------------------------------------  < N
74         //       bench_plus_overhead - overhead)
75         //
76         // Luckily, this also works well in practice. :)
77         const double numer = overhead / FLAGS_overheadGoal - overhead;
78         const double denom = bench_plus_overhead - overhead;
79         int loops = (int)ceil(numer / denom);
80 
81         return std::make_tuple(loops, true);
82     }
83 };
84 
85 class NonRenderingBenchmarkTarget : public RasterBenchmarkTarget {
86 public:
NonRenderingBenchmarkTarget(Benchmark * benchmark)87     NonRenderingBenchmarkTarget(Benchmark* benchmark) : RasterBenchmarkTarget(nullptr, benchmark) {}
88 
getBackend() const89     Benchmark::Backend getBackend() const override { return Benchmark::Backend::kNonRendering; }
90 
isCpuOrGpuBound() const91     SurfaceManager::CpuOrGpu isCpuOrGpuBound() const override {
92         return SurfaceManager::CpuOrGpu::kCPU;
93     }
94 
getKeyValuePairs(std::string cpuName,std::string gpuName) const95     std::map<std::string, std::string> getKeyValuePairs(std::string cpuName,
96                                                         std::string gpuName) const override {
97         if (cpuName == "") {
98             return std::map<std::string, std::string>();
99         }
100         return {
101                 {"cpu_or_gpu", "CPU"},
102                 {"cpu_or_gpu_value", cpuName},
103         };
104     }
105 };
106 
FromConfig(std::string surfaceConfig,Benchmark * benchmark)107 std::unique_ptr<BenchmarkTarget> BenchmarkTarget::FromConfig(std::string surfaceConfig,
108                                                              Benchmark* benchmark) {
109     if (surfaceConfig == "nonrendering") {
110         return std::make_unique<NonRenderingBenchmarkTarget>(benchmark);
111     }
112 
113     std::unique_ptr<SurfaceManager> surfaceManager = SurfaceManager::FromConfig(
114             surfaceConfig, {benchmark->getSize().width(), benchmark->getSize().height()});
115     if (surfaceManager == nullptr) {
116         SK_ABORT("Unknown --surfaceConfig flag value: %s.", surfaceConfig.c_str());
117     }
118 
119     return std::make_unique<RasterBenchmarkTarget>(std::move(surfaceManager), benchmark);
120 }
121