// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <cpuinfo.h>
#include <pthreadpool.h>

#include <benchmark/benchmark.h>
#include <fp16/fp16.h>

#include "bench/utils.h"
#include <xnnpack/aligned-allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/math-stubs.h>

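// Context shared with the worker threads that compute per-element ULP errors.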
struct ComputeErrorContext {
  const float* input;
  const float* output;
  float* error;
};

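// For each element in [start, start+range), compares the tested output against a
// double-precision std::exp reference and records the difference in units of the
// reference value's ULP.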
static void ComputeError(
  struct ComputeErrorContext* context,
  size_t start,
  size_t range)
{
  const float* input = context->input;
  const float* output = context->output;
  float* error = context->error;
  for (size_t i = start; i < start + range; i++) {
    const double output_ref = std::exp(double(input[i]));
    const double abs_error = std::abs(output_ref - double(output[i]));
    const float output_abs = std::abs(float(output_ref));
    const float output_ulp = uint32_as_float(float_as_uint32(output_abs) + 1) - output_abs;
    error[i] = float(abs_error / output_ulp);
  }
}

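// Measures the maximum ULP error of a f32 expminus implementation (exp(x) evaluated on
// non-positive x) and reports it through the ULPERROR benchmark counter.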
static void ExpError(benchmark::State& state,
  xnn_f32_unary_math_function exp,
  benchmark::utils::IsaCheckFunction isa_check = nullptr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("failed cpuinfo init");
    return;
  }
  if (isa_check && !isa_check(state)) {
    return;
  }

  // The smallest x for which expf(x) is normalized (-0x1.5D589Ep6f).
  const uint32_t min_input = UINT32_C(0xC2AEAC4F);
  // Number of elements in one block of inputs/outputs.
  // Combining multiple elements in a block reduces function call overhead.
  const size_t block_size = 16384;
  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
  const size_t tile_size = 64;

  uint32_t num_threads = cpuinfo_get_cores_count();
  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
    // Use all cores except for the least performant cluster
    if (cpuinfo_get_clusters_count() > 1) {
      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
    }
  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64

  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
    pthreadpool_create(num_threads), pthreadpool_destroy);

  std::vector<float, AlignedAllocator<float, 64>> x(block_size);
  std::vector<float, AlignedAllocator<float, 64>> y(block_size);
  std::vector<float> ulp_error(block_size);
  float max_ulp_error = 0.0f;

  ComputeErrorContext context;
  context.input = x.data();
  context.output = y.data();
  context.error = ulp_error.data();
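  // Each benchmark iteration sweeps every negative input, from the smallest x with a
  // normalized expf(x) up to -0.0, one block of block_size elements at a time.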
  for (auto _ : state) {
    for (uint32_t n = min_input; int32_t(n) < 0; n -= block_size) {
      for (uint32_t i = 0; i < block_size; i++) {
        x[i] = uint32_as_float(std::max<uint32_t>(n - i, 0x80000000));
      }
      std::fill(y.begin(), y.end(), std::nanf(""));

      exp(block_size * sizeof(float), x.data(), y.data());

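      // Compute the ULP error of each output in parallel, tile_size elements per task.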
      pthreadpool_parallelize_1d_tile_1d(
        threadpool.get(),
        reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
        static_cast<void*>(&context),
        block_size, tile_size, 0 /* flags */);

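      // Fold this block's per-element errors into the running maximum.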
      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
    }
  }

  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
}
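
// Each registered benchmark runs a single iteration and reports the worst-case ULP error;
// the optional ISA check skips variants the host CPU cannot execute.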

#if XNN_ARCH_ARM || XNN_ARCH_ARM64
  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_lut64_p2,
                    xnn_math_f32_expminus__neonfma_rr2_lut64_p2,
                    benchmark::utils::CheckNEONFMA)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_lut2048_p1,
                    xnn_math_f32_expminus__neonfma_rr2_lut2048_p1,
                    benchmark::utils::CheckNEONFMA)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(ExpError, neonfma_rr2_p5,
                    xnn_math_f32_expminus__neonfma_rr2_p5,
                    benchmark::utils::CheckNEONFMA)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64

#if XNN_ARCH_X86 || XNN_ARCH_X86_64
  BENCHMARK_CAPTURE(ExpError, avx2_rr1_p5,
                    xnn_math_f32_expminus__avx2_rr1_p5,
                    benchmark::utils::CheckAVX2)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(ExpError, avx2_rr2_p5,
                    xnn_math_f32_expminus__avx2_rr2_p5,
                    benchmark::utils::CheckAVX2)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);

  BENCHMARK_CAPTURE(ExpError, sse2_rr2_p5,
                    xnn_math_f32_expminus__sse2_rr2_p5)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64

BENCHMARK_CAPTURE(ExpError, scalar_rr2_lut64_p2,
                  xnn_math_f32_expminus__scalar_rr2_lut64_p2)
  ->Unit(benchmark::kMillisecond)
  ->Iterations(1);
BENCHMARK_CAPTURE(ExpError, scalar_rr2_lut2048_p1,
                  xnn_math_f32_expminus__scalar_rr2_lut2048_p1)
  ->Unit(benchmark::kMillisecond)
  ->Iterations(1);
BENCHMARK_CAPTURE(ExpError, scalar_rr2_p5,
                  xnn_math_f32_expminus__scalar_rr2_p5)
  ->Unit(benchmark::kMillisecond)
  ->Iterations(1);

#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif