// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <cpuinfo.h>
#include <pthreadpool.h>

#include <benchmark/benchmark.h>
#include <fp16/fp16.h>

#include "bench/utils.h"
#include <xnnpack/aligned-allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>


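// Context shared by ComputeError tasks: fp16 inputs and outputs for one block,
// plus a buffer that receives the per-element error measured in output ULPs.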
struct ComputeErrorContext {
  const uint16_t* input;
  const uint16_t* output;
  float* error;
};

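// For each element in [start, start+range), compute the difference between the
// fp16 output of the kernel under test and a float reference sigmoid,
// expressed in units in the last place (ULP) of the fp16 result.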
static void ComputeError(
  struct ComputeErrorContext* context,
  size_t start,
  size_t range)
{
  const uint16_t* input = context->input;
  const uint16_t* output = context->output;
  float* error = context->error;
  for (size_t i = start; i < start + range; i++) {
    const float input_val = fp16_ieee_to_fp32_value(input[i]);
    float output_ref = 0.0f;
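    // Evaluate the reference sigmoid in a numerically stable form: the
    // argument of std::exp is kept non-positive in both branches, so the
    // exponential never overflows.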
    if (input_val < 0.0f) {
      const float exp_val = std::exp(input_val);
      output_ref = exp_val / (1.0f + exp_val);
    } else {
      output_ref = 1.0f / (1.0f + std::exp(-input_val));
    }
    const float abs_error = std::abs(output_ref - fp16_ieee_to_fp32_value(output[i]));
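    // One ULP of the result: the distance between |reference| rounded to fp16
    // and the next representable fp16 value (bit pattern incremented by one).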
    const uint16_t output_abs = fp16_ieee_from_fp32_value(std::abs(output_ref));
    const float output_ulp = fp16_ieee_to_fp32_value(output_abs + 1) - fp16_ieee_to_fp32_value(output_abs);
    error[i] = float(abs_error / output_ulp);
  }
}

static void SigmoidError(benchmark::State& state,
  xnn_f16_unary_math_function sigmoid,
  benchmark::utils::IsaCheckFunction isa_check = nullptr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("failed cpuinfo init");
    return;
  }
  if (isa_check && !isa_check(state)) {
    return;
  }

  // The smallest x for which sigmoidf(x) is normalized (-0x1.368p+3h).
  const uint16_t min_input = UINT16_C(0xC8DA);
  // The largest x for which sigmoidf(x) is not 1.0f (0x1.0A0p+3h).
  const uint16_t max_input = UINT16_C(0x4828);
  // Number of elements in one block of inputs/outputs.
  // Combining multiple elements in a block reduces function call overhead.
  const size_t block_size = 16384;
  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
  const size_t tile_size = 64;

  uint32_t num_threads = cpuinfo_get_cores_count();
  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
    // Use all cores except for the least performant cluster
    if (cpuinfo_get_clusters_count() > 1) {
      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
    }
  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64

  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
    pthreadpool_create(num_threads), pthreadpool_destroy);

  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> x(block_size);
  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> y(block_size);
  std::vector<float> ulp_error(block_size);
  float max_ulp_error = 0.0f;

  ComputeErrorContext context;
  context.input = x.data();
  context.output = y.data();
  context.error = ulp_error.data();
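  // On each benchmark iteration, sweep the fp16 input range of interest in
  // fixed-size blocks: first all negative inputs, then all non-negative
  // inputs up to max_input. Each block is run through the sigmoid kernel
  // under test, and per-element ULP errors are computed in parallel.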
  for (auto _ : state) {
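    // Negative inputs: n steps through the fp16 bit patterns with the sign
    // bit set, covering every value from min_input up to -0; indices past the
    // end of the range are clamped to 0x8000 (-0.0h).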
    for (uint16_t n = min_input; int16_t(n) < 0; n -= block_size) {
      for (uint16_t i = 0; i < block_size; i++) {
        x[i] = std::max<uint16_t>(n - i, UINT16_C(0x8000));
      }
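      // Pre-fill the outputs with NaN so that stale results from a previous
      // block cannot be mistaken for outputs of the kernel under test.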
      std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);

      sigmoid(block_size * sizeof(uint16_t), x.data(), y.data());

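      // Evaluate the ULP error of every element in the block in parallel,
      // tile_size elements per pthreadpool task.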
      pthreadpool_parallelize_1d_tile_1d(
        threadpool.get(),
        reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
        static_cast<void*>(&context),
        block_size, tile_size, 0 /* flags */);

      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
    }
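    // Non-negative inputs: n steps through the fp16 bit patterns from +0 up
    // to max_input; indices past the end of the range are clamped to max_input.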
    for (uint16_t n = 0; n < max_input; n += block_size) {
      for (uint16_t i = 0; i < block_size; i++) {
        x[i] = std::min<uint16_t>(n + i, max_input);
      }
      std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);

      sigmoid(block_size * sizeof(uint16_t), x.data(), y.data());

      pthreadpool_parallelize_1d_tile_1d(
        threadpool.get(),
        reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
        static_cast<void*>(&context),
        block_size, tile_size, 0 /* flags */);

      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
    }
  }

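  // Report the worst-case error, in fp16 ULPs, observed across the full sweep.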
  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
}

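// Each BENCHMARK_CAPTURE below registers one f16 sigmoid implementation for
// accuracy evaluation. A single iteration suffices because the input sweep is
// exhaustive and deterministic.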
#if XNN_ENABLE_ARM_FP16 && XNN_ARCH_ARM64
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr1_p2_div,
                    xnn_math_f16_sigmoid__neonfp16arith_rr1_p2_div,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr1_p3_div,
                    xnn_math_f16_sigmoid__neonfp16arith_rr1_p3_div,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p2_div,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p2_div,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p3_div,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p3_div,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ENABLE_ARM_FP16 && XNN_ARCH_ARM64

#if XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p2_nr1fma,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p2_nr1fma,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p2_nr1recps,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p2_nr1recps,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p2_recpe,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p2_recpe,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p3_nr1fma,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p3_nr1fma,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p3_nr1recps,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p3_nr1recps,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, neonfp16arith_rr2_p3_recpe,
                    xnn_math_f16_sigmoid__neonfp16arith_rr2_p3_recpe,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)

#if XNN_ARCH_X86 || XNN_ARCH_X86_64
  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_p2_div,
                    xnn_math_f16_sigmoid__avx2_rr1_p2_div,
                    benchmark::utils::CheckAVX2)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_p2_rcp,
                    xnn_math_f16_sigmoid__avx2_rr1_p2_rcp,
                    benchmark::utils::CheckAVX2)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_p3_div,
                    xnn_math_f16_sigmoid__avx2_rr1_p3_div,
                    benchmark::utils::CheckAVX2)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(SigmoidError, avx2_rr1_p3_rcp,
                    xnn_math_f16_sigmoid__avx2_rr1_p3_rcp,
                    benchmark::utils::CheckAVX2)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64

#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif