// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <cpuinfo.h>
#include <pthreadpool.h>

#include <benchmark/benchmark.h>
#include <fp16/fp16.h>

#include "bench/utils.h"
#include <xnnpack/aligned-allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>


struct ComputeErrorContext {
  const uint16_t* input;
  const uint16_t* output;
  float* error;
};

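// Computes, for each element in [start, start + range), the difference between
// the evaluated half-precision result and a float32 reference std::exp value,
// expressed in units of the half-precision ULP at the reference. The ULP is the
// gap to the next representable half-precision number, obtained by incrementing
// the bit pattern of the (absolute) reference value by one.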
static void ComputeError(
  struct ComputeErrorContext* context,
  size_t start,
  size_t range)
{
  const uint16_t* input = context->input;
  const uint16_t* output = context->output;
  float* error = context->error;
  for (size_t i = start; i < start + range; i++) {
    const float output_ref = std::exp(fp16_ieee_to_fp32_value(input[i]));
    const float abs_error = std::abs(output_ref - fp16_ieee_to_fp32_value(output[i]));
    const uint16_t output_abs = fp16_ieee_from_fp32_value(std::abs(output_ref));
    const float output_ulp = fp16_ieee_to_fp32_value(output_abs + 1) - fp16_ieee_to_fp32_value(output_abs);
    error[i] = float(abs_error / output_ulp);
  }
}
49 
ExpError(benchmark::State & state,xnn_f16_unary_math_function exp,benchmark::utils::IsaCheckFunction isa_check=nullptr)50 static void ExpError(
51   benchmark::State& state,
52   xnn_f16_unary_math_function exp,
53   benchmark::utils::IsaCheckFunction isa_check = nullptr)
54 {
55   if (!cpuinfo_initialize()) {
56     state.SkipWithError("failed cpuinfo init");
57     return;
58   }
59   if (isa_check && !isa_check(state)) {
60     return;
61   }
62 
63   // The smallest x for which exph(x) is non-zero (-0x2.2A8p+3h).
64   const uint16_t min_input = UINT16_C(0xCC55);
65   // The largest x for which exph(x) is finite (0x1.63Cp+3h).
66   const uint16_t max_input = UINT16_C(0x498F);
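  // In decimal, these bit patterns decode to -17.328125 and 11.1171875.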

  // Number of elements in one block of inputs/outputs.
  // Combining multiple elements in a block reduces function call overhead.
  const size_t block_size = 16384;
  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
  const size_t tile_size = 64;

  uint32_t num_threads = cpuinfo_get_cores_count();
  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
    // Use all cores except for the least performant cluster
    if (cpuinfo_get_clusters_count() > 1) {
      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
    }
  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64

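  // Owning the pool with a unique_ptr guarantees pthreadpool_destroy runs on
  // every exit path. Should pthreadpool_create fail and return null, pthreadpool
  // runs the parallelized tasks on the calling thread instead.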
  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
    pthreadpool_create(num_threads), pthreadpool_destroy);

  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> x(block_size);
  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> y(block_size);
  std::vector<float> ulp_error(block_size);
  float max_ulp_error = 0.0f;

  ComputeErrorContext context;
  context.input = x.data();
  context.output = y.data();
  context.error = ulp_error.data();
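  // Sweep the whole non-trivial domain of exph in two passes over raw bit
  // patterns: negative inputs from min_input (0xCC55) up to -0.0 (0x8000), then
  // non-negative inputs from +0.0 (0x0000) up to max_input (0x498F).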
  for (auto _ : state) {
    for (uint16_t n = min_input; int16_t(n) < 0; n -= block_size) {
      for (uint16_t i = 0; i < block_size; i++) {
        x[i] = std::max<uint16_t>(n - i, UINT16_C(0x8000));
      }
      std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);
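      // Pre-filling the outputs with a half-precision NaN ensures that elements
      // the kernel leaves unwritten are not mistaken for valid results.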

      exp(block_size * sizeof(uint16_t), x.data(), y.data());

      pthreadpool_parallelize_1d_tile_1d(
          threadpool.get(),
          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
          static_cast<void*>(&context),
          block_size, tile_size, 0 /* flags */);

      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
    }
    for (uint16_t n = 0; n < max_input; n += block_size) {
      for (uint16_t i = 0; i < block_size; i++) {
        x[i] = std::min<uint16_t>(n + i, max_input);
      }
      std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);

      exp(block_size * sizeof(uint16_t), x.data(), y.data());

      pthreadpool_parallelize_1d_tile_1d(
          threadpool.get(),
          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
          static_cast<void*>(&context),
          block_size, tile_size, 0 /* flags */);

      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
    }
  }

  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
}

#if XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)
  BENCHMARK_CAPTURE(ExpError, neonfp16arith_rr2_p3,
                    xnn_math_f16_exp__neonfp16arith_rr2_p3,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)

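// A typical invocation, assuming this file builds into a standalone Google
// Benchmark binary (the name below is illustrative):
//   ./f16-exp-ulp --benchmark_filter=ExpError
// The worst-case error over the sweep is reported in the ULPERROR counter.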
#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif