// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>

#include <xnnpack.h>
#include <xnnpack/aligned-allocator.h>
#include <xnnpack/math.h>
#include <xnnpack/microfnptr.h>


extern XNN_INTERNAL const uint16_t xnn_table_vlog[129];
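// xnn_table_vlog has 129 entries: one per interpolation segment (128
// segments, see log_segments_log2 below) plus a final endpoint, so the
// reference code can read xnn_table_vlog[base_seg + 1] for every segment.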

class VLogMicrokernelTester {
 public:
  inline VLogMicrokernelTester& batch(size_t batch) {
    assert(batch != 0);
    this->batch_ = batch;
    return *this;
  }

  inline size_t batch() const {
    return this->batch_;
  }

  inline VLogMicrokernelTester& input_lshift(uint32_t input_lshift) {
    assert(input_lshift < 32);
    this->input_lshift_ = input_lshift;
    return *this;
  }

  inline uint32_t input_lshift() const {
    return this->input_lshift_;
  }

  inline VLogMicrokernelTester& output_scale(uint32_t output_scale) {
    this->output_scale_ = output_scale;
    return *this;
  }

  inline uint32_t output_scale() const {
    return this->output_scale_;
  }

  inline VLogMicrokernelTester& inplace(bool inplace) {
    this->inplace_ = inplace;
    return *this;
  }

  inline bool inplace() const {
    return this->inplace_;
  }

  inline VLogMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_u32_vlog_ukernel_function vlog) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto i16rng = std::bind(std::uniform_int_distribution<uint16_t>(), std::ref(rng));
    auto i32rng = std::bind(std::uniform_int_distribution<uint32_t>(), std::ref(rng));

    std::vector<uint32_t> x(batch() + XNN_EXTRA_BYTES / sizeof(uint32_t));
    std::vector<uint16_t> y(batch() * (inplace() ? sizeof(uint32_t) / sizeof(uint16_t) : 1) + XNN_EXTRA_BYTES / sizeof(uint32_t));
    std::vector<uint16_t> y_ref(batch());
    const uint32_t* x_data = inplace() ? reinterpret_cast<const uint32_t*>(y.data()) : x.data();
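    // When testing in-place operation, the kernel reads its uint32_t inputs
    // from the same buffer it writes its uint16_t outputs to, so y is sized
    // to hold batch() uint32_t values and x_data aliases y's storage.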

    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), std::ref(i32rng));
      std::generate(y.begin(), y.end(), std::ref(i16rng));
      std::generate(y_ref.begin(), y_ref.end(), std::ref(i16rng));

      // Compute reference results.
      for (size_t n = 0; n < batch(); n++) {
        const uint32_t x_value = x_data[n];
        const uint32_t scaled = x_value << input_lshift();
        uint32_t log_value = 0;
        if (scaled != 0) {
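          // The reference below mirrors the kernel's fixed-point arithmetic:
          // it approximates output_scale() * ln(x << input_lshift()) in Q16.16
          // and is expected to match the kernel bit-for-bit (ASSERT_EQ below).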
          const uint32_t out_scale = output_scale();

          const int log_scale = 65536;
          const int log_scale_log2 = 16;
          const int log_coeff = 45426;
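          // log_scale = 2^16 sets the Q16.16 fixed-point scale, and
          // log_coeff = 45426 ~= ln(2) * 65536 converts base-2 logarithms
          // to natural logarithms further down.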
          const uint32_t log2x = math_clz_nonzero_u32(scaled) ^ 31;  // log2 of scaled
          assert(log2x < 32);
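          // For nonzero 32-bit values, clz(scaled) ^ 31 == 31 - clz(scaled),
          // the position of the highest set bit, so log2x = floor(log2(scaled)).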

          // Number of segments in the log lookup table. The table will be log_segments+1
          // in length (with some padding).
          const int log_segments_log2 = 7;

          // Part 1: strip the leading one bit, leaving the mantissa of scaled.
          uint32_t frac = scaled - (UINT32_C(1) << log2x);

          // Shift the fractional part into msb of 16 bits
          frac = XNN_UNPREDICTABLE(log2x < log_scale_log2) ?
              (frac << (log_scale_log2 - log2x)) :
              (frac >> (log2x - log_scale_log2));
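          // frac is now the mantissa (scaled - 2^log2x) renormalized to Q16,
          // i.e. an integer in [0, 65536).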

          // Part 2: piecewise-linear interpolation through the lookup table.
          const uint32_t base_seg = frac >> (log_scale_log2 - log_segments_log2);
          const uint32_t seg_unit = (UINT32_C(1) << log_scale_log2) >> log_segments_log2;

          assert(128 == (1 << log_segments_log2));
          assert(base_seg < (1 << log_segments_log2));

          const uint32_t c0 = xnn_table_vlog[base_seg];
          const uint32_t c1 = xnn_table_vlog[base_seg + 1];
          const uint32_t seg_base = seg_unit * base_seg;
          const uint32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> log_scale_log2;
          const uint32_t fraction = frac + c0 + rel_pos;
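          // c0 and c1 are the table corrections bounding this segment;
          // rel_pos linearly interpolates between them, and fraction
          // approximates the fractional part of log2(scaled) in Q16.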

          const uint32_t log2 = (log2x << log_scale_log2) + fraction;
          const uint32_t round = log_scale / 2;
          const uint32_t loge = (((uint64_t) log_coeff) * log2 + round) >> log_scale_log2;

          // Finally scale to our output scale
          log_value = (out_scale * loge + round) >> log_scale_log2;
        }

        const uint32_t vout = math_min_u32(log_value, (uint32_t) INT16_MAX);
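        // Clamp to INT16_MAX: outputs saturate at 32767 even though they are
        // stored as uint16_t.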
        y_ref[n] = vout;
      }

      // Call optimized micro-kernel.
      vlog(batch(), x_data, input_lshift(), output_scale(), y.data());

      // Verify results.
      for (size_t n = 0; n < batch(); n++) {
        ASSERT_EQ(y[n], y_ref[n])
          << "at batch " << n << " / " << batch()
          << ", input_lshift " << input_lshift()
          << ", output_scale " << output_scale();
      }
    }
  }

 private:
  size_t batch_{1};
  uint32_t input_lshift_{4};
  uint32_t output_scale_{16};
  bool inplace_{false};
  size_t iterations_{15};
};
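
// Example usage sketch (comment only, not compiled): a typical gtest case
// configures the tester through its fluent setters and hands it a u32-vlog
// microkernel. The kernel name below follows XNNPACK's naming convention but
// is illustrative here, not a guarantee that this exact variant exists:
//
//   TEST(U32_VLOG__SCALAR_X1, batch_eq_1) {
//     VLogMicrokernelTester()
//       .batch(1)
//       .input_lshift(4)
//       .output_scale(16)
//       .Test(xnn_u32_vlog_ukernel__scalar_x1);
//   }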