// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <initializer_list>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <xnnpack.h>

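// Tester for XNNPACK's N-dimensional constant-pad operators, covering the
// 8-, 16-, and 32-bit element variants. Shapes, paddings, and the iteration
// count are set through chained setters; for example (hypothetical shapes):
//
//   ConstantPadOperatorTester()
//     .input_shape({5, 6, 7})
//     .pre_paddings({1, 0, 2})
//     .post_paddings({0, 3, 1})
//     .TestX32();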
class ConstantPadOperatorTester {
 public:
  inline ConstantPadOperatorTester& input_shape(std::initializer_list<size_t> input_shape) {
    assert(input_shape.size() <= XNN_MAX_TENSOR_DIMS);
    input_shape_ = std::vector<size_t>(input_shape);
    return *this;
  }

  inline const std::vector<size_t>& input_shape() const {
    return input_shape_;
  }

  inline size_t input_dim(size_t i) const {
    return i < input_shape_.size() ? input_shape_[i] : 1;
  }

  inline size_t num_dims() const {
    return input_shape_.size();
  }

  inline size_t num_input_elements() const {
    return std::accumulate(
      input_shape_.cbegin(), input_shape_.cend(), size_t(1), std::multiplies<size_t>());
  }

  inline ConstantPadOperatorTester& pre_paddings(std::initializer_list<size_t> pre_paddings) {
    assert(pre_paddings.size() <= XNN_MAX_TENSOR_DIMS);
    pre_paddings_ = std::vector<size_t>(pre_paddings);
    return *this;
  }

  inline const std::vector<size_t>& pre_paddings() const {
    return pre_paddings_;
  }

  inline size_t pre_padding(size_t i) const {
    return i < pre_paddings_.size() ? pre_paddings_[i] : 0;
  }

  inline size_t num_pre_paddings() const {
    return pre_paddings_.size();
  }

  inline ConstantPadOperatorTester& post_paddings(std::initializer_list<size_t> post_paddings) {
    assert(post_paddings.size() <= XNN_MAX_TENSOR_DIMS);
    post_paddings_ = std::vector<size_t>(post_paddings);
    return *this;
  }

  inline const std::vector<size_t>& post_paddings() const {
    return post_paddings_;
  }

  inline size_t post_padding(size_t i) const {
    return i < post_paddings_.size() ? post_paddings_[i] : 0;
  }

  inline size_t num_post_paddings() const {
    return post_paddings_.size();
  }

  inline size_t output_dim(size_t i) const {
    return pre_padding(i) + input_dim(i) + post_padding(i);
  }

  inline size_t num_output_elements() const {
    size_t elements = 1;
    for (size_t i = 0; i < num_dims(); i++) {
      elements *= output_dim(i);
    }
    return elements;
  }

  inline ConstantPadOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void TestX8() const {
    ASSERT_EQ(num_dims(), num_pre_paddings());
    ASSERT_EQ(num_dims(), num_post_paddings());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_int_distribution<int32_t> u8dist(
      std::numeric_limits<uint8_t>::min(), std::numeric_limits<uint8_t>::max());

    // Compute generalized shapes.
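    // Shapes are right-aligned into XNN_MAX_TENSOR_DIMS entries: any missing
    // leading (outer) dimensions default to size 1 with zero padding, so the
    // fixed-depth loops below handle every rank up to XNN_MAX_TENSOR_DIMS.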
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_pre_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_post_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims;
    std::fill(input_dims.begin(), input_dims.end(), 1);
    std::fill(input_pre_paddings.begin(), input_pre_paddings.end(), 0);
    std::fill(input_post_paddings.begin(), input_post_paddings.end(), 0);
    std::fill(output_dims.begin(), output_dims.end(), 1);
    for (size_t i = 0; i < num_dims(); i++) {
      input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i);
      input_pre_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = pre_padding(i);
      input_post_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = post_padding(i);
      output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i);
    }

    // Compute generalized strides.
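    // Strides count elements in a dense row-major layout: the innermost
    // dimension has stride 1, and each outer stride is the product of all
    // inner dimension sizes.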
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_strides;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_strides;
    size_t input_stride = 1, output_stride = 1;
    for (size_t i = XNN_MAX_TENSOR_DIMS; i != 0; i--) {
      input_strides[i - 1] = input_stride;
      output_strides[i - 1] = output_stride;
      input_stride *= input_dims[i - 1];
      output_stride *= output_dims[i - 1];
    }

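    // The input is over-allocated by XNN_EXTRA_BYTES so that vectorized
    // kernels may safely read slightly past the last element.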
    std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + num_input_elements());
    std::vector<uint8_t> output(num_output_elements());
    std::vector<uint8_t> output_ref(num_output_elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return u8dist(rng); });
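      // Pre-fill the output with a sentinel so any element the operator
      // fails to overwrite is detectable.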
      std::fill(output.begin(), output.end(), UINT8_C(0xAA));
      const uint8_t padding_value = u8dist(rng);

      // Compute reference results.
      std::fill(output_ref.begin(), output_ref.end(), padding_value);
      for (size_t i = 0; i < input_dims[0]; i++) {
        for (size_t j = 0; j < input_dims[1]; j++) {
          for (size_t k = 0; k < input_dims[2]; k++) {
            for (size_t l = 0; l < input_dims[3]; l++) {
              for (size_t m = 0; m < input_dims[4]; m++) {
                for (size_t n = 0; n < input_dims[5]; n++) {
                  const size_t output_index =
                    (i + input_pre_paddings[0]) * output_strides[0] +
                    (j + input_pre_paddings[1]) * output_strides[1] +
                    (k + input_pre_paddings[2]) * output_strides[2] +
                    (l + input_pre_paddings[3]) * output_strides[3] +
                    (m + input_pre_paddings[4]) * output_strides[4] +
                    (n + input_pre_paddings[5]) * output_strides[5];
                  const size_t input_index =
                    i * input_strides[0] + j * input_strides[1] + k * input_strides[2] +
                    l * input_strides[3] + m * input_strides[4] + n * input_strides[5];
                  output_ref[output_index] = input[input_index];
                }
              }
            }
          }
        }
      }

      // Create, setup, run, and destroy a constant pad operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t pad_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_constant_pad_nd_x8(
          &padding_value, 0, &pad_op));
      ASSERT_NE(nullptr, pad_op);

      // Smart pointer to automatically delete pad_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_pad_op(pad_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_constant_pad_nd_x8(
          pad_op,
          num_dims(),
          input_shape().data(), pre_paddings().data(), post_paddings().data(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(pad_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < output_dims[0]; i++) {
        for (size_t j = 0; j < output_dims[1]; j++) {
          for (size_t k = 0; k < output_dims[2]; k++) {
            for (size_t l = 0; l < output_dims[3]; l++) {
              for (size_t m = 0; m < output_dims[4]; m++) {
                for (size_t n = 0; n < output_dims[5]; n++) {
                  const size_t index =
                    i * output_strides[0] + j * output_strides[1] + k * output_strides[2] +
                    l * output_strides[3] + m * output_strides[4] + n * output_strides[5];
                  ASSERT_EQ(output[index], output_ref[index])
                    << "(i, j, k, l, m, n) = ("
                    << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ")"
                    << ", padding value = " << uint32_t(padding_value);
                }
              }
            }
          }
        }
      }
    }
  }

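  // Same flow as TestX8, but with 16-bit elements.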
  void TestX16() const {
    ASSERT_EQ(num_dims(), num_pre_paddings());
    ASSERT_EQ(num_dims(), num_post_paddings());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_int_distribution<uint16_t> u16dist;

    // Compute generalized shapes.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_pre_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_post_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims;
    std::fill(input_dims.begin(), input_dims.end(), 1);
    std::fill(input_pre_paddings.begin(), input_pre_paddings.end(), 0);
    std::fill(input_post_paddings.begin(), input_post_paddings.end(), 0);
    std::fill(output_dims.begin(), output_dims.end(), 1);
    for (size_t i = 0; i < num_dims(); i++) {
      input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i);
      input_pre_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = pre_padding(i);
      input_post_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = post_padding(i);
      output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i);
    }

    // Compute generalized strides.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_strides;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_strides;
    size_t input_stride = 1, output_stride = 1;
    for (size_t i = XNN_MAX_TENSOR_DIMS; i != 0; i--) {
      input_strides[i - 1] = input_stride;
      output_strides[i - 1] = output_stride;
      input_stride *= input_dims[i - 1];
      output_stride *= output_dims[i - 1];
    }

    std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + num_input_elements());
    std::vector<uint16_t> output(num_output_elements());
    std::vector<uint16_t> output_ref(num_output_elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return u16dist(rng); });
      std::fill(output.begin(), output.end(), UINT16_C(0xDEAD));
      const uint16_t padding_value = u16dist(rng);

      // Compute reference results.
      std::fill(output_ref.begin(), output_ref.end(), padding_value);
      for (size_t i = 0; i < input_dims[0]; i++) {
        for (size_t j = 0; j < input_dims[1]; j++) {
          for (size_t k = 0; k < input_dims[2]; k++) {
            for (size_t l = 0; l < input_dims[3]; l++) {
              for (size_t m = 0; m < input_dims[4]; m++) {
                for (size_t n = 0; n < input_dims[5]; n++) {
                  const size_t output_index =
                    (i + input_pre_paddings[0]) * output_strides[0] +
                    (j + input_pre_paddings[1]) * output_strides[1] +
                    (k + input_pre_paddings[2]) * output_strides[2] +
                    (l + input_pre_paddings[3]) * output_strides[3] +
                    (m + input_pre_paddings[4]) * output_strides[4] +
                    (n + input_pre_paddings[5]) * output_strides[5];
                  const size_t input_index =
                    i * input_strides[0] + j * input_strides[1] + k * input_strides[2] +
                    l * input_strides[3] + m * input_strides[4] + n * input_strides[5];
                  output_ref[output_index] = input[input_index];
                }
              }
            }
          }
        }
      }

      // Create, setup, run, and destroy a constant pad operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t pad_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_constant_pad_nd_x16(
          &padding_value, 0, &pad_op));
      ASSERT_NE(nullptr, pad_op);

      // Smart pointer to automatically delete pad_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_pad_op(pad_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_constant_pad_nd_x16(
          pad_op,
          num_dims(),
          input_shape().data(), pre_paddings().data(), post_paddings().data(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(pad_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < output_dims[0]; i++) {
        for (size_t j = 0; j < output_dims[1]; j++) {
          for (size_t k = 0; k < output_dims[2]; k++) {
            for (size_t l = 0; l < output_dims[3]; l++) {
              for (size_t m = 0; m < output_dims[4]; m++) {
                for (size_t n = 0; n < output_dims[5]; n++) {
                  const size_t index =
                    i * output_strides[0] + j * output_strides[1] + k * output_strides[2] +
                    l * output_strides[3] + m * output_strides[4] + n * output_strides[5];
                  ASSERT_EQ(output[index], output_ref[index])
                    << "(i, j, k, l, m, n) = ("
                    << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ")"
                    << ", padding value = " << padding_value;
                }
              }
            }
          }
        }
      }
    }
  }

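  // Same flow as TestX8, but with 32-bit elements.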
  void TestX32() const {
    ASSERT_EQ(num_dims(), num_pre_paddings());
    ASSERT_EQ(num_dims(), num_post_paddings());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    std::uniform_int_distribution<uint32_t> u32dist;

    // Compute generalized shapes.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_pre_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_post_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims;
    std::fill(input_dims.begin(), input_dims.end(), 1);
    std::fill(input_pre_paddings.begin(), input_pre_paddings.end(), 0);
    std::fill(input_post_paddings.begin(), input_post_paddings.end(), 0);
    std::fill(output_dims.begin(), output_dims.end(), 1);
    for (size_t i = 0; i < num_dims(); i++) {
      input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i);
      input_pre_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = pre_padding(i);
      input_post_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = post_padding(i);
      output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i);
    }

    // Compute generalized strides.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_strides;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_strides;
    size_t input_stride = 1, output_stride = 1;
    for (size_t i = XNN_MAX_TENSOR_DIMS; i != 0; i--) {
      input_strides[i - 1] = input_stride;
      output_strides[i - 1] = output_stride;
      input_stride *= input_dims[i - 1];
      output_stride *= output_dims[i - 1];
    }

    std::vector<uint32_t> input(XNN_EXTRA_BYTES / sizeof(uint32_t) + num_input_elements());
    std::vector<uint32_t> output(num_output_elements());
    std::vector<uint32_t> output_ref(num_output_elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), [&]() { return u32dist(rng); });
      std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF));
      const uint32_t padding_value = u32dist(rng);

      // Compute reference results.
      std::fill(output_ref.begin(), output_ref.end(), padding_value);
      for (size_t i = 0; i < input_dims[0]; i++) {
        for (size_t j = 0; j < input_dims[1]; j++) {
          for (size_t k = 0; k < input_dims[2]; k++) {
            for (size_t l = 0; l < input_dims[3]; l++) {
              for (size_t m = 0; m < input_dims[4]; m++) {
                for (size_t n = 0; n < input_dims[5]; n++) {
                  const size_t output_index =
                    (i + input_pre_paddings[0]) * output_strides[0] +
                    (j + input_pre_paddings[1]) * output_strides[1] +
                    (k + input_pre_paddings[2]) * output_strides[2] +
                    (l + input_pre_paddings[3]) * output_strides[3] +
                    (m + input_pre_paddings[4]) * output_strides[4] +
                    (n + input_pre_paddings[5]) * output_strides[5];
                  const size_t input_index =
                    i * input_strides[0] + j * input_strides[1] + k * input_strides[2] +
                    l * input_strides[3] + m * input_strides[4] + n * input_strides[5];
                  output_ref[output_index] = input[input_index];
                }
              }
            }
          }
        }
      }

      // Create, setup, run, and destroy a constant pad operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t pad_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_constant_pad_nd_x32(
          &padding_value, 0, &pad_op));
      ASSERT_NE(nullptr, pad_op);

      // Smart pointer to automatically delete pad_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_pad_op(pad_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_constant_pad_nd_x32(
          pad_op,
          num_dims(),
          input_shape().data(), pre_paddings().data(), post_paddings().data(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(pad_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < output_dims[0]; i++) {
        for (size_t j = 0; j < output_dims[1]; j++) {
          for (size_t k = 0; k < output_dims[2]; k++) {
            for (size_t l = 0; l < output_dims[3]; l++) {
              for (size_t m = 0; m < output_dims[4]; m++) {
                for (size_t n = 0; n < output_dims[5]; n++) {
                  const size_t index =
                    i * output_strides[0] + j * output_strides[1] + k * output_strides[2] +
                    l * output_strides[3] + m * output_strides[4] + n * output_strides[5];
                  ASSERT_EQ(output[index], output_ref[index])
                    << "(i, j, k, l, m, n) = ("
                    << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ")"
                    << ", padding value = " << padding_value;
                }
              }
            }
          }
        }
      }
    }
  }

 private:
  std::vector<size_t> input_shape_;
  std::vector<size_t> pre_paddings_;
  std::vector<size_t> post_paddings_;
  size_t iterations_{3};
};