/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
8
9 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10 #include <executorch/kernels/test/TestUtil.h>
11 #include <executorch/kernels/test/supported_features.h>
12 #include <executorch/runtime/core/exec_aten/exec_aten.h>
13 #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
14 #include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
15 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
16 #include <executorch/test/utils/DeathTest.h>
17 #include <gtest/gtest.h>
18 #include <cmath>
19
20 using namespace ::testing;
21 using exec_aten::ArrayRef;
22 using exec_aten::optional;
23 using exec_aten::Scalar;
24 using exec_aten::ScalarType;
25 using exec_aten::Tensor;
26 using torch::executor::testing::TensorFactory;
27
// Test fixture for the var.out operator (variance reduction with a
// `bool unbiased` flag). Wraps the generated kernel entry point and provides
// templated helpers so each (input dtype, output dtype) pair can be swept
// by the TEST_F macro expansions below.
class OpVarOutTest : public OperatorTest {
 protected:
  // Thin wrapper over the generated var.out kernel; forwards the fixture's
  // runtime context (context_) along with all operator arguments and returns
  // the kernel's result reference.
  Tensor& op_var_out(
      const Tensor& self,
      optional<ArrayRef<int64_t>> dim,
      bool unbiased,
      bool keepdim,
      Tensor& out) {
    return torch::executor::aten::var_outf(
        context_, self, dim, unbiased, keepdim, out);
  }

  // Expects kernel failure for malformed dim lists on a {2, 3, 4} input:
  // (1) a dim that is out of range for a rank-3 tensor, and
  // (2) the same dim repeated within the list.
  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_var_out_invalid_dimensions() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;

    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on
    Tensor out = tf_out.zeros({2, 3, 1});
    // NOTE(review): `dtype` is never passed to op_var_out — presumably
    // leftover scaffolding from a dtype-taking overload.
    optional<ScalarType> dtype = OUT_DTYPE;

    // out-of-bound dim in dim list (valid dims for rank 3 are [-3, 2])
    int64_t dims_1[1] = {3};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_var_out(
            self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));

    // the same dim appears multiple times in list of dims
    int64_t dims_2[2] = {2, 2};
    optional_dim_list = ArrayRef<int64_t>{dims_2, 2};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_var_out(
            self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));
  }

  // Expects kernel failure when `out` has the wrong shape for the requested
  // reduction over dim 1, both with keepdim=true and keepdim=false.
  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_var_out_invalid_shape() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;

    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on

    // dimension size mismatch when keepdim is true (correct would be {2, 1, 4})
    Tensor out = tf_out.zeros({2, 4});
    // NOTE(review): `dtype` is unused — see note in
    // test_var_out_invalid_dimensions.
    optional<ScalarType> dtype = OUT_DTYPE;
    int64_t dims_1[1] = {1};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_var_out(
            self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));

    // dimension size mismatch when keepdim is false (correct would be {2, 4})
    out = tf_out.zeros({2, 1, 4});
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_var_out(
            self,
            optional_dim_list,
            /*unbiased=*/true,
            /*keepdim=*/false,
            out));
  }

  // Happy-path coverage for one (IN_DTYPE, OUT_DTYPE) pair: single dim,
  // multi-dim list, negative dim, and empty/null dim list (full reduction),
  // each with keepdim both true and false.
  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_var_out_dtype() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;
    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on

    // keepdim=true should work
    Tensor out = tf_out.zeros({2, 3, 1});
    int64_t dims_1[1] = {2};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    // NOTE(review): `dtype` is unused — see note in
    // test_var_out_invalid_dimensions.
    optional<ScalarType> dtype = OUT_DTYPE;
    op_var_out(
        self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out);
    // Each innermost slice is 4 consecutive integers; sample variance of
    // {k, k+1, k+2, k+3} is 5/3 = 1.666667.
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 3, 1},
      {
        1.666667,
        1.666667,
        1.666667,

        1.666667,
        1.666667,
        1.666667
      }));
    // clang-format on

    // keepdim=false should work
    out = tf_out.zeros({2, 3});
    op_var_out(
        self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 3},
      {
        1.666667, 1.666667, 1.666667,
        1.666667, 1.666667, 1.666667
      }));
    // clang-format on

    // dim list with multiple dimensions should work
    out = tf_out.zeros({1, 1, 4});
    int64_t dims_2[2] = {0, 1};
    optional_dim_list = ArrayRef<int64_t>{dims_2, 2};
    op_var_out(
        self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 4}, {56.0, 56.0, 56.0, 56.0}));

    out = tf_out.zeros({4});
    op_var_out(
        self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({4}, {56.0, 56.0, 56.0, 56.0}));

    // dim list with negative dimensions should work (-2 == dim 1 here); also
    // exercises the biased (unbiased=false) estimator.
    out = tf_out.zeros({2, 1, 4});
    int64_t dims_3[1] = {-2};
    optional_dim_list = ArrayRef<int64_t>{dims_3, 1};
    op_var_out(
        self, optional_dim_list, /*unbiased=*/false, /*keepdim=*/true, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 1, 4},
      {
        10.666667, 10.666667, 10.666667, 10.666667,

        10.666667, 10.666667, 10.666667, 10.666667,
      }));
    // clang-format on

    // empty/null dim list should work — both mean "reduce over all dims"
    out = tf_out.zeros({1, 1, 1});
    optional<ArrayRef<int64_t>> null_dim_list;
    op_var_out(self, null_dim_list, /*unbiased=*/true, /*keepdim=*/true, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {50.0}));

    optional<ArrayRef<int64_t>> empty_dim_list{ArrayRef<int64_t>{}};
    op_var_out(self, empty_dim_list, /*unbiased=*/false, /*keepdim=*/true, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {47.916668}));

    out = tf_out.zeros({});
    op_var_out(self, null_dim_list, /*unbiased=*/false, /*keepdim=*/false, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {47.916668}));

    op_var_out(self, empty_dim_list, /*unbiased=*/true, /*keepdim=*/false, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {50.0}));
  }
};
218
// Test fixture for the var.correction_out overload, which replaces the
// `bool unbiased` flag with an `optional<Scalar> correction` argument.
class OpVarCorrectionOutTest : public OperatorTest {
 protected:
  // Thin wrapper over the generated var.correction_out kernel; forwards the
  // fixture's runtime context (context_) along with all operator arguments.
  Tensor& op_var_correction_out(
      const Tensor& self,
      optional<ArrayRef<int64_t>> dim,
      optional<Scalar>& correction,
      bool keepdim,
      Tensor& out) {
    return torch::executor::aten::var_outf(
        context_, self, dim, correction, keepdim, out);
  }
};
231
// Malformed dim lists must fail for every float input/output dtype pair.
TEST_F(OpVarOutTest, InvalidDimensionListDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  // Use a two-layer macro expansion to handle each possible
  // (input dtype, output dtype) pair of float types.
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_var_out_invalid_dimensions<                                        \
      ScalarType::INPUT_DTYPE,                                            \
      ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}
249
// Mis-shaped `out` tensors must fail for every float input/output dtype pair.
TEST_F(OpVarOutTest, InvalidShapeDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  // Use a two-layer macro expansion to handle each possible
  // (input dtype, output dtype) pair of float types.
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_var_out_invalid_shape<                                             \
      ScalarType::INPUT_DTYPE,                                            \
      ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}
267
// An integer input tensor must be rejected by the portable kernel even when
// the output tensor is float.
TEST_F(OpVarOutTest, InvalidDTypeDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  TensorFactory<ScalarType::Float> tf_float;
  TensorFactory<ScalarType::Int> tf_int;

  // clang-format off
  Tensor self = tf_int.make(
    {2, 3, 4},
    {
      0, 1, 2, 3,
      4, 5, 6, 7,
      8, 9, 10, 11,

      12, 13, 14, 15,
      16, 17, 18, 19,
      20, 21, 22, 23,
    });
  // clang-format on

  // The shapes here are otherwise valid for a keepdim=true reduction over
  // dim 2, so only the Int->Float dtype combination can cause the failure.
  Tensor out = tf_float.zeros({2, 3, 1});
  int64_t dims_1[1] = {2};
  optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_var_out(
          self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out));
}
299
// Happy-path sweep: every float input dtype paired with every float output
// dtype must produce correct variances.
TEST_F(OpVarOutTest, AllFloatInputFloatOutputPasses) {
  // Use a two-layer macro expansion to handle each possible
  // (input dtype, output dtype) pair of float types.
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_var_out_dtype<ScalarType::INPUT_DTYPE, ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}
312
// Non-finite inputs must propagate rather than crash: every innermost slice
// of the input contains at least one INFINITY/-INFINITY/NAN, and the expected
// variance of each such slice is NAN.
TEST_F(OpVarOutTest, InfinityAndNANTest) {
  TensorFactory<ScalarType::Float> tf_float;
  // clang-format off
  Tensor self = tf_float.make(
    {2, 3, 4},
    {
      0, 1, 2, INFINITY,
      INFINITY, -INFINITY, 1, 0,
      NAN, INFINITY, -INFINITY, 2,

      NAN, NAN, 1, 0,
      0, INFINITY, NAN, 4,
      1, NAN, 3.14, 2,
    });
  // clang-format on

  Tensor out = tf_float.zeros({2, 3, 1});
  // Reduce over the innermost dimension (dim -1 == dim 2).
  int64_t dims[1] = {-1};
  optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims, 1}};
  // (Removed an unused `optional<ScalarType> dtype;` local — op_var_out
  // takes no dtype argument.)
  op_var_out(self, optional_dim_list, /*unbiased=*/true, /*keepdim=*/true, out);
  // clang-format off
  EXPECT_TENSOR_CLOSE(out, tf_float.make(
    {2, 3, 1},
    {
      NAN,
      NAN,
      NAN,

      NAN,
      NAN,
      NAN
    }));
  // clang-format on
}
348
// A DYNAMIC_BOUND output whose upper-bound shape exactly matches the result
// shape must be accepted and filled with per-row sample variances.
TEST_F(OpVarOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({3, 2}, {0.49, 0.40, 0.56, 0.38, 0.49, 0.56});
  Tensor expected_result = tf.make({3}, {0.004050, 0.016200, 0.002450});

  Tensor out =
      tf.zeros({3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  // The kernel's returned reference was previously copied into an unused
  // `Tensor ret` local; call directly and assert on `out` instead.
  op_var_out(
      x, ArrayRef<int64_t>{1}, /*unbiased=*/true, /*keepdim=*/false, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
361
// A DYNAMIC_BOUND output allocated with a larger upper bound ({10}) than the
// result needs ({3}) must be resized down by the kernel and filled correctly.
TEST_F(OpVarOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.make({3, 2}, {0.49, 0.40, 0.56, 0.38, 0.49, 0.56});
  Tensor out =
      tf.zeros({10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);

  // Reduce each row (dim 1) to its sample variance.
  int64_t reduce_dims[1] = {1};
  Tensor ret = op_var_out(
      input,
      ArrayRef<int64_t>{reduce_dims, 1},
      /*unbiased=*/true,
      /*keepdim=*/false,
      out);

  Tensor expected_result = tf.make({3}, {0.004050, 0.016200, 0.002450});
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
374
// Placeholder for DYNAMIC_UNBOUND output support. The skip makes everything
// below unreachable; the body is kept as a ready-made test for when the
// feature lands.
TEST_F(OpVarOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape unbound not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({3, 2}, {0.49, 0.40, 0.56, 0.38, 0.49, 0.56});
  Tensor expected_result = tf.make({3}, {0.004050, 0.016200, 0.002450});

  Tensor out =
      tf.zeros({1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_var_out(
      x, ArrayRef<int64_t>{1}, /*unbiased=*/true, /*keepdim=*/false, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
388
// Basic coverage for var.correction_out: reduce each row with a fractional
// correction value of 1.23 and compare against precomputed variances.
TEST_F(OpVarCorrectionOutTest, SmokeTest) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.make({2, 3}, {4.9, 4.0, 5.6, 3.8, 4.9, 5.6});
  Tensor out = tf.zeros({2});
  optional<Scalar> correction(1.23);

  // Reduce over dim 1 (one result per row), dropping the reduced dim.
  int64_t reduce_dims[1] = {1};
  op_var_correction_out(
      input,
      ArrayRef<int64_t>{reduce_dims, 1},
      correction,
      /*keepdim=*/false,
      out);

  Tensor expected = tf.make({2}, {0.72693, 0.93032});
  EXPECT_TENSOR_CLOSE(out, expected);
}
401