/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

#include <gtest/gtest.h>
#include <cstdint>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using exec_aten::TensorList;
using torch::executor::testing::TensorFactory;

class OpStackOutTest : public OperatorTest {
 protected:
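  // Thin wrapper around the generated stack.out entry point so each test can
  // invoke the operator under test with the shared runtime context (context_).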
  Tensor& op_stack_out(TensorList tensors, int64_t dim, Tensor& out) {
    return torch::executor::aten::stack_outf(context_, tensors, dim, out);
  }

  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;

    // Will be stacked along out.dim(1). Use different input values so we can
    // see where each output value came from.
    Tensor x = tf.ones({3, 4});
    Tensor y = tf.zeros({3, 4});
    std::vector<Tensor> inputs = {x, y};

    Tensor out = tf.ones({3, 2, 4});
    op_stack_out(
        ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/1, out);

    // The two tensors x and y are stacked along the 1st dimension in the
    // order [x, y], so x and y should equal expected[:, 0, :] and
    // expected[:, 1, :], i.e. expected[i, 0, j] = x[i, j] and
    // expected[i, 1, j] = y[i, j] for any i in [-x.size(0), x.size(0)-1]
    // and j in [-x.size(1), x.size(1)-1].
    // clang-format off
    Tensor expected = tf.make(
        {3, 2, 4},
        {
            // All ones below are from x,
            // and all zeros are from y.
            // [0, :, :]
            1, 1, 1, 1, // [0, 0, :]
            0, 0, 0, 0, // [0, 1, :]

            // [1, :, :]
            1, 1, 1, 1, // [1, 0, :]
            0, 0, 0, 0, // [1, 1, :]

            // [2, :, :]
            1, 1, 1, 1, // [2, 0, :]
            0, 0, 0, 0, // [2, 1, :]
        });
    // clang-format on

    EXPECT_TENSOR_EQ(out, expected);
  }

  // Run stacking experiments along the given dim.
  void run_stack_tests(
      const std::vector<Tensor>& inputs,
      int64_t dim,
      const Tensor& expected) {
    ArrayRef<Tensor> inputs_array(inputs.data(), inputs.size());

    TensorFactory<ScalarType::Double> tf;
    const std::vector<int32_t> out_size(
        expected.sizes().begin(), expected.sizes().end());
    Tensor out = tf.zeros(out_size);

    // Should always return the provided out Tensor.
    Tensor ret = op_stack_out(inputs_array, dim, out);
    EXPECT_TENSOR_EQ(out, ret);
    EXPECT_TENSOR_EQ(out, expected);
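
    // Stacking along the equivalent negative index (dim - out.dim()) should
    // produce the same result.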
    ret = op_stack_out(inputs_array, /*dim=*/dim - out.dim(), out);
    EXPECT_TENSOR_EQ(out, ret);
    EXPECT_TENSOR_EQ(out, expected);
  }
};

TEST_F(OpStackOutTest, InsertFront) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor x = tf.make(
      {3, 4},
      {
          1., 2., 3., 4., // [0, :]
          5., 6., 7., 8., // [1, :]
          9., 10., 11., 12., // [2, :]
      });
  Tensor y = tf.make(
      {3, 4},
      {
          -1., -2., -3., -4., // [0, :]
          -5., -6., -7., -8., // [1, :]
          -9., -10., -11., -12., // [2, :]
      });
  // clang-format on

  const std::vector<Tensor> inputs = {x, y};

  // Stack the two tensors at the front (dim = 0).
  // The size of the expected output tensor follows these rules:
  // - For any input tensor, its size(i) == output.size(i) if i < dim, and its
  //   size(i) == output.size(i+1) if i >= dim.
  // - For the stack dimension, output.size(dim) is the number of input
  //   tensors.
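  // Here: two {3, 4} inputs stacked at dim = 0 give an output of size
  // {2, 3, 4}.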
  std::vector<int32_t> expected_size = {2, 3, 4};

  // The two tensors x and y are stacked along the 0th dimension in the order
  // [x, y], so x and y should equal expected[0, :, :] and expected[1, :, :],
  // i.e. expected[0, i, j] = x[i, j] and expected[1, i, j] = y[i, j] for any
  // i in [-x.size(0), x.size(0)-1] and j in [-x.size(1), x.size(1)-1].
  // clang-format off
  Tensor expected = tf.make(
      expected_size,
      {
          // [0, :, :] equals x
          1., 2., 3., 4., // [0, 0, :]
          5., 6., 7., 8., // [0, 1, :]
          9., 10., 11., 12., // [0, 2, :]

          // [1, :, :] equals y
          -1., -2., -3., -4., // [1, 0, :]
          -5., -6., -7., -8., // [1, 1, :]
          -9., -10., -11., -12., // [1, 2, :]
      });
  // clang-format on

  run_stack_tests(inputs, /*dim=*/0, expected);
}

TEST_F(OpStackOutTest, InsertMiddle) {
  TensorFactory<ScalarType::Double> tf;

  // Two tensors with the same size, stacked along the middle dimension.
  // clang-format off
  Tensor x = tf.make(
      {3, 4},
      {
          1., 2., 3., 4., // [0, :]
          5., 6., 7., 8., // [1, :]
          9., 10., 11., 12., // [2, :]
      });
  Tensor y = tf.make(
      {3, 4},
      {
          -1., -2., -3., -4., // [0, :]
          -5., -6., -7., -8., // [1, :]
          -9., -10., -11., -12., // [2, :]
      });
  // clang-format on

  const std::vector<Tensor> inputs = {x, y};

  // Stack the two tensors in the middle (dim = 1).
  // The size of the expected output tensor follows these rules:
  // - For any input tensor, its size(i) == output.size(i) if i < dim, and its
  //   size(i) == output.size(i+1) if i >= dim.
  // - For the stack dimension, output.size(dim) is the number of input
  //   tensors.
  std::vector<int32_t> expected_size = {3, 2, 4};

  // The two tensors x and y are stacked along the 1st dimension in the order
  // [x, y], so x and y should equal expected[:, 0, :] and expected[:, 1, :],
  // i.e. expected[i, 0, j] = x[i, j] and expected[i, 1, j] = y[i, j] for any
  // i in [-x.size(0), x.size(0)-1] and j in [-x.size(1), x.size(1)-1].
  // clang-format off
  Tensor expected = tf.make(
      expected_size,
      {
          // [0, :, :]
          1., 2., 3., 4., // [0, 0, :] = x[0, :]
          -1., -2., -3., -4., // [0, 1, :] = y[0, :]

          // [1, :, :]
          5., 6., 7., 8., // [1, 0, :] = x[1, :]
          -5., -6., -7., -8., // [1, 1, :] = y[1, :]

          // [2, :, :]
          9., 10., 11., 12., // [2, 0, :] = x[2, :]
          -9., -10., -11., -12., // [2, 1, :] = y[2, :]
      });
  // clang-format on

  run_stack_tests(inputs, /*dim=*/1, expected);
}

TEST_F(OpStackOutTest, InsertEnd) {
  TensorFactory<ScalarType::Double> tf;

  // Two tensors with the same size, stacked along the last dimension.
  // clang-format off
  Tensor x = tf.make(
      {3, 4},
      {
          1., 2., 3., 4., // [0, :]
          5., 6., 7., 8., // [1, :]
          9., 10., 11., 12., // [2, :]
      });
  Tensor y = tf.make(
      {3, 4},
      {
          -1., -2., -3., -4., // [0, :]
          -5., -6., -7., -8., // [1, :]
          -9., -10., -11., -12., // [2, :]
      });
  // clang-format on

  const std::vector<Tensor> inputs = {x, y};

  // Stack the two tensors at the end (dim = 2).
  // The size of the expected output tensor follows these rules:
  // - For any input tensor, its size(i) == output.size(i) if i < dim, and its
  //   size(i) == output.size(i+1) if i >= dim.
  // - For the stack dimension, output.size(dim) is the number of input
  //   tensors.
  std::vector<int32_t> expected_size = {3, 4, 2};

  // The two tensors x and y are stacked along the 2nd dimension in the order
  // [x, y], so x and y should equal expected[:, :, 0] and expected[:, :, 1],
  // i.e. expected[i, j, 0] = x[i, j] and expected[i, j, 1] = y[i, j] for any
  // i in [-x.size(0), x.size(0)-1] and j in [-x.size(1), x.size(1)-1].
  // clang-format off
  Tensor expected = tf.make(
      expected_size,
      {
          // All values in the first column are from x,
          // and all values in the second column are from y.

          // [0, :, :]
          1., -1., // [0, 0, :]
          2., -2., // [0, 1, :]
          3., -3., // [0, 2, :]
          4., -4., // [0, 3, :]

          // [1, :, :]
          5., -5., // [1, 0, :]
          6., -6., // [1, 1, :]
          7., -7., // [1, 2, :]
          8., -8., // [1, 3, :]

          // [2, :, :]
          9., -9., // [2, 0, :]
          10., -10., // [2, 1, :]
          11., -11., // [2, 2, :]
          12., -12., // [2, 3, :]
      });
  // clang-format on

  run_stack_tests(inputs, /*dim=*/2, expected);
}

/// A generic smoke test that works for any dtype that supports ones() and
/// zeros().
TEST_F(OpStackOutTest, AllDtypesSupported) {
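  // Instantiate test_dtype() once for every real scalar type plus Bool via
  // the ET_FORALL_REAL_TYPES_AND() macro.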
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
  // TODO: Also add tests for half, complex, quantized, and other types. The
  // easiest way to do that would be to make TensorFactory support zeros() and
  // ones() for those types.
}

TEST_F(OpStackOutTest, NoInputTensorsWithEmptyOutTensorFails) {
  TensorFactory<ScalarType::Int> tf;

  // Make an empty out tensor and demonstrate that it's empty.
  Tensor out = tf.make({0}, {});
  EXPECT_EQ(out.numel(), 0);

  // Pass an empty list of input tensors.
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_stack_out(ArrayRef<Tensor>(), /*dim=*/0, out));
}

TEST_F(OpStackOutTest, AllEmptyInputTensors) {
  TensorFactory<ScalarType::Int> tf;

  // Use empty tensors as input.
  Tensor empty = tf.make({0, 10, 3}, {});
  EXPECT_EQ(empty.numel(), 0);
  std::vector<Tensor> inputs = {empty, empty, empty};

  Tensor x = tf.ones({2, 2});

  // Output whose shape is appropriate for stacking along out.dim(0).
  Tensor out = tf.make({3, 0, 10, 3}, {});
  EXPECT_EQ(out.numel(), 0);

  Tensor ret = op_stack_out(
      ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out);
  EXPECT_EQ(ret.numel(), 0);
  // Success if it doesn't assert on the oddly-shaped empty inputs and the
  // output is still an empty tensor.
}

TEST_F(OpStackOutTest, DimOutOfBoundDies) {
  TensorFactory<ScalarType::Int> tf;

  // Stack a single tensor of size [1, 1]. The output size is always
  // [1, 1, 1], no matter which dimension we stack along.
  Tensor x = tf.ones({1, 1});
  ArrayRef<Tensor> inputs(&x, 1);

  Tensor out = tf.zeros({1, 1, 1});

  // Dim values outside the valid range [-out.dim(), out.dim() - 1] == [-3, 2].
  const std::vector<int64_t> invalid_dims = {3, 4, 5, -4, -5, -6};
  for (int64_t dim : invalid_dims) {
    ET_EXPECT_KERNEL_FAILURE(context_, op_stack_out(inputs, dim, out));
  }
}

TEST_F(OpStackOutTest, MismatchedDtypesDies) {
  TensorFactory<ScalarType::Int> tf_int;
  TensorFactory<ScalarType::Float> tf_float;
  Tensor out = tf_int.zeros({1, 2, 2});

  // Size is compatible with the output, but the dtype is mismatched.
  std::vector<Tensor> inputs = {tf_float.ones({2, 2})};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_stack_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

TEST_F(OpStackOutTest, OutMatchNumelWithExtraDimAtEndDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle out with mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor out = tf.zeros({1, 2, 2, 1});

  // Same dtype and numel as the output, but a mismatched size (output.dim()
  // should always be one greater than input.dim()).
  std::vector<Tensor> inputs = {tf.ones({2, 2})};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_stack_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

TEST_F(OpStackOutTest, OutMatchNumelLackDimAtFrontDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle out with mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor out = tf.zeros({2, 2});

  // Same dtype and numel as the output, but a mismatched size (output.dim()
  // should always be one greater than input.dim()).
  std::vector<Tensor> inputs = {tf.ones({2, 2})};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_stack_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

TEST_F(OpStackOutTest, OutRegularMismatchDimDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle out with mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;

  // Should be {2, 2, 3} to match the inputs when calling stack() with dim 0.
  Tensor out = tf.zeros({2, 4, 5});

  std::vector<Tensor> inputs = {
      tf.ones({2, 3}),
      tf.ones({2, 3}),
  };

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_stack_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

/* %python
import torch
torch.manual_seed(0)
x = [torch.randint(10, (2, 3)),
     torch.randint(10, (2, 3)),
     torch.randint(10, (2, 3)),
     torch.randint(10, (2, 3))]
res = torch.stack(x, 0)
op = "op_stack_out"
opt_extra_params = "0,"
dtype = "ScalarType::Int"
check = "EXPECT_TENSOR_EQ" */
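
// The %python block above (and the %rewrite markers in the tests below) are
// directives for the kernel-test code generation tooling; they record how the
// expected tensors were produced (torch.stack of four random {2, 3} Int
// tensors generated with manual_seed(0)).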

TEST_F(OpStackOutTest, DynamicShapeUpperBoundSameAsExpected) {
  /* %python
  out_args = "{4, 2, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op_tensor_list_in) */

  TensorFactory<ScalarType::Int> tf;

  std::vector<Tensor> xv = {
      tf.make({2, 3}, {4, 9, 3, 0, 3, 9}),
      tf.make({2, 3}, {7, 3, 7, 3, 1, 6}),
      tf.make({2, 3}, {6, 9, 8, 6, 6, 8}),
      tf.make({2, 3}, {4, 3, 6, 9, 1, 4})};
  TensorList x(xv.data(), xv.size());
  Tensor expected = tf.make({4, 2, 3}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6,
                                        6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});

  Tensor out =
      tf.zeros({4, 2, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_stack_out(x, 0, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpStackOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{5, 5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op_tensor_list_in) */

  TensorFactory<ScalarType::Int> tf;

  std::vector<Tensor> xv = {
      tf.make({2, 3}, {4, 9, 3, 0, 3, 9}),
      tf.make({2, 3}, {7, 3, 7, 3, 1, 6}),
      tf.make({2, 3}, {6, 9, 8, 6, 6, 8}),
      tf.make({2, 3}, {4, 3, 6, 9, 1, 4})};
  TensorList x(xv.data(), xv.size());
  Tensor expected = tf.make({4, 2, 3}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6,
                                        6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});

  Tensor out =
      tf.zeros({5, 5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_stack_out(x, 0, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpStackOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND"
  %rewrite(unary_op_tensor_list_in) */

  TensorFactory<ScalarType::Int> tf;

  std::vector<Tensor> xv = {
      tf.make({2, 3}, {4, 9, 3, 0, 3, 9}),
      tf.make({2, 3}, {7, 3, 7, 3, 1, 6}),
      tf.make({2, 3}, {6, 9, 8, 6, 6, 8}),
      tf.make({2, 3}, {4, 3, 6, 9, 1, 4})};
  TensorList x(xv.data(), xv.size());
  Tensor expected = tf.make({4, 2, 3}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6,
                                        6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});

  Tensor out = tf.zeros(
      {1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_stack_out(x, 0, out);
  EXPECT_TENSOR_EQ(out, expected);
}