/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpSliceCopyTensorOutTest : public OperatorTest {
 protected:
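  // Convenience wrapper: forwards to the generated slice_copy.Tensor_out
  // entry point, supplying the test fixture's runtime context (context_) so
  // individual tests don't have to pass it explicitly.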
  Tensor& op_slice_copy_tensor_out(
      const Tensor& self,
      int64_t dim,
      optional<int64_t> start,
      optional<int64_t> end,
      int64_t step,
      Tensor& out) {
    return torch::executor::aten::slice_copy_outf(
        context_, self, dim, start, end, step, out);
  }

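  // Generic per-dtype smoke test: slices a 3x4 tensor of DTYPE along dim 0
  // and compares the result against a hand-written expectation. Invoked for
  // each real dtype (plus Bool) by the AllDtypesSupported test below.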
  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;

    // clang-format off
    Tensor input = tf.make(
        /*sizes=*/{3, 4},
        /*data=*/{
          1,  2,  3,  4, // [0, :]
          5,  6,  7,  8, // [1, :]
          9, 10, 11, 12, // [2, :]
        });

    // op_slice_copy_tensor_out(input, /*dim=*/0, /*start=*/0, /*end=*/2, /*step=*/1, out),
    // The result should equal input[0:2:1, :]
    Tensor expect_ret = tf.make(
        /*sizes=*/{2, 4},
        /*data=*/{
          1, 2, 3, 4, // [0, :]
          5, 6, 7, 8, // [1, :]
        });
    // clang-format on

    Tensor out = tf.zeros({2, 4});
    Tensor ret = op_slice_copy_tensor_out(
        input, /*dim=*/0, /*start=*/0, /*end=*/2, /*step=*/1, out);

    EXPECT_TENSOR_EQ(out, ret);
    EXPECT_TENSOR_EQ(ret, expect_ret);
  }
};

TEST_F(OpSliceCopyTensorOutTest, LegalDimSupported) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor input = tf.make(
      /*sizes=*/{2, 3, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        5.,   6.,   7.,   8., // [0, 1, :]
        9.,  10.,  11.,  12., // [0, 2, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -5.,  -6.,  -7.,  -8., // [1, 1, :]
        -9., -10., -11., -12., // [1, 2, :]
      });
  // clang-format on

  // clang-format off
  // The size of the expected output tensor should follow these rules:
  // - output.size(i) shall equal input.size(i) if i != dim,
  // - output.size(i) shall equal num_values if i == dim
  // The definition of num_values can be found at https://fburl.com/code/mnnxkowm
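  // For example, slicing this {2, 3, 4} input on dim=0 with start=0, end=1,
  // and step=1 gives num_values = 1, so the expected output size is
  // {1, 3, 4} (expected_dim_0 below).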

  // op_slice_copy_tensor_out(input, /*dim=*/0, /*start=*/0, /*end=*/1, /*step=*/1, out),
  // The result should equal input[0:1:1, :, :]
  Tensor expected_dim_0 = tf.make(
      /*sizes=*/{1, 3, 4},
      /*data=*/{
        1.,  2.,  3.,  4., // [0, 0, :]
        5.,  6.,  7.,  8., // [0, 1, :]
        9., 10., 11., 12., // [0, 2, :]
      });
  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/1, /*step=*/1, out),
  // The result should equal input[:, 0:1:1, :]
  Tensor expected_dim_1 = tf.make(
      /*sizes=*/{2, 1, 4},
      /*data=*/{
        1.,  2.,  3.,  4., // [0, :, :]
        -1., -2., -3., -4., // [1, :, :]
      });
  // op_slice_copy_tensor_out(input, /*dim=*/2, /*start=*/0, /*end=*/1, /*step=*/1, out),
  // The result should equal input[:, :, 0:1:1]
  Tensor expected_dim_2 = tf.make(
      /*sizes=*/{2, 3, 1},
      /*data=*/{
        1.,  5.,  9., // [0, :, :]
        -1., -5., -9., // [1, :, :]
      });
  // clang-format on
  std::vector<Tensor> expected_rets = {
      // Ground truth for dim=-3
      expected_dim_0,
      // Ground truth for dim=-2
      expected_dim_1,
      // Ground truth for dim=-1
      expected_dim_2,
      // Ground truth for dim=0
      expected_dim_0,
      // Ground truth for dim=1
      expected_dim_1,
      // Ground truth for dim=2
      expected_dim_2,
  };

  for (int64_t dim = -3; dim < 3; dim++) {
    int64_t testcase_idx = dim + 3;
    auto expected_ret = expected_rets[testcase_idx];
    Tensor out = tf.zeros_like(expected_ret);

    // Slice input on dim with start=0, end=1, and step=1.
    // Should always return the provided out Tensor, and the result should
    // match the expectation.
    Tensor ret = op_slice_copy_tensor_out(
        input, dim, /*start=*/0, /*end=*/1, /*step=*/1, out);
    EXPECT_TENSOR_EQ(out, ret);
    EXPECT_TENSOR_EQ(ret, expected_rets[testcase_idx]);
  }
}

TEST_F(OpSliceCopyTensorOutTest, AllStartValsSupported) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor input = tf.make(
      /*sizes=*/{2, 3, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        5.,   6.,   7.,   8., // [0, 1, :]
        9.,  10.,  11.,  12., // [0, 2, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -5.,  -6.,  -7.,  -8., // [1, 1, :]
        -9., -10., -11., -12., // [1, 2, :]
      });
  // clang-format on

  // clang-format off
  // Set the end large enough to hold any start

  // The size of the expected output tensor should follow these rules:
  // - output.size(i) shall equal input.size(i) if i != dim,
  // - output.size(i) shall equal num_values if i == dim
  // The definition of num_values can be found at https://fburl.com/code/mnnxkowm
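  // For example, with dim=1, start=1, step=1, and the end clamped to
  // input.size(1) = 3, num_values = 3 - 1 = 2, so the expected output size is
  // {2, 2, 4} (expected_start_1 below).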

  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0 (or any start <= -3, which clamps to 0), /*end=*/10, /*step=*/1, out),
  // The result should equal input[:, 0:3:1, :]
  Tensor expected_start_0_or_below = tf.make(
      /*sizes=*/{2, 3, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        5.,   6.,   7.,   8., // [0, 1, :]
        9.,  10.,  11.,  12., // [0, 2, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -5.,  -6.,  -7.,  -8., // [1, 1, :]
        -9., -10., -11., -12., // [1, 2, :]
      });
  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/1, /*end=*/10, /*step=*/1, out),
  // The result should equal input[:, 1:3:1, :]
  Tensor expected_start_1 = tf.make(
      /*sizes=*/{2, 2, 4},
      /*data=*/{
        // [0, :, :]
        5.,   6.,   7.,   8., // [0, 0, :]
        9.,  10.,  11.,  12., // [0, 1, :]

        // [1, :, :]
        -5.,  -6.,  -7.,  -8., // [1, 0, :]
        -9., -10., -11., -12., // [1, 1, :]
      });
  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/2, /*end=*/10, /*step=*/1, out),
  // The result should equal input[:, 2:3:1, :]
  Tensor expected_start_2 = tf.make(
      /*sizes=*/{2, 1, 4},
      /*data=*/{
        9.,  10.,  11.,  12., // [0, 0, :]
        -9., -10., -11., -12., // [1, 0, :]
      });

  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/ >= input.size(1) = 3, /*end=*/10, /*step=*/1, out),
  // The result should equal input[:, 3:3:1, :], which is an empty tensor
  Tensor expected_start_3_or_above = tf.make({2, 0, 4}, {});
  // clang-format on
  std::vector<Tensor> expected_rets = {
      // start = -3
      expected_start_0_or_below,
      // start = -2
      expected_start_1,
      // start = -1
      expected_start_2,
      // start = 0
      expected_start_0_or_below,
      // start = 1
      expected_start_1,
      // start = 2
      expected_start_2,
      // start = 3
      expected_start_3_or_above,
  };

  // In this test, dim and step are fixed at 1, and the end is set large
  // enough to accommodate any start.
  int64_t dim = 1;
  int64_t end = 10;
  int64_t step = 1;
  for (int64_t start = -3; start < 4; start++) {
    int64_t testcase_idx = start + 3;
    auto expected_ret = expected_rets[testcase_idx];
    Tensor out = tf.zeros_like(expected_ret);

    // Should always return the provided out Tensor, and the result should
    // match the expectation.
    Tensor ret = op_slice_copy_tensor_out(input, dim, start, end, step, out);
    EXPECT_TENSOR_EQ(out, ret);
    EXPECT_TENSOR_EQ(ret, expected_ret);
  }
}

TEST_F(OpSliceCopyTensorOutTest, AllEndValsSupported) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor input = tf.make(
      /*sizes=*/{2, 3, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        5.,   6.,   7.,   8., // [0, 1, :]
        9.,  10.,  11.,  12., // [0, 2, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -5.,  -6.,  -7.,  -8., // [1, 1, :]
        -9., -10., -11., -12., // [1, 2, :]
      });

  // The size of the expected output tensor should follow these rules:
  // - output.size(i) shall equal input.size(i) if i != dim,
  // - output.size(i) shall equal num_values if i == dim
  // The definition of num_values can be found at https://fburl.com/code/mnnxkowm
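  // For example, with dim=1, start=0, step=1, and end=2, num_values = 2, so
  // the expected output size is {2, 2, 4} (expected_end_2 below).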

  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/0 (or any end <= -3, which clamps to 0), /*step=*/1, out),
  // The result should equal input[:, 0:0:1, :], which is an empty tensor
  Tensor expected_end_0_or_below = tf.make({2, 0, 4}, {});

  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/1, /*step=*/1, out),
  // The result should equal input[:, 0:1:1, :]
  Tensor expected_end_1 = tf.make(
      /*sizes=*/{2, 1, 4},
      /*data=*/{
        1.,  2.,  3.,  4., // [0, :, :]
        -1., -2., -3., -4., // [1, :, :]
      });

  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/2, /*step=*/1, out),
  // The result should equal input[:, 0:2:1, :]
  Tensor expected_end_2 = tf.make(
      /*sizes=*/{2, 2, 4},
      /*data=*/{
        // [0, :, :]
        1.,  2.,  3.,  4., // [0, 0, :]
        5.,  6.,  7.,  8., // [0, 1, :]

        // [1, :, :]
        -1., -2., -3., -4., // [1, 0, :]
        -5., -6., -7., -8., // [1, 1, :]
      });
  // op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/ >= 3, /*step=*/1, out),
  // The result should equal input[:, 0:3:1, :] = input for any end >= 3
  Tensor expected_end_3_or_above = tf.make(
      /*sizes=*/{2, 3, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        5.,   6.,   7.,   8., // [0, 1, :]
        9.,  10.,  11.,  12., // [0, 2, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -5.,  -6.,  -7.,  -8., // [1, 1, :]
        -9., -10., -11., -12., // [1, 2, :]
      });
  // clang-format on
  std::vector<Tensor> expected_rets = {
      // end = -3
      expected_end_0_or_below,
      // end = -2
      expected_end_1,
      // end = -1
      expected_end_2,
      // end = 0
      expected_end_0_or_below,
      // end = 1
      expected_end_1,
      // end = 2
      expected_end_2,
      // end = 3
      expected_end_3_or_above,
  };

  int64_t dim = 1;
  int64_t start = 0;
  int64_t step = 1;
  for (int64_t end = -3; end < 4; end++) {
    int64_t testcase_idx = end + 3;

    auto expected_ret = expected_rets[testcase_idx];
    Tensor out = tf.zeros_like(expected_ret);

    // Should always return the provided out Tensor, and the result should
    // match the expectation.
    Tensor ret = op_slice_copy_tensor_out(input, dim, start, end, step, out);
    EXPECT_TENSOR_EQ(out, ret);
    EXPECT_TENSOR_EQ(ret, expected_ret);
  }
}

TEST_F(OpSliceCopyTensorOutTest, LegalStepsSupported) {
  TensorFactory<ScalarType::Double> tf;

  // clang-format off
  Tensor input = tf.make(
      /*sizes=*/{2, 3, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        5.,   6.,   7.,   8., // [0, 1, :]
        9.,  10.,  11.,  12., // [0, 2, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -5.,  -6.,  -7.,  -8., // [1, 1, :]
        -9., -10., -11., -12., // [1, 2, :]
      });

  // Set the end large enough to hold any step
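  // For example, with dim=1, start=0, step=2, and the end clamped to
  // input.size(1) = 3, the slice selects indices 0 and 2, so num_values = 2
  // and the expected output size is {2, 2, 4} (expected_1 below).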

  // Expected ret for op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/10, /*step=*/1, out),
  // The result should equal input[:, 0:3:1, :]
  Tensor expected_0 = tf.make(
      /*sizes=*/{2, 3, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        5.,   6.,   7.,   8., // [0, 1, :]
        9.,  10.,  11.,  12., // [0, 2, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -5.,  -6.,  -7.,  -8., // [1, 1, :]
        -9., -10., -11., -12., // [1, 2, :]
      });
  // Expected ret for op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/10, /*step=*/2, out),
  // The result should equal input[:, 0:3:2, :]
  Tensor expected_1 = tf.make(
      /*sizes=*/{2, 2, 4},
      /*data=*/{
        // [0, :, :]
        1.,   2.,   3.,   4., // [0, 0, :]
        9.,  10.,  11.,  12., // [0, 1, :]

        // [1, :, :]
        -1.,  -2.,  -3.,  -4., // [1, 0, :]
        -9., -10., -11., -12., // [1, 1, :]
      });
  // Expected ret for op_slice_copy_tensor_out(input, /*dim=*/1, /*start=*/0, /*end=*/10, /*step=*/3, out),
  // The result should equal input[:, 0:3:3, :]
  Tensor expected_2 = tf.make(
      /*sizes=*/{2, 1, 4},
      /*data=*/{
        1.,  2.,  3.,  4., // [0, 0, :]
        -1., -2., -3., -4., // [1, 0, :]
      });
  // clang-format on
  std::vector<Tensor> expected_rets = {expected_0, expected_1, expected_2};

  // In this test, start and dim are fixed at 0 and 1, and the end is set
  // large enough to accommodate any step.
  int64_t start = 0;
  int64_t dim = 1;
  int64_t end = 10;
  for (int64_t step = 1; step < 4; step++) {
    int64_t testcase_idx = step - 1;

    auto expected_ret = expected_rets[testcase_idx];
    Tensor out = tf.zeros_like(expected_ret);

    // Should always return the provided out Tensor, and the result should
    // match the expectation.
    Tensor ret = op_slice_copy_tensor_out(input, dim, start, end, step, out);
    EXPECT_TENSOR_EQ(out, ret);
    EXPECT_TENSOR_EQ(ret, expected_ret);
  }
}

/// A generic smoke test that works for any dtype that supports ones() and
/// zeros().
TEST_F(OpSliceCopyTensorOutTest, AllDtypesSupported) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
  // TODO: Also add tests for half, complex, quantized, and other types. The
  // easiest way to do that would be to make TensorFactory support zeros() and
  // ones() for those types.
}

TEST_F(OpSliceCopyTensorOutTest, EmptyInputSupported) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.ones({1, 0, 1});
  Tensor out = tf.zeros({1, 0, 1});

  Tensor expect = tf.ones({1, 0, 1});

  // All valid dim values.
  for (int64_t dim = 0; dim < input.dim(); dim++) {
    Tensor ret = op_slice_copy_tensor_out(
        input, dim, /*start=*/0, /*end=*/1, /*step=*/1, out);
    EXPECT_TENSOR_EQ(ret, out);

    // All operations in this test share the same ground truth.
    EXPECT_TENSOR_EQ(ret, expect);
  }
}

TEST_F(OpSliceCopyTensorOutTest, EmptySizeInputDies) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.ones({});
  Tensor out = tf.ones({});

  // The operation should fail regardless of the end value.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_slice_copy_tensor_out(
          input, /*dim=*/0, /*start=*/0, /*end=*/0, /*step=*/1, out));
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_slice_copy_tensor_out(
          input, /*dim=*/0, /*start=*/0, /*end=*/1, /*step=*/1, out));
}

TEST_F(OpSliceCopyTensorOutTest, ZeroLengthSupported) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.ones({2, 3});
  Tensor out = tf.ones({2, 0});

  Tensor expect = tf.ones({2, 0});

  Tensor ret = op_slice_copy_tensor_out(
      input, /*dim=*/1, /*start=*/1, /*end=*/1, /*step=*/1, out);
  EXPECT_TENSOR_EQ(ret, out);
  EXPECT_TENSOR_EQ(ret, expect);

  ret = op_slice_copy_tensor_out(
      input, /*dim=*/1, /*start=*/-1, /*end=*/-1, /*step=*/1, out);
  EXPECT_TENSOR_EQ(ret, out);
  EXPECT_TENSOR_EQ(ret, expect);
}

TEST_F(OpSliceCopyTensorOutTest, NonPositiveStepsDies) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.ones({1, 1, 1});
  Tensor out = tf.zeros({1, 1, 1});

  // Some invalid step values.
  const std::vector<int64_t> invalid_steps = {-2, -1, 0};
  for (int64_t step : invalid_steps) {
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_slice_copy_tensor_out(
            input, /*dim=*/0, /*start=*/0, /*end=*/1, /*step=*/step, out));
  }
}

TEST_F(OpSliceCopyTensorOutTest, DimOutOfBoundDies) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.ones({1, 1, 1});
  Tensor out = tf.zeros({1, 1, 1});

  // Some invalid dim values.
  const std::vector<int64_t> invalid_dims = {3, 4, 5, -4, -5, -6};
  for (int64_t dim : invalid_dims) {
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_slice_copy_tensor_out(
            input, dim, /*start=*/0, /*end=*/1, /*step=*/1, out));
  }
}

TEST_F(OpSliceCopyTensorOutTest, MismatchedDtypesDies) {
  TensorFactory<ScalarType::Int> tf_int;
  TensorFactory<ScalarType::Float> tf_float;
  Tensor input = tf_int.zeros({1, 2, 2});

  // The size is compatible with the expected output, but the dtype is
  // mismatched.
  Tensor out = tf_float.ones({1, 2, 2});

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_slice_copy_tensor_out(
          input, /*dim=*/0, /*start=*/0, /*end=*/1, /*step=*/1, out));
}

TEST_F(OpSliceCopyTensorOutTest, OutSizeMismatchDimDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle out with mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.zeros({2, 4, 7, 5});

  // Should be {2, 4, 7, 5}
  Tensor out = tf.zeros({2, 4, 7});

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_slice_copy_tensor_out(
          input, /*dim=*/0, /*start=*/0, /*end=*/2, /*step=*/1, out));
}

TEST_F(OpSliceCopyTensorOutTest, DefaultStartValSupported) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.zeros({2, 4, 7, 5});

  Tensor out = tf.ones({2, 4, 7, 5});
  Tensor expected = tf.zeros({2, 4, 7, 5});
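  // With start omitted it defaults to 0, so the slice [0:2:1] covers all of
  // dim 0 (size 2) and the result should equal the all-zero input.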

  Tensor ret_default_start = op_slice_copy_tensor_out(
      input,
      /*dim=*/0,
      /*start=*/exec_aten::nullopt,
      /*end=*/2,
      /*step=*/1,
      out);
  EXPECT_TENSOR_EQ(ret_default_start, out);
  EXPECT_TENSOR_EQ(ret_default_start, expected);
}

TEST_F(OpSliceCopyTensorOutTest, DefaultEndValSupported) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.zeros({2, 4, 7, 5});

  Tensor out = tf.ones({2, 4, 7, 5});
  Tensor expected = tf.zeros({2, 4, 7, 5});
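  // With end omitted it defaults to the full extent of dim 0, so the slice
  // [0::1] covers the whole dimension and the result should equal the
  // all-zero input.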

  Tensor ret_default_end = op_slice_copy_tensor_out(
      input,
      /*dim=*/0,
      /*start=*/0,
      /*end=*/exec_aten::nullopt,
      /*step=*/1,
      out);
  EXPECT_TENSOR_EQ(ret_default_end, out);
  EXPECT_TENSOR_EQ(ret_default_end, expected);
}

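// The %python blocks below record the PyTorch reference script used to
// generate the inputs and expected outputs for the dynamic-shape tests that
// follow.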
/* %python
import torch
torch.manual_seed(0)
x = torch.rand(2, 6, 3)
res = x[:, 1:5:2, :]
print(res.size())
op = "op_slice_copy_tensor_out"
opt_extra_params = "1, 1, 5, 2,"
dtype = "ScalarType::Float"
check = "EXPECT_TENSOR_EQ" */

TEST_F(OpSliceCopyTensorOutTest, DynamicShapeUpperBoundSameAsExpected) {
  /* %python
  out_args = "{2, 2, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 6, 3},
      {0.49625658988952637, 0.7682217955589294, 0.08847743272781372,
       0.13203048706054688, 0.30742281675338745, 0.6340786814689636,
       0.4900934100151062, 0.8964447379112244, 0.455627977848053,
       0.6323062777519226, 0.3488934636116028, 0.40171730518341064,
       0.022325754165649414, 0.16885894536972046, 0.2938884496688843,
       0.518521785736084, 0.6976675987243652, 0.800011396408081,
       0.16102945804595947, 0.28226858377456665, 0.6816085577011108,
       0.9151939749717712, 0.39709991216659546, 0.8741558790206909,
       0.41940832138061523, 0.5529070496559143, 0.9527381062507629,
       0.036164820194244385, 0.1852310299873352, 0.37341737747192383,
       0.3051000237464905, 0.9320003986358643, 0.17591017484664917,
       0.2698335647583008, 0.15067976713180542, 0.03171950578689575});
  Tensor expected = tf.make(
      {2, 2, 3},
      {0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.6323062777519226,
       0.3488934636116028,
       0.40171730518341064,
       0.9151939749717712,
       0.39709991216659546,
       0.8741558790206909,
       0.036164820194244385,
       0.1852310299873352,
       0.37341737747192383});

  Tensor out =
      tf.zeros({2, 2, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_slice_copy_tensor_out(x, 1, 1, 5, 2, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpSliceCopyTensorOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{10, 10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 6, 3},
      {0.49625658988952637, 0.7682217955589294, 0.08847743272781372,
       0.13203048706054688, 0.30742281675338745, 0.6340786814689636,
       0.4900934100151062, 0.8964447379112244, 0.455627977848053,
       0.6323062777519226, 0.3488934636116028, 0.40171730518341064,
       0.022325754165649414, 0.16885894536972046, 0.2938884496688843,
       0.518521785736084, 0.6976675987243652, 0.800011396408081,
       0.16102945804595947, 0.28226858377456665, 0.6816085577011108,
       0.9151939749717712, 0.39709991216659546, 0.8741558790206909,
       0.41940832138061523, 0.5529070496559143, 0.9527381062507629,
       0.036164820194244385, 0.1852310299873352, 0.37341737747192383,
       0.3051000237464905, 0.9320003986358643, 0.17591017484664917,
       0.2698335647583008, 0.15067976713180542, 0.03171950578689575});
  Tensor expected = tf.make(
      {2, 2, 3},
      {0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.6323062777519226,
       0.3488934636116028,
       0.40171730518341064,
       0.9151939749717712,
       0.39709991216659546,
       0.8741558790206909,
       0.036164820194244385,
       0.1852310299873352,
       0.37341737747192383});

  Tensor out = tf.zeros(
      {10, 10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_slice_copy_tensor_out(x, 1, 1, 5, 2, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpSliceCopyTensorOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 6, 3},
      {0.49625658988952637, 0.7682217955589294, 0.08847743272781372,
       0.13203048706054688, 0.30742281675338745, 0.6340786814689636,
       0.4900934100151062, 0.8964447379112244, 0.455627977848053,
       0.6323062777519226, 0.3488934636116028, 0.40171730518341064,
       0.022325754165649414, 0.16885894536972046, 0.2938884496688843,
       0.518521785736084, 0.6976675987243652, 0.800011396408081,
       0.16102945804595947, 0.28226858377456665, 0.6816085577011108,
       0.9151939749717712, 0.39709991216659546, 0.8741558790206909,
       0.41940832138061523, 0.5529070496559143, 0.9527381062507629,
       0.036164820194244385, 0.1852310299873352, 0.37341737747192383,
       0.3051000237464905, 0.9320003986358643, 0.17591017484664917,
       0.2698335647583008, 0.15067976713180542, 0.03171950578689575});
  Tensor expected = tf.make(
      {2, 2, 3},
      {0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.6323062777519226,
       0.3488934636116028,
       0.40171730518341064,
       0.9151939749717712,
       0.39709991216659546,
       0.8741558790206909,
       0.036164820194244385,
       0.1852310299873352,
       0.37341737747192383});

  Tensor out = tf.zeros(
      {1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_slice_copy_tensor_out(x, 1, 1, 5, 2, out);
  EXPECT_TENSOR_EQ(out, expected);
}