/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

#include <gtest/gtest.h>
#include <limits>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpAddmmOutTest : public OperatorTest {
 protected:
  Tensor& op_addmm_out(
      const Tensor& self,
      const Tensor& mat1,
      const Tensor& mat2,
      const Scalar& beta,
      const Scalar& alpha,
      Tensor& out) {
    return torch::executor::aten::addmm_outf(
        context_, self, mat1, mat2, beta, alpha, out);
  }

  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;

    if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
      if (DTYPE == ScalarType::Half) {
        GTEST_SKIP()
            << "skip Half because torch::executor::aten::mm_out does not support Half";
        return;
      }
    }

    // matmul gives 4 * 2 * 3 = 24, α * 24 = 48, 48 + β * self = 51
    Tensor self = tf.full({3, 5}, 1);
    Tensor x = tf.full({3, 4}, 2);
    Tensor y = tf.full({4, 5}, 3);

    // Output shape should be (3, 5)
    Tensor out = tf.zeros({3, 5});

    Scalar alpha = Scalar(2.0);
    Scalar beta = Scalar(3.0);

    op_addmm_out(self, x, y, beta, alpha, out);

    Tensor expected = tf.full({3, 5}, 51);

    EXPECT_TENSOR_EQ(out, expected);
  }
};

TEST_F(OpAddmmOutTest, OutputDim) {
  TensorFactory<ScalarType::Int> tf;

  // 3 tensors with compatible dimensions: (3, 5), (3, 4) and (4, 5).
  Tensor self = tf.ones({3, 5});
  Tensor x = tf.ones({3, 4});
  Tensor y = tf.ones({4, 5});

  // Output shape should be (3, 5)
  Tensor out = tf.zeros({3, 5});

  Scalar alpha = Scalar(1);
  Scalar beta = Scalar(1);

  Tensor ret = op_addmm_out(self, x, y, beta, alpha, out);

  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);

  // Expected tensor, filled with 5.
  Tensor expected = tf.full({3, 5}, 5);

  EXPECT_TENSOR_EQ(out, expected);
}

/// A generic smoke test that works for any dtype that supports full() and
/// zeros().
TEST_F(OpAddmmOutTest, AllDtypesSupported) {
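  // Instantiates test_dtype() for every real dtype plus Half via the macro
  // expansion below.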
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Half, TEST_ENTRY);
#undef TEST_ENTRY
  // TODO: Also add tests for complex, quantized, and other types. Easiest
  // way to do that would be to make TensorFactory support zeros() and ones()
  // for those types.
}

TEST_F(OpAddmmOutTest, EmptyInputWithEmptyOutTensorPasses) {
  TensorFactory<ScalarType::Float> tf;

  // Empty input matrices
  Tensor self = tf.make({0, 0}, {});
  Tensor x = tf.make({0, 3}, {});
  Tensor y = tf.make({3, 0}, {});

  // Output matrix is also empty
  Tensor out = tf.make({0, 0}, {});

  Tensor expected = tf.make({0, 0}, {});

  EXPECT_TENSOR_EQ(
      op_addmm_out(self, x, y, Scalar(2), Scalar(3), out), expected);
}

TEST_F(OpAddmmOutTest, FloatTensorDtypeAndIntScalarTypePasses) {
  // case 1: Tensor dtype float, scalar type int
  TensorFactory<ScalarType::Float> tff;
  // matmul gives 4 * 2 * 3 = 24, α (= 3) * 24 = 72, 72 + β (= 2) * self = 74
  Tensor self = tff.full({3, 5}, 1);
  Tensor x = tff.full({3, 4}, 2);
  Tensor y = tff.full({4, 5}, 3);

  // Output shape should be (3, 5)
  Tensor out = tff.zeros({3, 5});

  Tensor expected = tff.full({3, 5}, 74);

  EXPECT_TENSOR_EQ(
      op_addmm_out(self, x, y, Scalar(2), Scalar(3), out), expected);
}

TEST_F(OpAddmmOutTest, IntTensorDtypeAndFloatScalarTypePasses) {
  // case 2: Tensor dtype int, scalar type float
  TensorFactory<ScalarType::Int> tfi;
  // matmul gives 4 * 2 * 3 = 24, α (= 3) * 24 = 72, 72 + β (= 2) * self = 74
  Tensor self = tfi.full({3, 5}, 1);
  Tensor x = tfi.full({3, 4}, 2);
  Tensor y = tfi.full({4, 5}, 3);

  // Output shape should be (3, 5)
  Tensor out = tfi.zeros({3, 5});

  Tensor expected = tfi.full({3, 5}, 74);

  EXPECT_TENSOR_EQ(
      op_addmm_out(self, x, y, Scalar(2.0), Scalar(3.0), out), expected);
}

TEST_F(OpAddmmOutTest, InfinityTensorAndFloatScalarTypePasses) {
  // case 3: self tensor filled with infinity, float tensor dtype
  TensorFactory<ScalarType::Float> tff;

  Tensor self = tff.full({3, 5}, std::numeric_limits<float>::infinity());
  Tensor x = tff.full({3, 4}, 2);
  Tensor y = tff.full({4, 5}, 3);

  // Output shape should be (3, 5)
  Tensor out = tff.zeros({3, 5});

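  // β * inf is still inf, and adding the finite α * (x @ y) term keeps it inf,
  // so every element of the expected output is inf.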
  Tensor expected = tff.full({3, 5}, std::numeric_limits<float>::infinity());

  EXPECT_TENSOR_EQ(
      op_addmm_out(self, x, y, Scalar(2), Scalar(3), out), expected);
}

TEST_F(OpAddmmOutTest, MismatchedDimensionsDies) {
  TensorFactory<ScalarType::Int> tf;

  Tensor self = tf.full({2, 2}, 3);
  Tensor x = tf.full({2, 2}, 3);

  Tensor wrong_y = tf.full({3, 1}, 1);
  Tensor right_y = tf.full({2, 2}, 1);

  // Make an out tensor of the right shape, filled with zeros.
  Tensor out = tf.full({2, 2}, 0);

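  // With right_y: each element of x @ right_y is 3 * 1 + 3 * 1 = 6, and
  // 6 + 1 * self = 6 + 3 = 9.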
  Tensor expected = tf.full({2, 2}, 9);
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_addmm_out(self, x, wrong_y, Scalar(1), Scalar(1), out));

  EXPECT_TENSOR_EQ(
      op_addmm_out(self, x, right_y, Scalar(1), Scalar(1), out), expected);
}

TEST_F(OpAddmmOutTest, MismatchedDimensionSizeDies) {
  TensorFactory<ScalarType::Int> tf;
  Tensor self = tf.full({2, 2}, 3);
  Tensor x = tf.full({2, 2}, 3);

  // wrong_y has incompatible dim
  Tensor wrong_y = tf.full({2, 2, 2}, 1);
  Tensor right_y = tf.full({2, 2}, 1);

  // wrong_out has incompatible dim
  Tensor right_out = tf.ones({2, 2});
  Tensor wrong_out = tf.ones({2, 2, 3});

  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched dimensions";
  }

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_addmm_out(self, x, right_y, Scalar(1), Scalar(1), wrong_out));
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_addmm_out(self, x, wrong_y, Scalar(1), Scalar(1), right_out));
}

TEST_F(OpAddmmOutTest, WrongOutShapeDies) {
  TensorFactory<ScalarType::Int> tf;
  Tensor self = tf.ones({10, 4});
  Tensor x = tf.ones({10, 3});

  Tensor y = tf.ones({3, 4});

  // wrong_out has incompatible shape
  Tensor right_out = tf.ones({10, 4});
  Tensor wrong_out = tf.ones({7, 5});

  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle wrong out shape";
  }

  ET_EXPECT_KERNEL_FAILURE(
      context_, op_addmm_out(self, x, y, Scalar(1), Scalar(1), wrong_out));

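  // With right_out: each element of x @ y is 1 + 1 + 1 = 3, and 3 + self = 4.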
  EXPECT_TENSOR_EQ(
      op_addmm_out(self, x, y, Scalar(1), Scalar(1), right_out),
      tf.full({10, 4}, 4));
}

TEST_F(OpAddmmOutTest, BroadcastTest) {
  TensorFactory<ScalarType::Int> tf;

  Tensor self = tf.make({1}, {1});
  Tensor x = tf.make({2, 2}, {1, 2, 3, 4});
  Tensor y = tf.make({2, 2}, {1, 2, 3, 4});

  Tensor out = tf.make({2, 2}, {0, 0, 0, 0});

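  // x @ y = {{7, 10}, {15, 22}}; adding the broadcast self value 1 gives
  // {{8, 11}, {16, 23}}.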
  EXPECT_TENSOR_EQ(
      op_addmm_out(self, x, y, Scalar(1), Scalar(1), out),
      tf.make({2, 2}, {8, 11, 16, 23}));
}
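
// Verify that a self tensor of shape [1, 2] broadcasts across the rows of the
// [3, 2] matmul result.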
TEST_F(OpAddmmOutTest, BroadcastDimSize1) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({1, 2}, {0.9937992691993713, 0.7011417150497437});
  Tensor y = tf.make(
      {3, 6},
      {0.3271445035934448,
       0.4104803800582886,
       0.26973772048950195,
       0.29142987728118896,
       0.20096111297607422,
       0.7686975002288818,
       0.07416731119155884,
       0.276896595954895,
       0.43525755405426025,
       0.8261672854423523,
       0.22888076305389404,
       0.042113542556762695,
       0.8771350979804993,
       0.4088439345359802,
       0.0258103609085083,
       0.26305103302001953,
       0.6766068339347839,
       0.3576545715332031});
  Tensor z = tf.make(
      {6, 2},
      {0.5702318549156189,
       0.8886868953704834,
       0.8667161464691162,
       0.7151150107383728,
       0.19591552019119263,
       0.7918031811714172,
       0.8956874012947083,
       0.7162176966667175,
       0.34151601791381836,
       0.16078311204910278,
       0.6722156405448914,
       0.048251569271087646});
  Tensor expected_result = tf.make(
      {3, 2},
      {2.4353551864624023,
       1.7771198749542236,
       2.207819700241089,
       1.9402521848678589,
       2.5604825019836426,
       2.107893466949463});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_addmm_out(x, y, z, Scalar(1), Scalar(1), out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

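// Verify that a 1-D self tensor of shape [2] (no leading dim) broadcasts
// across the rows of the [3, 2] matmul result.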
TEST_F(OpAddmmOutTest, BroadcastDimSizeMissing) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({2}, {0.9937992691993713, 0.7011417150497437});
  Tensor y = tf.make(
      {3, 6},
      {0.3271445035934448,
       0.4104803800582886,
       0.26973772048950195,
       0.29142987728118896,
       0.20096111297607422,
       0.7686975002288818,
       0.07416731119155884,
       0.276896595954895,
       0.43525755405426025,
       0.8261672854423523,
       0.22888076305389404,
       0.042113542556762695,
       0.8771350979804993,
       0.4088439345359802,
       0.0258103609085083,
       0.26305103302001953,
       0.6766068339347839,
       0.3576545715332031});
  Tensor z = tf.make(
      {6, 2},
      {0.5702318549156189,
       0.8886868953704834,
       0.8667161464691162,
       0.7151150107383728,
       0.19591552019119263,
       0.7918031811714172,
       0.8956874012947083,
       0.7162176966667175,
       0.34151601791381836,
       0.16078311204910278,
       0.6722156405448914,
       0.048251569271087646});
  Tensor expected_result = tf.make(
      {3, 2},
      {2.4353551864624023,
       1.7771198749542236,
       2.207819700241089,
       1.9402521848678589,
       2.5604825019836426,
       2.107893466949463});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_addmm_out(x, y, z, Scalar(1), Scalar(1), out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

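// Same broadcast scenario as BroadcastDimSize1 (self of shape [1, 2]),
// exercised with a different set of values.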
TEST_F(OpAddmmOutTest, BroadcastDimSizeIsOne) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make({1, 2}, {0.9093303680419922, 0.37621551752090454});
  Tensor y = tf.make(
      {3, 6},
      {0.5741164088249207,
       0.3001101613044739,
       0.6543494462966919,
       0.8815506100654602,
       0.8948686122894287,
       0.3319156765937805,
       0.6683467030525208,
       0.37235790491104126,
       0.15439540147781372,
       0.05733710527420044,
       0.5467379093170166,
       0.9564069509506226,
       0.2915573716163635,
       0.5548340082168579,
       0.20116734504699707,
       0.8199875950813293,
       0.270835816860199,
       0.1414813995361328});
  Tensor z = tf.make(
      {6, 2},
      {0.6883938312530518,
       0.9387704133987427,
       0.6991894841194153,
       0.2945629954338074,
       0.48106586933135986,
       0.932110607624054,
       0.9461215138435364,
       0.7682468295097351,
       0.6223915219306946,
       0.0702824592590332,
       0.9750580787658691,
       0.05068659782409668});
  Tensor expected_result = tf.make(
      {3, 2},
      {3.5438172817230225,
       2.3704721927642822,
       3.0311243534088135,
       1.388188123703003,
       2.6770718097686768,
       1.6570236682891846});

  Tensor out = tf.zeros({3, 2});
  Tensor ret = op_addmm_out(x, y, z, Scalar(1), Scalar(1), out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

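// The out tensor is allocated with the expected shape [3, 2] and marked
// DYNAMIC_BOUND, so the kernel may resize it within that bound.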
TEST_F(OpAddmmOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.5024666786193848,
       0.8311734795570374,
       0.17922323942184448,
       0.5711425542831421,
       0.23492926359176636,
       0.6693081259727478});
  Tensor y = tf.make(
      {3, 6},
      {0.8927820920944214,
       0.13490021228790283,
       0.49518370628356934,
       0.027777791023254395,
       0.7909245491027832,
       0.07999932765960693,
       0.9496669173240662,
       0.18807870149612427,
       0.44375330209732056,
       0.761903703212738,
       0.24175149202346802,
       0.31033122539520264,
       0.8609206080436707,
       0.1580638885498047,
       0.2585788369178772,
       0.4787442088127136,
       0.17180007696151733,
       0.2109091877937317});
  Tensor z = tf.make(
      {6, 2},
      {0.06361657381057739,
       0.8065286874771118,
       0.610871434211731,
       0.19808048009872437,
       0.7010428309440613,
       0.904334545135498,
       0.8460395932197571,
       0.34137529134750366,
       0.4836529493331909,
       0.2751874327659607,
       0.22036516666412354,
       0.742312490940094});
  Tensor expected_result = tf.make(
      {3, 2},
      {1.4124772548675537,
       2.3122801780700684,
       1.495530605316162,
       2.3326172828674316,
       1.1021348237991333,
       1.9960856437683105});

  Tensor out =
      tf.zeros({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_addmm_out(x, y, z, Scalar(1), Scalar(1), out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

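// The out tensor is allocated as [10, 10] but marked DYNAMIC_BOUND, so the
// kernel is expected to resize it down to the actual [3, 2] result shape.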
TEST_F(OpAddmmOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.5024666786193848,
       0.8311734795570374,
       0.17922323942184448,
       0.5711425542831421,
       0.23492926359176636,
       0.6693081259727478});
  Tensor y = tf.make(
      {3, 6},
      {0.8927820920944214,
       0.13490021228790283,
       0.49518370628356934,
       0.027777791023254395,
       0.7909245491027832,
       0.07999932765960693,
       0.9496669173240662,
       0.18807870149612427,
       0.44375330209732056,
       0.761903703212738,
       0.24175149202346802,
       0.31033122539520264,
       0.8609206080436707,
       0.1580638885498047,
       0.2585788369178772,
       0.4787442088127136,
       0.17180007696151733,
       0.2109091877937317});
  Tensor z = tf.make(
      {6, 2},
      {0.06361657381057739,
       0.8065286874771118,
       0.610871434211731,
       0.19808048009872437,
       0.7010428309440613,
       0.904334545135498,
       0.8460395932197571,
       0.34137529134750366,
       0.4836529493331909,
       0.2751874327659607,
       0.22036516666412354,
       0.742312490940094});
  Tensor expected_result = tf.make(
      {3, 2},
      {1.4124772548675537,
       2.3122801780700684,
       1.495530605316162,
       2.3326172828674316,
       1.1021348237991333,
       1.9960856437683105});

  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_addmm_out(x, y, z, Scalar(1), Scalar(1), out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

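// DYNAMIC_UNBOUND output: the kernel would need to grow a [1, 1] out tensor to
// the [3, 2] result shape; the test is skipped because unbound dynamic shapes
// are not supported yet.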
TEST_F(OpAddmmOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape unbound not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.754013180732727,
       0.16418755054473877,
       0.8077310919761658,
       0.7187556624412537,
       0.0470539927482605,
       0.2438456416130066});
  Tensor y = tf.make(
      {3, 6},
      {0.5899912118911743,
       0.5052928328514099,
       0.13990312814712524,
       0.22438400983810425,
       0.1697748899459839,
       0.6022286415100098,
       0.08701932430267334,
       0.7246091961860657,
       0.44388288259506226,
       0.9451560974121094,
       0.8658323884010315,
       0.781434953212738,
       0.02855396270751953,
       0.49756181240081787,
       0.506054699420929,
       0.12560266256332397,
       0.7099084854125977,
       0.04813879728317261});
  Tensor z = tf.make(
      {6, 2},
      {0.19827371835708618,
       0.486919641494751,
       0.7659645080566406,
       0.7863746285438538,
       0.032599568367004395,
       0.8414170145988464,
       0.7014893293380737,
       0.2445545196533203,
       0.07429623603820801,
       0.12777382135391235,
       0.39169949293136597,
       0.80079185962677});
  Tensor expected_result = tf.make(
      {3, 2},
      {1.6684993505477905,
       1.5253589153289795,
       2.427912712097168,
       2.6719717979431152,
       0.6100357174873352,
       1.2347958087921143});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_addmm_out(x, y, z, Scalar(1), Scalar(1), out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}