/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

#include <gtest/gtest.h>

#include <vector>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::SupportedFeatures;
using torch::executor::testing::TensorFactory;

class OpLogSoftmaxOutTest : public OperatorTest {
 protected:
  // Thin wrapper around the generated kernel entry point; forwards the
  // test-wide KernelRuntimeContext (context_) as the first argument.
  Tensor& op_log_softmax_out(
      const Tensor& self,
      int64_t dim,
      bool half_to_float,
      Tensor& out) {
    return torch::executor::aten::_log_softmax_outf(
        context_, self, dim, half_to_float, out);
  }

  // A generic smoke test that works for the supported dtypes.
  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;

    // Input tensor with shape (2, 3) and values (0, 1, 2, 3, 4, 5).
    // clang-format off
    Tensor x = tf.make(
      {2, 3},
      {
        0, 1, 2,
        3, 4, 5
      });
    // clang-format on

    Tensor out = tf.zeros({2, 3});

    op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);

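    // Worked check (from the definition, not the kernel):
    // log_softmax(x)_i = x_i - log(sum_j exp(x_j)) along dim 1.
    // Row (0, 1, 2): log(1 + e + e^2) = log(11.1073...) ~= 2.407606, so the
    // row maps to (-2.407606, -1.407606, -0.407606). Row (3, 4, 5) is the
    // same row shifted by 3, and log_softmax is shift-invariant, so it
    // yields identical values.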
    // clang-format off
    Tensor expected = tf.make(
      {2, 3},
      {
        -2.40761, -1.40761, -0.407606,
        -2.40761, -1.40761, -0.407606
      });
    // clang-format on

    EXPECT_TENSOR_CLOSE(out, expected);
  }
};

TEST_F(OpLogSoftmaxOutTest, Smoke) {
  TensorFactory<ScalarType::Float> tff;
  std::vector<int32_t> sizes = {1, 3};
  Tensor in = tff.make(sizes, {0, 1, 2});
  Tensor out = tff.zeros(sizes);

  Tensor ret = op_log_softmax_out(in, /*dim=*/1, /*half_to_float=*/false, out);

  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);

  // Expected tensor: same row (0, 1, 2) derivation as in test_dtype above.
  Tensor expected = tff.make({1, 3}, {-2.40761, -1.40761, -0.407606});

  EXPECT_TENSOR_CLOSE(out, expected);
}

TEST_F(OpLogSoftmaxOutTest, AllDtypesSupported) {
  if (!SupportedFeatures::get()->op_log_softmax_dtype_double) {
    GTEST_SKIP() << "This kernel does not support dtype double";
  }

  test_dtype<float, ScalarType::Float>();
  test_dtype<double, ScalarType::Double>();
  // TODO: Also add tests for half, complex, quantized, and other types. The
  // easiest way to do that would be to make TensorFactory support zeros() and
  // ones() for those types.
}

TEST_F(OpLogSoftmaxOutTest, MismatchedDimensionsDies) {
  if (SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen currently supports mismatched dimensions";
  }

  TensorFactory<ScalarType::Float> tff;

  // Input tensor with shape (1, 3) and values (0, 1, 2).
  Tensor x = tff.make({1, 3}, {0, 1, 2});

  // Output shape should be (1, 3).
  Tensor out = tff.zeros({1, 3});

  // dim=3 is out of bounds: valid dims for a rank-2 tensor are in [-2, 1].
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_log_softmax_out(x, /*dim=*/3, /*half_to_float=*/false, out));
}

TEST_F(OpLogSoftmaxOutTest, MismatchedDimensionSizeDies) {
  if (SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen currently supports mismatched dimension size";
  }

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.ones({3, 4});

  // wrong_out has a rank and shape incompatible with the (3, 4) input.
  Tensor wrong_out = tf.zeros({2, 10, 4});

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, wrong_out));
}

TEST_F(OpLogSoftmaxOutTest, TestWithLargeNumber) {
  if (!SupportedFeatures::get()->op_log_softmax_dtype_double) {
    GTEST_SKIP() << "This kernel does not support dtype double";
  }

  if (SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen does not support mixing float and double";
  }

  TensorFactory<ScalarType::Double> tf;

  // Input tensor with shape (1, 2) and values (-1e5, 1e5).
  // clang-format off
  Tensor x = tf.make(
    {1, 2},
    {
      -1e5, 1e5
    });
  // clang-format on

  Tensor out = tf.zeros({1, 2});

  op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);

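  // Worked check, assuming the usual max-subtraction trick for numerical
  // stability: logsumexp = 1e5 + log(exp(-2e5) + 1) ~= 1e5, so the output is
  // (-1e5 - 1e5, 1e5 - 1e5) = (-200000, 0). A naive sum of exp(1e5) would
  // overflow to inf, which is what this test guards against.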
  // clang-format off
  Tensor expected = tf.make(
    {1, 2},
    {
      -200000, 0
    });
  // clang-format on

  EXPECT_TENSOR_CLOSE(out, expected);
}

TEST_F(OpLogSoftmaxOutTest, NegativeDim) {
  if (!SupportedFeatures::get()->op_log_softmax_dtype_double) {
    GTEST_SKIP() << "This kernel does not support dtype double";
  }

  if (SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen does not support negative dim";
  }

  TensorFactory<ScalarType::Float> tf;

  // Input tensor with shape (2, 3) and values (0, 1, 2, 3, 4, 5).
  // clang-format off
  Tensor x = tf.make(
    {2, 3},
    {
      0, 1, 2,
      3, 4, 5
    });
  // clang-format on

  Tensor out = tf.zeros({2, 3});
  Tensor out_negative_dim = tf.zeros({2, 3});

  op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);
  op_log_softmax_out(x, /*dim=*/-1, /*half_to_float=*/false, out_negative_dim);

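  // On a rank-2 tensor, dim=-1 wraps around to dim=1, so both calls must
  // produce identical results.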
  // clang-format off
  Tensor expected = tf.make(
    {2, 3},
    {
      -2.40761, -1.40761, -0.407606,
      -2.40761, -1.40761, -0.407606
    });
  // clang-format on

  EXPECT_TENSOR_CLOSE(out, expected);
  EXPECT_TENSOR_CLOSE(out_negative_dim, expected);

  op_log_softmax_out(x, /*dim=*/0, /*half_to_float=*/false, out);
  op_log_softmax_out(x, /*dim=*/-2, /*half_to_float=*/false, out_negative_dim);

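  // Worked check for dim=0: each column is (k, k + 3), so
  // logsumexp = k + 3 + log(1 + exp(-3)), with log(1 + exp(-3)) ~= 0.0485874.
  // That gives -3.0485874 in the first row and -0.0485874 in the second.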
  // clang-format off
  expected = tf.make(
    {2, 3},
    {
      -3.04859, -3.04859, -3.04859,
      -0.0485874, -0.0485874, -0.0485874
    });
  // clang-format on

  EXPECT_TENSOR_CLOSE(out, expected);
  EXPECT_TENSOR_CLOSE(out_negative_dim, expected);
}

#if !defined(USE_ATEN_LIB)
TEST_F(OpLogSoftmaxOutTest, UpperBoundOutTensor) {
  TensorFactory<ScalarType::Float> tff;

  // Input tensor with shape (2, 3) and values (0, 1, 2, 3, 4, 5).
  // clang-format off
  Tensor x = tff.make(
    {2, 3},
    {
      0, 1, 2,
      3, 4, 5
    });
  // clang-format on

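  // out is allocated with an upper-bound shape of (5, 9); with DYNAMIC_BOUND
  // dynamism the kernel is expected to resize it down to the input's (2, 3)
  // shape before writing the results.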
  Tensor out =
      tff.zeros({5, 9}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);

  op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);

  // clang-format off
  Tensor expected = tff.make(
    {2, 3},
    {
      -2.40761, -1.40761, -0.407606,
      -2.40761, -1.40761, -0.407606
    });
  // clang-format on

  EXPECT_TENSOR_CLOSE(out, expected);
}
#endif

TEST_F(OpLogSoftmaxOutTest, SimpleGeneratedCase) {
  TensorFactory<ScalarType::Float> tf;

  // All elements are equal, so every entry of log_softmax along dim 1 is
  // log(1/10) = -log(10) ~= -2.3025851.
  Tensor x = tf.ones({10, 10});
  Tensor expected_result =
      tf.make({10, 10}, std::vector<float>(100, -2.3025851249694824));

  Tensor out = tf.zeros({10, 10});
  op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpLogSoftmaxOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.754019558429718,
       0.8973914980888367,
       0.34469079971313477,
       0.40464818477630615,
       0.36159539222717285,
       0.1138353943824768});
  Tensor expected_result = tf.make(
      {3, 2},
      {-0.7674003839492798,
       -0.6240284442901611,
       -0.7235751748085022,
       -0.6636177897453308,
       -0.576920747756958,
       -0.824680745601654});

  Tensor out =
      tf.zeros({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpLogSoftmaxOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.754019558429718,
       0.8973914980888367,
       0.34469079971313477,
       0.40464818477630615,
       0.36159539222717285,
       0.1138353943824768});
  Tensor expected_result = tf.make(
      {3, 2},
      {-0.7674003839492798,
       -0.6240284442901611,
       -0.7235751748085022,
       -0.6636177897453308,
       -0.576920747756958,
       -0.824680745601654});

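  // Same data as the previous test, but out starts at a larger (10, 10)
  // upper bound; the kernel is expected to shrink it to (3, 2).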
  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpLogSoftmaxOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape not supported";
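  // The body below is currently dead code; it documents the intended
  // behavior for fully unbound dynamic shapes once they are supported.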
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.754019558429718,
       0.8973914980888367,
       0.34469079971313477,
       0.40464818477630615,
       0.36159539222717285,
       0.1138353943824768});
  Tensor expected_result = tf.make(
      {3, 2},
      {-0.7674003839492798,
       -0.6240284442901611,
       -0.7235751748085022,
       -0.6636177897453308,
       -0.576920747756958,
       -0.824680745601654});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_log_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
387