/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

9 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10 #include <executorch/kernels/test/TestUtil.h>
11 #include <executorch/kernels/test/supported_features.h>
12 #include <executorch/runtime/core/exec_aten/exec_aten.h>
13 #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
14 #include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
15 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
16
17 #include <gtest/gtest.h>
18
19 using namespace ::testing;
20 using exec_aten::ArrayRef;
21 using exec_aten::ScalarType;
22 using exec_aten::Tensor;
23 using torch::executor::testing::TensorFactory;
24
25 class OpSoftmaxOutTest : public OperatorTest {
26 protected:
op_softmax_out(const Tensor & self,int64_t dim,bool half_to_float,Tensor & out)27 Tensor& op_softmax_out(
28 const Tensor& self,
29 int64_t dim,
30 bool half_to_float,
31 Tensor& out) {
32 return torch::executor::aten::_softmax_outf(
33 context_, self, dim, half_to_float, out);
34 }
35
36 // A generic smoke test that works for the supported dtypes.
37 template <class CTYPE, exec_aten::ScalarType DTYPE>
test_dtype()38 void test_dtype() {
39 TensorFactory<DTYPE> tf;
40
41 // Input tensor with shape (2, 3) and values (0, 1, 2, 3, 4, 5).
42 // clang-format off
43 Tensor x = tf.make(
44 {2, 3},
45 {
46 0, 1, 2,
47 3, 4, 5
48 });
49 // clang-format on
50
51 Tensor out = tf.zeros({2, 3});
52
53 op_softmax_out(x, /*dim=*/1, /*half_to_float*/ false, out);
54
55 // clang-format off
56 Tensor expected = tf.make(
57 {2, 3},
58 {
59 0.0900306, 0.244728, 0.665241,
60 0.0900306, 0.244728, 0.665241
61 });
62 // clang-format on
63
64 EXPECT_TENSOR_CLOSE(out, expected);
65 }
66 };
67
// Basic float smoke test: softmax over a 1x3 tensor along dim 1, and the
// operator must return the provided out tensor by reference.
TEST_F(OpSoftmaxOutTest, Smoke) {
  TensorFactory<ScalarType::Float> tff;
  std::vector<int32_t> sizes = {1, 3};
  Tensor in = tff.make(sizes, {0, 1, 2});
  Tensor out = tff.zeros(sizes);

  Tensor ret = op_softmax_out(in, /*dim=*/1, /*half_to_float=*/false, out);

  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);

  // Expected tensor: exp(i) / (exp(0) + exp(1) + exp(2)).
  Tensor expected = tff.make({1, 3}, {0.0900306, 0.244728, 0.665241});

  EXPECT_TENSOR_CLOSE(out, expected);
}
84
// Half-precision input: softmax of an all-ones 1x4 tensor is uniform 0.25.
TEST_F(OpSoftmaxOutTest, HalfSupport) {
  TensorFactory<ScalarType::Half> tfh;
  std::vector<int32_t> sizes = {1, 4};
  Tensor in = tfh.ones(sizes);
  Tensor out = tfh.zeros(sizes);

  Tensor ret = op_softmax_out(in, /*dim=*/1, /*half_to_float=*/false, out);

  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);

  // Expected tensor: equal inputs produce a uniform distribution.
  Tensor expected = tfh.make({1, 4}, {0.25, 0.25, 0.25, 0.25});

  EXPECT_TENSOR_CLOSE(out, expected);
}
101
// Runs the dtype-generic smoke test for each supported floating dtype.
TEST_F(OpSoftmaxOutTest, AllDtypesSupported) {
  test_dtype<float, ScalarType::Float>();
  test_dtype<double, ScalarType::Double>();
  // TODO: Also add tests for half, complex, quantized, and other types. Easiest
  // way to do that would be to make TensorFactory support zeros() and ones()
  // for those types.
}
109
// An out-of-bounds `dim` (3 on a rank-2 tensor) must be rejected by the
// kernel.
TEST_F(OpSoftmaxOutTest, MismatchedDimensionsDies) {
  TensorFactory<ScalarType::Float> tff;

  // Input tensor with shape (1, 3) and values (0, 1, 2).
  Tensor x = tff.make({1, 3}, {0, 1, 2});

  // Output shape should be (1, 3)
  Tensor out = tff.zeros({1, 3});

  // Dim out of bounds
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_softmax_out(x, /*dim=*/3, /*half_to_float*/ false, out));
}
123
// An out tensor whose shape is incompatible with the input must be rejected
// by the portable kernel (ATen can resize, so the check is skipped there).
TEST_F(OpSoftmaxOutTest, MismatchedDimensionSizeDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched dimension size";
  }
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.ones({3, 4});

  // wrong_out has incompatible dim
  Tensor wrong_out = tf.zeros({2, 10, 4});

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_softmax_out(x, /*dim=*/1, /*half_to_float*/ false, wrong_out));
}
139
// Negative dims must behave like their normalized counterparts:
// dim=-1 matches dim=1 and dim=-2 matches dim=0 on a rank-2 tensor.
TEST_F(OpSoftmaxOutTest, NegativeDim) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  TensorFactory<ScalarType::Float> tf;

  // Input tensor with shape (2, 3) and values (0, 1, 2, 3, 4, 5).
  // clang-format off
  Tensor x = tf.make(
    {2, 3},
    {
      0, 1, 2,
      3, 4, 5
    });
  // clang-format on

  Tensor out = tf.zeros({2, 3});
  Tensor out_negative_dim = tf.zeros({2, 3});

  op_softmax_out(x, /*dim=*/1, /*half_to_float=*/false, out);
  op_softmax_out(x, /*dim=*/-1, /*half_to_float=*/false, out_negative_dim);

  // Row-wise softmax; both rows give the same distribution.
  // clang-format off
  Tensor expected = tf.make(
    {2, 3},
    {
      0.0900306, 0.244728, 0.665241,
      0.0900306, 0.244728, 0.665241
    });
  // clang-format on

  EXPECT_TENSOR_CLOSE(out, expected);
  EXPECT_TENSOR_CLOSE(out_negative_dim, expected);

  op_softmax_out(x, /*dim=*/0, /*half_to_float=*/false, out);
  op_softmax_out(x, /*dim=*/-2, /*half_to_float=*/false, out_negative_dim);

  // Column-wise softmax over pairs that each differ by 3:
  // (1/(1+e^3), e^3/(1+e^3)) for every column.
  // clang-format off
  expected = tf.make(
    {2, 3},
    {
      0.0474259, 0.0474259, 0.0474259,
      0.952574, 0.952574, 0.952574
    });
  // clang-format on

  EXPECT_TENSOR_CLOSE(out, expected);
  EXPECT_TENSOR_CLOSE(out_negative_dim, expected);
}
189
// Generated case: softmax of a 10x10 all-ones tensor along dim 1 is uniform
// (each element 1/10). The hand-written 100-element literals are replaced by
// vector fill constructors, which express the same data far more readably.
TEST_F(OpSoftmaxOutTest, SimpleGeneratedCase) {
  TensorFactory<ScalarType::Float> tf;

  // 100 ones.
  Tensor x = tf.make({10, 10}, std::vector<float>(100, 1.0f));
  // 0.10000000149011612 is exactly float(0.1), so a 0.1f fill reproduces the
  // original generated expectation bit-for-bit.
  Tensor expected_result = tf.make({10, 10}, std::vector<float>(100, 0.1f));

  Tensor out = tf.zeros({10, 10});
  Tensor ret = op_softmax_out(x, 1, false, out);
  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
243
// Dynamic-bound out tensor whose upper-bound shape equals the result shape.
TEST_F(OpSoftmaxOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.3893158435821533,
       0.4583776593208313,
       0.14476794004440308,
       0.44050133228302,
       0.2491583228111267,
       0.8098345994949341});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.4827413856983185,
       0.5172585844993591,
       0.426600843667984,
       0.5733991861343384,
       0.3633909821510315,
       0.6366089582443237});

  Tensor out =
      tf.zeros({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_softmax_out(x, 1, false, out);
  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
269
// Dynamic-bound out tensor whose upper-bound shape (10x10) is larger than the
// result shape (3x2); the kernel must resize it down to match.
TEST_F(OpSoftmaxOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.3893158435821533,
       0.4583776593208313,
       0.14476794004440308,
       0.44050133228302,
       0.2491583228111267,
       0.8098345994949341});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.4827413856983185,
       0.5172585844993591,
       0.426600843667984,
       0.5733991861343384,
       0.3633909821510315,
       0.6366089582443237});

  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_softmax_out(x, 1, false, out);
  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
295
// Fully-dynamic out tensor that must grow from (1, 1) to the result shape.
// Skipped until unbound dynamic shapes are supported by the kernel.
TEST_F(OpSoftmaxOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape unbound not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.3893158435821533,
       0.4583776593208313,
       0.14476794004440308,
       0.44050133228302,
       0.2491583228111267,
       0.8098345994949341});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.4827413856983185,
       0.5172585844993591,
       0.426600843667984,
       0.5733991861343384,
       0.3633909821510315,
       0.6366089582443237});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_softmax_out(x, 1, false, out);
  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}
322