/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>
#include <cmath>

using namespace ::testing;
using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpGluOutTest : public OperatorTest {
 protected:
  Tensor& op_glu_out(const Tensor& self, int64_t dim, Tensor& out) {
    return torch::executor::aten::glu_outf(context_, self, dim, out);
  }

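  // glu splits the input in half along `dim` and computes
  // first_half * sigmoid(second_half), so the output is half the input size
  // along `dim`. With an all-ones input every output element is
  // 1 * sigmoid(1) ~= 0.731059, which is what the checks below expect.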
  // Common testing for glu operator
  template <ScalarType DTYPE, ScalarType OUT_DTYPE>
  void test_glu_out() {
    TensorFactory<DTYPE> tf;
    TensorFactory<OUT_DTYPE> tf_out;

    const std::vector<int32_t> sizes = {4, 2};
    const std::vector<int32_t> out_sizes_1 = {2, 2};

    // Valid input should give the expected output
    Tensor in = tf.ones(sizes);
    Tensor out = tf_out.zeros(out_sizes_1);
    op_glu_out(in, 0, out);
    EXPECT_TENSOR_CLOSE(
        out,
        tf_out.make(
            out_sizes_1, /*data=*/{0.731059, 0.731059, 0.731059, 0.731059}));
    const std::vector<int32_t> out_sizes_2 = {4, 1};
    out = tf_out.zeros(out_sizes_2);
    op_glu_out(in, 1, out);
    EXPECT_TENSOR_CLOSE(
        out,
        tf_out.make(
            out_sizes_2, /*data=*/{0.731059, 0.731059, 0.731059, 0.731059}));
  }

  // Mismatched shape tests.
  template <ScalarType INPUT_DTYPE>
  void test_glu_out_mismatched_shape() {
    TensorFactory<INPUT_DTYPE> tf_in;

    // Input tensor and out tensor dimension size mismatch
    Tensor in = tf_in.zeros(/*sizes=*/{4, 4, 4});
    Tensor out = tf_in.zeros(/*sizes=*/{2, 4, 2});

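    // glu along dim 0 of a {4, 4, 4} input would produce a {2, 4, 4} output,
    // so both of the out shapes used here should be rejected.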
    ET_EXPECT_KERNEL_FAILURE(context_, op_glu_out(in, 0, out));

    out = tf_in.zeros(/*sizes=*/{4, 4, 4});
    ET_EXPECT_KERNEL_FAILURE(context_, op_glu_out(in, 0, out));
  }

  // Invalid dimensions tests.
  template <ScalarType INPUT_DTYPE>
  void test_glu_out_invalid_dim() {
    TensorFactory<INPUT_DTYPE> tf_in;
    Tensor in = tf_in.zeros(/*sizes=*/{2, 2});
    const std::vector<int32_t> out_sizes = {1, 2};
    Tensor out = tf_in.zeros(out_sizes);

    // dim is out of range for a 2-D input
    ET_EXPECT_KERNEL_FAILURE(context_, op_glu_out(in, 3, out));

    // The size of the input along dim (3) is odd, so it cannot be halved
    in = tf_in.zeros(/*sizes=*/{3, 2});
    ET_EXPECT_KERNEL_FAILURE(context_, op_glu_out(in, 0, out));
  }

  // Unhandled input dtypes.
  template <ScalarType INPUT_DTYPE>
  void test_glu_invalid_input_dtype_dies() {
    TensorFactory<INPUT_DTYPE> tf_in;
    TensorFactory<ScalarType::Float> tf_float;

    const std::vector<int32_t> sizes = {2, 2};
    const std::vector<int32_t> out_sizes = {1, 2};
    Tensor in = tf_in.ones(sizes);
    Tensor out = tf_float.zeros(out_sizes);

    ET_EXPECT_KERNEL_FAILURE(context_, op_glu_out(in, 0, out));
  }

  // Unhandled output dtypes.
  template <ScalarType OUTPUT_DTYPE>
  void test_glu_invalid_output_dtype_dies() {
    TensorFactory<ScalarType::Float> tf_float;
    TensorFactory<OUTPUT_DTYPE> tf_out;

    const std::vector<int32_t> sizes = {2, 2};
    const std::vector<int32_t> out_sizes = {1, 2};
    Tensor in = tf_float.ones(sizes);
    Tensor out = tf_out.zeros(out_sizes);

    ET_EXPECT_KERNEL_FAILURE(context_, op_glu_out(in, 0, out));
  }
};

TEST_F(OpGluOutTest, AllInputFloatOutputSupport) {
#define TEST_ENTRY(ctype, dtype) \
  test_glu_out<ScalarType::dtype, ScalarType::Float>();
  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpGluOutTest, AllInputDoubleOutputSupport) {
#define TEST_ENTRY(ctype, dtype) \
  test_glu_out<ScalarType::dtype, ScalarType::Double>();
  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpGluOutTest, InfinityAndNANTest) {
  TensorFactory<ScalarType::Float> tf;
  const std::vector<int32_t> sizes = {4, 2};
  const std::vector<int32_t> out_sizes = {4, 1};
  Tensor in = tf.make(
      sizes, /*data=*/{INFINITY, 1, -INFINITY, 1, INFINITY, -INFINITY, NAN, 1});
  Tensor out = tf.zeros(out_sizes);
  op_glu_out(in, 1, out);
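  // Along dim 1 each row {a, b} maps to a * sigmoid(b):
  // INFINITY * sigmoid(1) = INFINITY, -INFINITY * sigmoid(1) = -INFINITY,
  // INFINITY * sigmoid(-INFINITY) = INFINITY * 0 = NAN, and NAN propagates.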
  EXPECT_TENSOR_CLOSE(
      out,
      tf.make(
          /*sizes=*/out_sizes, /*data=*/{INFINITY, -INFINITY, NAN, NAN}));
}

TEST_F(OpGluOutTest, MismatchedShapesDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched shapes";
  }
#define TEST_ENTRY(ctype, dtype) \
  test_glu_out_mismatched_shape<ScalarType::dtype>();
  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpGluOutTest, InvalidDimDies) {
#define TEST_ENTRY(ctype, dtype) test_glu_out_invalid_dim<ScalarType::dtype>();
  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpGluOutTest, AllNonFloatInputDTypeDies) {
#define TEST_ENTRY(ctype, dtype) \
  test_glu_invalid_input_dtype_dies<ScalarType::dtype>();
  ET_FORALL_INT_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpGluOutTest, AllNonFloatOutputDTypeDies) {
#define TEST_ENTRY(ctype, dtype) \
  test_glu_invalid_output_dtype_dies<ScalarType::dtype>();
  ET_FORALL_INT_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpGluOutTest, DynamicShapeUpperBoundSameAsExpected) {
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {4, 2},
      {0.057747602462768555,
       0.8781633377075195,
       0.4503108263015747,
       0.40363800525665283,
       0.3379024863243103,
       0.13906866312026978,
       0.6991606950759888,
       0.4374786615371704});
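  // glu along dim 0 treats rows 0-1 of x as the first half and rows 2-3 as
  // the second half; expected_result is first_half * sigmoid(second_half),
  // e.g. 0.0577476 * sigmoid(0.3379025) ~= 0.0337062.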
  Tensor expected_result = tf.make(
      {2, 2},
      {0.0337061733007431,
       0.4695638120174408,
       0.3008083701133728,
       0.2452739030122757});

  Tensor out =
      tf.zeros({4, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_glu_out(x, 0, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpGluOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {4, 2},
      {0.057747602462768555,
       0.8781633377075195,
       0.4503108263015747,
       0.40363800525665283,
       0.3379024863243103,
       0.13906866312026978,
       0.6991606950759888,
       0.4374786615371704});
  Tensor expected_result = tf.make(
      {2, 2},
      {0.0337061733007431,
       0.4695638120174408,
       0.3008083701133728,
       0.2452739030122757});

  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
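  // The {10, 10} allocation is only an upper bound; the kernel is expected to
  // resize out to the {2, 2} shape of expected_result before the comparison.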
  Tensor ret = op_glu_out(x, 0, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpGluOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape unbound not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {4, 2},
      {0.057747602462768555,
       0.8781633377075195,
       0.4503108263015747,
       0.40363800525665283,
       0.3379024863243103,
       0.13906866312026978,
       0.6991606950759888,
       0.4374786615371704});
  Tensor expected_result = tf.make(
      {2, 2},
      {0.0337061733007431,
       0.4695638120174408,
       0.3008083701133728,
       0.2452739030122757});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_glu_out(x, 0, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}