xref: /aosp_15_r20/external/executorch/runtime/executor/test/executor_test.cpp (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
1 /*
2  * Copyright (c) Meta Platforms, Inc. and affiliates.
3  * All rights reserved.
4  *
5  * This source code is licensed under the BSD-style license found in the
6  * LICENSE file in the root directory of this source tree.
7  */
8 
#include <cmath>

#include <gtest/gtest.h>

#include <executorch/extension/pytree/pytree.h>
#include <executorch/runtime/core/evalue.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/kernel/kernel_runtime_context.h>
#include <executorch/runtime/kernel/operator_registry.h>
#include <executorch/runtime/platform/runtime.h>
#include <executorch/test/utils/DeathTest.h>
19 
20 using exec_aten::IntArrayRef;
21 using exec_aten::Scalar;
22 using exec_aten::ScalarType;
23 using exec_aten::SizesType;
24 using exec_aten::Tensor;
25 using executorch::runtime::Error;
26 using executorch::runtime::EValue;
27 using executorch::runtime::get_op_function_from_registry;
28 using executorch::runtime::Kernel;
29 using executorch::runtime::KernelRuntimeContext;
30 using executorch::runtime::OpFunction;
31 using executorch::runtime::register_kernel;
32 using executorch::runtime::registry_has_op_function;
33 using executorch::runtime::Result;
34 using executorch::runtime::testing::TensorFactory;
35 
36 namespace pytree = ::executorch::extension::pytree;
37 
// Test fixture that initializes the ExecuTorch runtime before each test.
class ExecutorTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Runtime APIs used by the tests below require runtime_init() first.
    executorch::runtime::runtime_init();
  }
};
44 
// A freshly created Int tensor exposes its elements in insertion order.
TEST_F(ExecutorTest, Tensor) {
  TensorFactory<ScalarType::Int> tf;
  Tensor t = tf.make({2, 2}, {1, 2, 3, 4});

  const int32_t* data = t.const_data_ptr<int32_t>();
  const int32_t expected[] = {1, 2, 3, 4};
  for (size_t i = 0; i < 4; ++i) {
    ASSERT_EQ(data[i], expected[i]);
  }
}
55 
// Wrapping a Tensor in an EValue preserves its tag and byte size.
TEST_F(ExecutorTest, EValue) {
  TensorFactory<ScalarType::Int> tf;
  Tensor tensor = tf.make({2, 2}, {1, 2, 3, 4});

  EValue value(tensor);
  ASSERT_TRUE(value.isTensor());
  // 4 int32 elements -> 16 bytes.
  ASSERT_EQ(value.toTensor().nbytes(), 16);
}
64 
/**
 * Returns the maximum representation error of `f` in IEEE half precision.
 *
 * According to the precision limitations listed here:
 * https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations
 * the max precision error for a half in the range [2^n, 2^(n+1)] is 2^(n-10).
 *
 * std::floor (rather than the previous truncation toward zero) is used so
 * that n is computed correctly for magnitudes below 1, where log2 is
 * negative; callers in this file only pass values >= 1, which are unchanged.
 */
float toleranceFloat16(float f) {
  const int exponent = static_cast<int>(std::floor(std::log2(std::fabs(f))));
  return std::pow(2, exponent - 10);
}
73 
// Half-precision tensor: metadata is exact, element values are approximate.
TEST_F(ExecutorTest, TensorHalf) {
  TensorFactory<ScalarType::Half> tf;
  Tensor half_tensor = tf.make({2, 2}, {1.0f, 2.0f, 3.0f, 4.0f});

  ASSERT_EQ(half_tensor.nbytes(), 8);
  ASSERT_EQ(half_tensor.element_size(), 2);
  ASSERT_EQ(half_tensor.numel(), 4);
  ASSERT_EQ(half_tensor.scalar_type(), ScalarType::Half);

  // Compare the first two elements within half-precision tolerance.
  const auto* data = half_tensor.const_data_ptr<exec_aten::Half>();
  const float expected[] = {1.0f, 2.0f};
  for (size_t i = 0; i < 2; ++i) {
    ASSERT_NEAR(
        data[i],
        expected[i],
        toleranceFloat16(fmax(fabs(expected[i]), fabs(data[i]))));
  }
}
89 
// Looks up aten::add.out in the kernel registry and invokes it directly,
// then checks the result written into the out tensor.
TEST_F(ExecutorTest, RegistryLookupAndCall) {
  const char* op_name = "aten::add.out";
  Result<OpFunction> func = get_op_function_from_registry(op_name);
  ASSERT_EQ(func.error(), Error::Ok);
  ASSERT_NE(*func, nullptr);

  TensorFactory<ScalarType::Int> tf;
  constexpr size_t num_evalues = 4;
  EValue evalues[num_evalues] = {
      tf.make({2, 2}, {1, 2, 3, 4}),
      tf.make({2, 2}, {5, 6, 7, 8}),
      Scalar(1),
      tf.make({2, 2}, {0, 0, 0, 0}),
  };

  // Out-variant kernels take the out tensor twice: once as the `out`
  // argument and once as the returned-value slot, so the last evalue is
  // passed in both of the final two positions.
  EValue* kernel_args[5] = {
      &evalues[0], &evalues[1], &evalues[2], &evalues[3], &evalues[3]};

  KernelRuntimeContext context{};
  (*func)(context, kernel_args);

  // Last element: 4 + 8 * 1 == 12.
  const auto* out_data = evalues[3].toTensor().const_data_ptr<int32_t>();
  ASSERT_EQ(out_data[3], 12);
}
117 
TEST_F(ExecutorTest, IntArrayRefSingleElement) {
  // `ref` stores a pointer to `value`, so `value` must outlive `ref`.
  const IntArrayRef::value_type value = 1;
  IntArrayRef ref(value);
  EXPECT_EQ(ref[0], 1);
}
125 
TEST_F(ExecutorTest, IntArrayRefDataAndLength) {
  // `ref` is a non-owning view over `values`, which must outlive it.
  const IntArrayRef::value_type values[4] = {5, 6, 7, 8};
  const IntArrayRef::size_type count = 4;
  IntArrayRef ref(values, count);

  EXPECT_EQ(ref.size(), count);
  EXPECT_EQ(ref.front(), 5);
  EXPECT_EQ(ref.back(), 8);
}
137 
// Scalars of each tag (bool, int, double) wrap into EValues and round-trip.
TEST_F(ExecutorTest, EValueFromScalar) {
  const Scalar bool_scalar((bool)true);
  const Scalar int_scalar((int64_t)2);
  const Scalar double_scalar((double)3.0);

  EValue bool_value(bool_scalar);
  ASSERT_TRUE(bool_value.isScalar());
  ASSERT_TRUE(bool_value.isBool());
  ASSERT_EQ(bool_value.toBool(), true);

  EValue int_value(int_scalar);
  ASSERT_TRUE(int_value.isScalar());
  ASSERT_TRUE(int_value.isInt());
  ASSERT_EQ(int_value.toInt(), 2);

  EValue double_value(double_scalar);
  ASSERT_TRUE(double_value.isScalar());
  ASSERT_TRUE(double_value.isDouble());
  ASSERT_NEAR(double_value.toDouble(), 3.0, 0.01);
}
158 
// An int EValue converts back to a Scalar holding the same integer.
TEST_F(ExecutorTest, EValueToScalar) {
  EValue value((int64_t)2);
  ASSERT_TRUE(value.isScalar());

  Scalar scalar = value.toScalar();
  ASSERT_TRUE(scalar.isIntegral(false));
  ASSERT_EQ(scalar.to<int64_t>(), 2);
}
167 
// No-op kernel used by the registration tests below; ignores its arguments.
void test_op(KernelRuntimeContext& /*unused*/, EValue** /*unused*/) {}
169 
TEST_F(ExecutorTest, OpRegistration) {
  // Registering two distinct op names succeeds.
  const auto status_a = register_kernel(Kernel("test", test_op));
  const auto status_b = register_kernel(Kernel("test_2", test_op));
  ASSERT_EQ(Error::Ok, status_a);
  ASSERT_EQ(Error::Ok, status_b);

  // Registering the same name a second time dies.
  ET_EXPECT_DEATH(
      []() { (void)register_kernel(Kernel("test", test_op)); }(), "");

  // Both successful registrations are visible in the registry.
  ASSERT_TRUE(registry_has_op_function("test"));
  ASSERT_TRUE(registry_has_op_function("test_2"));
}
181 
// Registers a lambda kernel that takes a KernelRuntimeContext, then invokes
// it through the registry and checks the value it writes to its argument.
TEST_F(ExecutorTest, OpRegistrationWithContext) {
  auto kernel = Kernel(
      "test_op_with_context",
      [](KernelRuntimeContext& context, EValue** values) {
        (void)context;
        *(values[0]) = Scalar(100);
      });
  const auto status = register_kernel(kernel);
  ASSERT_EQ(Error::Ok, status);

  Result<OpFunction> func =
      get_op_function_from_registry("test_op_with_context");
  ASSERT_EQ(func.error(), Error::Ok);

  // Invoke the kernel with a single in/out slot initialized to 0.
  EValue out(Scalar(0));
  EValue* args[1] = {&out};
  KernelRuntimeContext context{};
  (*func)(context, args);

  ASSERT_EQ(out.toScalar().to<int64_t>(), 100);
}
206 
// Sanity check that the ops exercised by RegistryLookupAndCall are present
// in the registry for this test binary.
TEST_F(ExecutorTest, AddMulAlreadyRegistered) {
  ASSERT_TRUE(registry_has_op_function("aten::add.out"));
  ASSERT_TRUE(registry_has_op_function("aten::mul.out"));
}
211 
// Unflattens two scalar leaves into a pytree list and checks both children.
TEST(PyTreeEValue, List) {
  std::string spec = "L2#1#1($,$)";

  Scalar int_scalar((int64_t)2);
  Scalar double_scalar((double)3.0);
  EValue leaves[2] = {int_scalar, double_scalar};

  auto tree = pytree::unflatten(spec, leaves);
  ASSERT_TRUE(tree.isList());
  ASSERT_EQ(tree.size(), 2);

  const auto& first = tree[0];
  const auto& second = tree[1];

  ASSERT_TRUE(first.isLeaf());
  ASSERT_TRUE(second.isLeaf());

  // First leaf carries the int scalar.
  EValue first_value = first;
  ASSERT_TRUE(first_value.isScalar());
  ASSERT_TRUE(first_value.isInt());
  ASSERT_EQ(first_value.toInt(), 2);

  // Second leaf carries the double scalar.
  ASSERT_TRUE(second.leaf().isScalar());
  ASSERT_TRUE(second.leaf().isDouble());
  ASSERT_NEAR(second.leaf().toDouble(), 3.0, 0.01);
}
238 
unflatten(EValue * items)239 auto unflatten(EValue* items) {
240   std::string spec = "D4#1#1#1#1('key0':$,1:$,23:$,123:$)";
241   return pytree::unflatten(spec, items);
242 }
243 
// The unflattened tree (keys included) must remain valid after the spec
// string used to build it has been destroyed inside the helper above.
TEST(PyTreeEValue, DestructedSpec) {
  Scalar first((int64_t)2);
  Scalar second((double)3.0);
  Scalar third((int64_t)4);
  Scalar fourth((double)5.0);
  EValue leaves[4] = {first, second, third, fourth};
  auto tree = unflatten(leaves);

  ASSERT_TRUE(tree.isDict());
  ASSERT_EQ(tree.size(), 4);

  // Keys survive the destruction of the spec string they were parsed from.
  auto& string_key = tree.key(0);
  auto& int_key = tree.key(1);
  ASSERT_TRUE(string_key == pytree::Key("key0"));
  ASSERT_TRUE(int_key == pytree::Key(1));

  const auto& first_child = tree[0];
  const auto& second_child = tree[1];
  ASSERT_TRUE(first_child.isLeaf());
  ASSERT_TRUE(second_child.isLeaf());

  // First child holds the int scalar.
  EValue first_value = first_child;
  ASSERT_TRUE(first_value.isScalar());
  ASSERT_TRUE(first_value.isInt());
  ASSERT_EQ(first_value.toInt(), 2);

  // Second child holds the double scalar.
  ASSERT_TRUE(second_child.leaf().isScalar());
  ASSERT_TRUE(second_child.leaf().isDouble());
  ASSERT_NEAR(second_child.leaf().toDouble(), 3.0, 0.01);
}
275