/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/extension/kernel_util/make_boxed_from_unboxed_functor.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/portable_type/tensor.h>
#include <executorch/runtime/kernel/kernel_runtime_context.h>
#include <executorch/runtime/kernel/operator_registry.h>
#include <executorch/runtime/platform/runtime.h>
#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using exec_aten::TensorImpl;
using executorch::runtime::BoxedEvalueList;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::get_op_function_from_registry;
using executorch::runtime::KernelRuntimeContext;
using executorch::runtime::registry_has_op_function;

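// No-op out-variant operator: ignores its input and returns `out` unchanged.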
Tensor& my_op_out(KernelRuntimeContext& ctx, const Tensor& a, Tensor& out) {
  (void)ctx;
  (void)a;
  return out;
}

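// Writes the value 1 into the first element of `out`.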
Tensor& set_1_out(KernelRuntimeContext& ctx, Tensor& out) {
  (void)ctx;
  out.mutable_data_ptr<int32_t>()[0] = 1;
  return out;
}

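// Sums the tensors in `a` elementwise into `out`.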
Tensor&
add_tensor_out(KernelRuntimeContext& ctx, ArrayRef<Tensor> a, Tensor& out) {
  (void)ctx;
  for (int i = 0; i < out.numel(); i++) {
    int sum = 0;
    for (int j = 0; j < a.size(); j++) {
      sum += a[j].const_data_ptr<int32_t>()[i];
    }
    out.mutable_data_ptr<int32_t>()[i] = sum;
  }
  return out;
}

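// Adds each optional scalar that has a value to the first element of `out`.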
Tensor& add_optional_scalar_out(
    KernelRuntimeContext& ctx,
    optional<int64_t> s1,
    optional<int64_t> s2,
    Tensor& out) {
  (void)ctx;
  if (s1.has_value()) {
    out.mutable_data_ptr<int32_t>()[0] += s1.value();
  }
  if (s2.has_value()) {
    out.mutable_data_ptr<int32_t>()[0] += s2.value();
  }
  return out;
}

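// Accumulates each optional tensor that has a value elementwise into `out`.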
Tensor& add_optional_tensor_out(
    KernelRuntimeContext& ctx,
    ArrayRef<optional<Tensor>> a,
    Tensor& out) {
  (void)ctx;
  for (int i = 0; i < a.size(); i++) {
    if (a[i].has_value()) {
      for (int j = 0; j < a[i].value().numel(); j++) {
        out.mutable_data_ptr<int32_t>()[j] +=
            a[i].value().const_data_ptr<int32_t>()[j];
      }
    }
  }
  return out;
}

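// Test fixture that initializes the ExecuTorch runtime before each test.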
class MakeBoxedFromUnboxedFunctorTest : public ::testing::Test {
 public:
  void SetUp() override {
    torch::executor::runtime_init();
  }
};

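// Registering an unboxed functor should make it discoverable in the operator
// registry.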
TEST_F(MakeBoxedFromUnboxedFunctorTest, Basic) {
  EXECUTORCH_LIBRARY(my_ns, "my_op.out", my_op_out);
  EXPECT_TRUE(registry_has_op_function("my_ns::my_op.out"));
}

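// A registered functor, looked up and invoked through the boxed calling
// convention, should unbox its tensor argument and mutate the out tensor.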
TEST_F(MakeBoxedFromUnboxedFunctorTest, UnboxLogicWorks) {
  EXECUTORCH_LIBRARY(my_ns, "set_1.out", set_1_out);
  EXPECT_TRUE(registry_has_op_function("my_ns::set_1.out"));

  // prepare out tensor
  TensorImpl::SizesType sizes[1] = {5};
  TensorImpl::DimOrderType dim_order[1] = {0};
  int32_t data[5] = {0, 0, 0, 0, 0};
  auto a_impl = TensorImpl(ScalarType::Int, 1, sizes, data, dim_order, nullptr);
  auto a = Tensor(&a_impl);

  // get boxed callable
  auto fn = get_op_function_from_registry("my_ns::set_1.out");
  ASSERT_EQ(fn.error(), Error::Ok);

  // run it
  KernelRuntimeContext context;
  EValue values[1];
  values[0] = a;
  EValue* stack[1];
  stack[0] = &values[0];

  (*fn)(context, stack);

  // check result
  EXPECT_EQ(a.const_data_ptr<int32_t>()[0], 1);
}

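// A boxed list of tensors should unbox into an ArrayRef<Tensor> argument.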
TEST_F(MakeBoxedFromUnboxedFunctorTest, UnboxArrayRef) {
  EXECUTORCH_LIBRARY(my_ns, "add_tensor.out", add_tensor_out);
  EXPECT_TRUE(registry_has_op_function("my_ns::add_tensor.out"));

  // prepare ArrayRef input.
  torch::executor::testing::TensorFactory<ScalarType::Int> tf;
  Tensor storage[2] = {tf.ones({5}), tf.ones({5})};
  EValue evalues[2] = {storage[0], storage[1]};
  EValue* values_p[2] = {&evalues[0], &evalues[1]};
  BoxedEvalueList<Tensor> a_box(values_p, storage, 2);
  EValue boxed_array_ref(a_box);
  // prepare out tensor.
  EValue out(tf.zeros({5}));

  auto fn = get_op_function_from_registry("my_ns::add_tensor.out");
  ASSERT_EQ(fn.error(), Error::Ok);

  // run it.
  KernelRuntimeContext context;
  EValue values[2] = {boxed_array_ref, out};
  EValue* stack[2] = {&values[0], &values[1]};
  (*fn)(context, stack);

  // check result.
  for (int i = 0; i < 5; i++) {
    EXPECT_EQ(stack[1]->toTensor().const_data_ptr<int32_t>()[i], 2);
  }
}

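// Boxed optional scalars should unbox into optional<int64_t> arguments,
// including the none case.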
TEST_F(MakeBoxedFromUnboxedFunctorTest, UnboxOptional) {
  EXECUTORCH_LIBRARY(my_ns, "add_optional_scalar.out", add_optional_scalar_out);
  EXPECT_TRUE(registry_has_op_function("my_ns::add_optional_scalar.out"));

  // prepare optional input.
  EValue scalar((int64_t)3);
  EValue scalar_none;

  // prepare out tensor.
  torch::executor::testing::TensorFactory<ScalarType::Int> tf;
  EValue out(tf.ones({1}));
  auto fn = get_op_function_from_registry("my_ns::add_optional_scalar.out");
  ASSERT_EQ(fn.error(), Error::Ok);

  // run it.
  KernelRuntimeContext context;
  EValue values[3] = {scalar, scalar_none, out};
  EValue* stack[3] = {&values[0], &values[1], &values[2]};
  (*fn)(context, stack);

  // check result.
  EXPECT_EQ(stack[2]->toTensor().const_data_ptr<int32_t>()[0], 4);
}

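// A boxed list of optional tensors should unbox into an
// ArrayRef<optional<Tensor>> argument, skipping entries without a value.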
TEST_F(MakeBoxedFromUnboxedFunctorTest, UnboxOptionalArrayRef) {
  EXECUTORCH_LIBRARY(my_ns, "add_optional_tensor.out", add_optional_tensor_out);
  EXPECT_TRUE(registry_has_op_function("my_ns::add_optional_tensor.out"));

  // prepare optional tensors.
  torch::executor::testing::TensorFactory<ScalarType::Int> tf;
  optional<Tensor> storage[2];
  EValue evalues[2] = {EValue(tf.ones({5})), EValue()};
  EValue* values_p[2] = {&evalues[0], &evalues[1]};
  BoxedEvalueList<optional<Tensor>> a_box(values_p, storage, 2);
  EValue boxed_array_ref(a_box);

  // prepare out tensor.
  EValue out(tf.zeros({5}));
  auto fn = get_op_function_from_registry("my_ns::add_optional_tensor.out");
  ASSERT_EQ(fn.error(), Error::Ok);

  // run it.
  KernelRuntimeContext context;
  EValue values[2] = {boxed_array_ref, out};
  EValue* stack[2] = {&values[0], &values[1]};
  (*fn)(context, stack);

  // check result.
  for (int i = 0; i < 5; i++) {
    EXPECT_EQ(stack[1]->toTensor().const_data_ptr<int32_t>()[i], 1);
  }
}