xref: /aosp_15_r20/external/executorch/kernels/portable/cpu/op_fill.cpp (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

using Scalar = exec_aten::Scalar;
using ScalarType = exec_aten::ScalarType;
using Tensor = exec_aten::Tensor;

fill_scalar_out(KernelRuntimeContext & ctx,const Tensor & a,const Scalar & b,Tensor & out)21 Tensor& fill_scalar_out(
22     KernelRuntimeContext& ctx,
23     const Tensor& a,
24     const Scalar& b,
25     Tensor& out) {
26   (void)ctx;
27 
28   ScalarType a_type = a.scalar_type();
29   ScalarType b_type = utils::get_scalar_dtype(b);
30   ScalarType out_type = out.scalar_type();
31 
32   ET_KERNEL_CHECK(ctx, a_type == out_type, InvalidArgument, out);
33 
34   ET_KERNEL_CHECK(
35       ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
36 
37   // Resize for dynamic shape
38   ET_KERNEL_CHECK_MSG(
39       ctx,
40       resize_tensor(out, a.sizes()) == Error::Ok,
41       InvalidArgument,
42       out,
43       "Failed to resize output tensor.");
44 
45   ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "fill.Scalar_out", CTYPE_A, [&] {
46     CTYPE_A b_casted;
47     ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "fill.Scalar_out", CTYPE_B, [&] {
48       CTYPE_B b_val;
49       utils::extract_scalar(b, &b_val);
50       b_casted = static_cast<CTYPE_A>(b_val);
51     });
52 
53     apply_unary_map_fn(
54         [b_casted](const CTYPE_A val_a) { return b_casted; },
55         a.const_data_ptr<CTYPE_A>(),
56         out.mutable_data_ptr<CTYPE_A>(),
57         out.numel());
58   });
59 
60   return out;
61 }
fill_tensor_out(KernelRuntimeContext & ctx,const Tensor & a,const Tensor & b,Tensor & out)63 Tensor& fill_tensor_out(
64     KernelRuntimeContext& ctx,
65     const Tensor& a,
66     const Tensor& b,
67     Tensor& out) {
68   (void)ctx;
69 
70   // Assert `b` must be a scalar tensor.
71   ET_KERNEL_CHECK(ctx, tensor_is_scalar(b), InvalidArgument, out);
72 
73   ET_KERNEL_CHECK(
74       ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
75 
76   ScalarType a_type = a.scalar_type();
77   ScalarType b_type = b.scalar_type();
78   ScalarType out_type = out.scalar_type();
79 
80   ET_KERNEL_CHECK(ctx, a_type == out_type, InvalidArgument, out);
81 
82   // Resize for dynamic shape
83   ET_KERNEL_CHECK_MSG(
84       ctx,
85       resize_tensor(out, a.sizes()) == Error::Ok,
86       InvalidArgument,
87       out,
88       "Failed to resize output tensor.");
89 
90   ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "fill.Tensor_out", CTYPE_A, [&] {
91     CTYPE_A b_casted;
92     ET_SWITCH_REAL_TYPES_AND(
93         Bool, b_type, ctx, "fill.Tensor_out", CTYPE_B, [&] {
94           CTYPE_B b_val;
95           extract_scalar_tensor(b, &b_val);
96           b_casted = static_cast<CTYPE_A>(b_val);
97         });
98 
99     apply_unary_map_fn(
100         [b_casted](const CTYPE_A val_a) { return b_casted; },
101         a.const_data_ptr<CTYPE_A>(),
102         out.mutable_data_ptr<CTYPE_A>(),
103         out.numel());
104   });
105 
106   return out;
107 }
108 
109 } // namespace native
110 } // namespace executor
111 } // namespace torch
112