/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <cstring>

#include <executorch/kernels/portable/cpu/util/activation_ops_util.h>

namespace torch {
namespace executor {

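// Argument checks shared by the portable gelu kernel: input and output must
// have the same dtype, bool inputs are rejected, and `approximate` must be
// either "none" or "tanh".
//
// Hedged usage sketch (the kernel call site below is illustrative, not part
// of this file):
//
//   ET_KERNEL_CHECK(
//       ctx, check_gelu_args(in, approximate, out), InvalidArgument, out);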
bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) {
  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
  ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() != ScalarType::Bool);
  ET_LOG_MSG_AND_RETURN_IF_FALSE(
      approximate == "tanh" || approximate == "none",
      "Invalid approximation format: %.*s for gelu",
      static_cast<int>(approximate.length()),
      approximate.data());
  return true;
}

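// Argument checks for glu: `dim` must be a valid (possibly negative) input
// dimension, both tensors must be floating point with the same rank, the
// halved dimension must have even size, and `out` must match `in` on every
// other dimension. For example, a (4, 6) input with dim=1 requires a (4, 3)
// output.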
bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) {
  ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim()));
  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(in));

  const size_t non_negative_dim = dim < 0 ? dim + in.dim() : dim;
  const size_t dim_size = in.size(non_negative_dim);

  ET_LOG_MSG_AND_RETURN_IF_FALSE(
      dim_size % 2 == 0,
      "Halving dimension must be even, but dimension %zu is size %zu",
      non_negative_dim,
      dim_size);

  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(out));
  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out));
  ET_LOG_MSG_AND_RETURN_IF_FALSE(
      out.size(non_negative_dim) == dim_size / 2,
      "output tensor must have half the size of the input tensor along the specified dimension.");

  for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
    if (i != non_negative_dim) {
      ET_LOG_MSG_AND_RETURN_IF_FALSE(
          out.size(i) == in.size(i),
          "output tensor must have the same size as the input tensor in all dimensions except for the specified dimension.");
    }
  }

  return true;
}

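// Argument checks shared by the portable log_softmax (and softmax) kernels.
// `half_to_float` is rejected outright: this build does not support
// converting a half input to a float output.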
bool check_log_softmax_args(
    const Tensor& in,
    int64_t dim,
    bool half_to_float,
    Tensor& out) {
  ET_LOG_MSG_AND_RETURN_IF_FALSE(
      !half_to_float, "half to float conversion is not supported on CPU");
  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
  ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in));
  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out));
  return true;
}

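// softmax has exactly the same argument contract as log_softmax, so the
// check is simply delegated.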
bool check_softmax_args(
    const Tensor& in,
    int64_t dim,
    bool half_to_float,
    Tensor& out) {
  return check_log_softmax_args(in, dim, half_to_float, out);
}

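// Computes the expected glu output shape (the input shape with the halved
// dimension divided by two) and resizes `out` to it. Assumes the ranks of
// `in` and `out` already match, which check_glu_args() verifies.
//
// Hedged usage sketch (illustrative call site, not part of this file):
//
//   ET_KERNEL_CHECK(
//       ctx, resize_glu_out(in, dim, out) == Error::Ok, InvalidArgument, out);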
Error resize_glu_out(const Tensor& in, int64_t dim, Tensor& out) {
  exec_aten::SizesType expected_output_size[kTensorDimensionLimit];

  const size_t non_negative_dim = dim < 0 ? dim + in.dim() : dim;
  for (size_t i = 0; i < static_cast<size_t>(in.dim()); i++) {
    expected_output_size[i] =
        (i == non_negative_dim) ? (in.size(i) / 2) : in.size(i);
  }

  ArrayRef<exec_aten::SizesType> output_size{
      expected_output_size, static_cast<size_t>(out.dim())};

  return resize_tensor(out, output_size);
}

} // namespace executor
} // namespace torch