xref: /aosp_15_r20/external/executorch/runtime/core/exec_aten/util/tensor_util_portable.cpp (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

#include <cstring>

#include <executorch/runtime/core/portable_type/tensor.h>
#include <executorch/runtime/platform/assert.h>

namespace executorch {
namespace runtime {
/**
 * Implementation of the ExecuTorch tensor util; should only be included in
 * a target with ATen mode turned off. Explicitly taking
 * torch::executor::Tensor (instead of exec_aten::Tensor) to make sure it fails
 * at compile time if built incorrectly.
 */
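
/**
 * Copies the tensor's dim order into `out_dim_order`. Returns an
 * InvalidArgument error unless `out_dim_order_size` matches the number of
 * entries in `tensor.dim_order()`.
 *
 * Illustrative usage sketch (assumes a constructed tensor `t` whose rank does
 * not exceed the buffer below; the buffer size is an arbitrary choice here):
 *
 *   exec_aten::DimOrderType dim_order[16];
 *   Error err = get_dim_order(t, dim_order, t.dim_order().size());
 *   if (err != Error::Ok) {
 *     // handle the error
 *   }
 */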
Error get_dim_order(
    const torch::executor::Tensor& tensor,
    exec_aten::DimOrderType* out_dim_order,
    size_t out_dim_order_size) {
  ET_CHECK_OR_RETURN_ERROR(
      out_dim_order_size == tensor.dim_order().size(),
      InvalidArgument,
      "out_dim_order_size %zu must match tensor.dim_order().size() %zu",
      out_dim_order_size,
      tensor.dim_order().size());
  std::memcpy(
      out_dim_order,
      tensor.dim_order().data(),
      tensor.dim_order().size() * sizeof(exec_aten::DimOrderType));
  return Error::Ok;
}

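/**
 * Returns true if the tensor's dim order passes validate_dim_order(); on
 * failure, logs each dim order entry and returns false.
 */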
bool tensor_has_valid_dim_order(torch::executor::Tensor t) {
  if (!validate_dim_order(t.dim_order().data(), t.dim_order().size())) {
    ET_LOG(Error, "Tensor dim order is not valid:");
    for (size_t d = 0; d < t.dim(); ++d) {
      ET_LOG(
          Error,
          "    dim_order(%zu): %zu",
          static_cast<size_t>(d),
          static_cast<size_t>(t.dim_order()[d]));
    }
    return false;
  }
  return true;
}

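/**
 * Returns true if the tensor uses either the contiguous (default) or the
 * channels-last dim order; otherwise logs the actual dim order and returns
 * false.
 */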
bool tensor_is_default_or_channels_last_dim_order(torch::executor::Tensor t) {
  bool ret_val =
      is_contiguous_dim_order(t.dim_order().data(), t.dim_order().size()) ||
      is_channels_last_dim_order(t.dim_order().data(), t.dim_order().size());

  if (!ret_val) {
    ET_LOG(
        Error,
        "Expected tensor to have default or channels last dim order, but got");
    for (size_t d = 0; d < t.dim(); ++d) {
      ET_LOG(
          Error,
          "    dim_order(%zu): %zu",
          static_cast<size_t>(d),
          static_cast<size_t>(t.dim_order()[d]));
    }
  }
  return ret_val;
}

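/**
 * Returns true if the tensor uses the contiguous (default) dim order;
 * otherwise logs the actual dim order and returns false.
 */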
bool tensor_is_default_dim_order(torch::executor::Tensor t) {
  bool ret_val =
      is_contiguous_dim_order(t.dim_order().data(), t.dim_order().size());

  if (!ret_val) {
    ET_LOG(Error, "Expected tensor to have default dim order, but got");
    for (size_t d = 0; d < t.dim(); ++d) {
      ET_LOG(
          Error,
          "    dim_order(%zu): %zu",
          static_cast<size_t>(d),
          static_cast<size_t>(t.dim_order()[d]));
    }
  }
  return ret_val;
}

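/**
 * Returns true if the tensor uses the channels-last dim order; otherwise logs
 * the actual dim order and returns false.
 */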
bool tensor_is_channels_last_dim_order(torch::executor::Tensor t) {
  bool ret_val =
      is_channels_last_dim_order(t.dim_order().data(), t.dim_order().size());

  if (!ret_val) {
    ET_LOG(Error, "Expected tensor to have channels last dim order, but got");
    for (size_t d = 0; d < t.dim(); ++d) {
      ET_LOG(
          Error,
          "    dim_order(%zu): %zu",
          static_cast<size_t>(d),
          static_cast<size_t>(t.dim_order()[d]));
    }
  }
  return ret_val;
}

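/**
 * Returns true if fewer than two tensors are given, or if every tensor in the
 * list uses the contiguous dim order, or every tensor uses the channels-last
 * dim order; otherwise logs an error and returns false.
 */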
bool tensors_have_same_dim_order(
    const exec_aten::ArrayRef<exec_aten::Tensor> tensor_list) {
  if (tensor_list.size() < 2) {
    return true;
  }
  bool all_contiguous = true;
  bool all_channels_last = true;
  for (size_t i = 0; i < tensor_list.size(); ++i) {
    all_contiguous = all_contiguous &&
        is_contiguous_dim_order(
            tensor_list[i].dim_order().data(),
            tensor_list[i].dim_order().size());
    all_channels_last = all_channels_last &&
        is_channels_last_dim_order(
            tensor_list[i].dim_order().data(),
            tensor_list[i].dim_order().size());
  }

  ET_LOG_MSG_AND_RETURN_IF_FALSE(
      all_contiguous || all_channels_last,
      "%zu input tensors have different dim orders",
      tensor_list.size());

  return true;
}

namespace internal {

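/**
 * Points `t_dst` at the data buffer owned by `t_src` (no copy). Fails with
 * InvalidArgument if the byte sizes differ or if `t_src` has no data pointer.
 */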
Error share_tensor_data(
    const torch::executor::Tensor& t_dst,
    const torch::executor::Tensor& t_src) {
  ET_CHECK_OR_RETURN_ERROR(
      t_dst.nbytes() == t_src.nbytes(),
      InvalidArgument,
      "t_dst.nbytes() %zu != t_src.nbytes() %zu",
      t_dst.nbytes(),
      t_src.nbytes());

  ET_CHECK_OR_RETURN_ERROR(
      t_src.mutable_data_ptr() != nullptr,
      InvalidArgument,
      "Source tensor should have a data_ptr that is not nullptr.");
  // Assign the internal data_ptr to the one in the forwarded tensor.
  t_dst.unsafeGetTensorImpl()->set_data(t_src.mutable_data_ptr());

  return Error::Ok;
}

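/**
 * Copies the bytes of `t_src` into the preallocated buffer of `t_dst`. The
 * destination must already have a data pointer; a null source is accepted for
 * tensors with a zero-sized dimension, in which case nothing is copied.
 */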
Error copy_tensor_data(
    const torch::executor::Tensor& t_dst,
    const torch::executor::Tensor& t_src) {
  ET_CHECK_OR_RETURN_ERROR(
      t_dst.const_data_ptr() != nullptr,
      InvalidArgument,
      "ExecutionPlan input is supposed to be preallocated, but has nullptr for data");
  // Inputs with a size-0 dimension can be nullptr.
  if (t_src.const_data_ptr() != nullptr) {
    ET_CHECK_OR_RETURN_ERROR(
        t_dst.nbytes() == t_src.nbytes(),
        InvalidArgument,
        "t_dst.nbytes() %zu != t_src.nbytes() %zu",
        t_dst.nbytes(),
        t_src.nbytes());
    std::memcpy(
        t_dst.mutable_data_ptr(), t_src.const_data_ptr(), t_src.nbytes());
  }
  return Error::Ok;
}

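/**
 * Points the tensor at an externally owned buffer. Fails with InvalidArgument
 * if `buffer_size` is smaller than the tensor's nbytes().
 */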
ET_NODISCARD Error set_tensor_data(
    const torch::executor::Tensor& t,
    void* buffer,
    size_t buffer_size) {
  ET_CHECK_OR_RETURN_ERROR(
      buffer_size >= t.nbytes(),
      InvalidArgument,
      "buffer_size %zu is smaller than tensor nbytes %zu",
      buffer_size,
      t.nbytes());
  t.unsafeGetTensorImpl()->set_data(buffer);
  return Error::Ok;
}

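/**
 * Detaches the tensor from its data buffer by clearing the data pointer on
 * its TensorImpl.
 */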
void reset_data_ptr(const torch::executor::Tensor& tensor) {
  // Lean mode doesn't deallocate the tensor data_ptr in the allocator.
  tensor.unsafeGetTensorImpl()->set_data(nullptr);
}

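/**
 * Thin wrapper that forwards resize requests to
 * TensorImpl::internal_resize_contiguous(), keeping that internal call behind
 * a single named class.
 */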
class TensorResizerFriend final {
 public:
  ET_NODISCARD static Error resize_tensor_impl(
      exec_aten::TensorImpl* impl,
      exec_aten::ArrayRef<exec_aten::SizesType> new_sizes) {
    return impl->internal_resize_contiguous(new_sizes);
  }
};

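/**
 * Resizes `impl` to `new_sizes`, delegating to
 * TensorResizerFriend::resize_tensor_impl().
 */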
Error resize_tensor_impl(
    torch::executor::TensorImpl* impl,
    torch::executor::ArrayRef<exec_aten::SizesType> new_sizes) {
  return TensorResizerFriend::resize_tensor_impl(impl, new_sizes);
}
} // namespace internal

} // namespace runtime
} // namespace executorch