/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"

#include <cstdint>
#include <cstring>
#include <utility>

#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/typed_allocator.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/experimental/resource/resource_variable.h"
#include "tensorflow/lite/string_util.h"

namespace tflite {
namespace flex {

namespace {
// Returns whether we can reuse the memory backing the given TfLiteTensor
// instead of copying it into a TensorFlow-allocated buffer.
inline bool ShouldReuseTensorMemory(const TfLiteTensor* tensor) {
  // TODO(b/205153246): Currently arena-allocated memory cannot be reused,
  // since it may become invalid after the original arena grows in size and is
  // copied over to a new memory block.
  // First check that the alignment is consistent with TensorFlow.
  if (EIGEN_MAX_ALIGN_BYTES != 0 &&
      reinterpret_cast<intptr_t>(tensor->data.raw) % EIGEN_MAX_ALIGN_BYTES) {
    return false;
  }
  return tensor->allocation_type != kTfLiteArenaRw;
}
}  // namespace

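// Fills `proto` with this buffer's requested size and the name of the
// allocator used for TensorFlow-side allocations.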
void BaseTfLiteTensorBuffer::FillAllocationDescription(
    tensorflow::AllocationDescription* proto) const {
  int64_t rb = size();
  proto->set_requested_bytes(rb);
  proto->set_allocator_name(tensorflow::cpu_allocator()->Name());
}

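// Records the allocation with TensorFlow's memory logger; this is a no-op
// unless memory logging is enabled and the buffer is non-null.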
void BaseTfLiteTensorBuffer::LogAllocation() {
  if (tensorflow::LogMemory::IsEnabled() && data() != nullptr) {
    tensorflow::LogMemory::RecordRawAllocation(
        "TfLiteTensorBuffer_New",
        tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, size(),
        data(), tensorflow::cpu_allocator());
  }
}
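
// Mirrors LogAllocation() for deallocations.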
void BaseTfLiteTensorBuffer::LogDeallocation() {
  if (tensorflow::LogMemory::IsEnabled() && data() != nullptr) {
    tensorflow::LogMemory::RecordRawDeallocation(
        "TfLiteTensorBuffer_Delete",
        tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data(),
        tensorflow::cpu_allocator(), false);
  }
}

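// Returns the TfLiteTensor's own buffer when reuse is allowed and safe;
// otherwise allocates a suitably aligned buffer from TensorFlow's CPU
// allocator, which the caller is responsible for filling.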
void* TfLiteTensorBuffer::MaybeAllocateTensorflowBuffer(
    const TfLiteTensor* tensor, bool allow_reusing) const {
  if (allow_reusing && ShouldReuseTensorMemory(tensor)) {
    return tensor->data.raw;
  }
  return tensorflow::cpu_allocator()->AllocateRaw(EIGEN_MAX_ALIGN_BYTES,
                                                  tensor->bytes);
}

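// Wraps the TfLiteTensor's memory directly when it can be reused; otherwise
// copies the tensor's contents into a newly allocated TensorFlow buffer.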
TfLiteTensorBuffer::TfLiteTensorBuffer(const TfLiteTensor* tensor,
                                       bool allow_reusing)
    : BaseTfLiteTensorBuffer(
          MaybeAllocateTensorflowBuffer(tensor, allow_reusing)) {
  len_ = tensor->bytes;

  reused_buffer_from_tflite_ = allow_reusing && ShouldReuseTensorMemory(tensor);

  if (data() && !reused_buffer_from_tflite_) {
    LogAllocation();
    std::memcpy(data(), tensor->data.raw, tensor->bytes);
  }
}

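// Releases the buffer unless it was reused from the TfLiteTensor, in which
// case TF Lite retains ownership.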
TfLiteTensorBuffer::~TfLiteTensorBuffer() {
  if (!reused_buffer_from_tflite_) {
    LogDeallocation();
    // Only deallocate tensor memory if it was allocated via TensorFlow's CPU
    // allocator.
    tensorflow::cpu_allocator()->DeallocateRaw(data());
  }
}

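// Counts the strings stored in `tensor` (zero if the tensor has no data) and
// delegates to the private constructor below.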
StringTfLiteTensorBuffer::StringTfLiteTensorBuffer(const TfLiteTensor* tensor)
    : StringTfLiteTensorBuffer(
          tensor, tensor->data.raw != nullptr ? GetStringCount(tensor) : 0) {}

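// Deallocates the tstring array allocated in the private constructor.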
StringTfLiteTensorBuffer::~StringTfLiteTensorBuffer() {
  LogDeallocation();
  tensorflow::TypedAllocator::Deallocate<tensorflow::tstring>(
      tensorflow::cpu_allocator(), static_cast<tensorflow::tstring*>(data()),
      num_strings_);
}

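// Allocates `num_strings` tstring objects via TensorFlow's typed allocator
// and copies each TF Lite string into place.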
StringTfLiteTensorBuffer::StringTfLiteTensorBuffer(const TfLiteTensor* tensor,
                                                   int num_strings)
    : BaseTfLiteTensorBuffer(
          num_strings != 0
              ? tensorflow::TypedAllocator::Allocate<tensorflow::tstring>(
                    tensorflow::cpu_allocator(), num_strings,
                    tensorflow::AllocationAttributes())
              : nullptr),
      num_strings_(num_strings) {
  LogAllocation();

  if (data()) {
    tensorflow::tstring* p = static_cast<tensorflow::tstring*>(data());
    for (size_t i = 0; i < num_strings_; ++p, ++i) {
      auto ref = GetString(tensor, i);
      p->assign(ref.str, ref.len);
    }
  }
}

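// Converts the given TfLiteTensor into `tf_tensor`. Built-in TF Lite resource
// variables become scalar DT_RESOURCE handles, resource/variant tensors are
// unwrapped from the tensorflow::Tensor pointer stored in their `data` field,
// and all other tensors are wrapped in a (possibly reused) buffer.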
tensorflow::Status SetTfTensorFromTfLite(const TfLiteTensor* tensor,
                                         tensorflow::Tensor* tf_tensor,
                                         bool allow_reusing) {
  if (resource::IsBuiltinResource(tensor)) {
    // If this is a native TF Lite resource variable, then we create a TF
    // resource tensor where the tensor handle encodes the identifier of the
    // TF Lite resource.
    // This approach assumes that there is only a single model being invoked
    // via the Interpreter instance, so that the resource IDs won't have any
    // collisions. If we plan to support concurrent execution in the future,
    // we should make sure the resource ID being encoded is unique between
    // different executions.
    tensorflow::Tensor t(tensorflow::DT_RESOURCE, tensorflow::TensorShape({}));
    tensorflow::ResourceHandle handle;
    handle.set_name(TfLiteResourceIdentifier(tensor));
    t.flat<tensorflow::ResourceHandle>()(0) = handle;
    *tf_tensor = t;
    return ::tensorflow::OkStatus();
  } else if (IsResourceOrVariant(tensor)) {
    // TODO(b/179094265): This is an experimental implementation, subject to
    // change. It could be re-implemented with a life-cycle management
    // mechanism such as reference counting.
    // A different subgraph can load the TensorFlow tensor pointer of the
    // given TensorFlow Lite tensor, which is stored in the `data` field. The
    // memory management cycle of the shared TensorFlow tensor is handled by
    // the buffer maps, since loaded tensors are always kept in the buffer
    // map.
    //
    // The life cycle of the pointer is managed by the reference counting in
    // the TensorFlow world, and the pointer is freed when all the buffer
    // maps that own it are gone.
    const tensorflow::Tensor** tf_tensor_ptr =
        reinterpret_cast<const tensorflow::Tensor**>(tensor->data.raw);
    *tf_tensor = **tf_tensor_ptr;
    return ::tensorflow::OkStatus();
  }

  tensorflow::TensorShape shape;
  int num_dims = tensor->dims->size;
  for (int i = 0; i < num_dims; ++i) {
    shape.AddDim(tensor->dims->data[i]);
  }
  // TODO(b/152916533): We assume this is a new tensor and allocate a new
  // buffer for it. This is not always the best approach. For example, this
  // might be a reallocation after resizing tensors. In that case it would be
  // preferable to somehow reuse the buffer.
  BaseTfLiteTensorBuffer* buf;
  if (tensor->type == kTfLiteString) {
    buf = new StringTfLiteTensorBuffer(tensor);
  } else {
    buf = new TfLiteTensorBuffer(tensor, allow_reusing);
  }
  tensorflow::Tensor t = tensorflow::TensorCApi::MakeTensor(
      GetTensorFlowDataType(tensor->type), shape, buf);
  buf->Unref();

  *tf_tensor = std::move(t);
  return ::tensorflow::OkStatus();
}

}  // namespace flex
}  // namespace tflite