/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/TensorAllocator.h"

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/Utility.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryRegion.h"

#include <cstddef>
#include <memory>

using namespace arm_compute;

namespace
{
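/** Check that a sub-tensor, anchored at @p coords, lies entirely inside its parent tensor.
 *
 * Walks the child's dimensions from outermost to innermost and verifies that each anchor
 * coordinate is non-negative and that anchor + child extent does not exceed the parent extent.
 *
 * @return True if the sub-tensor fits within the parent, false otherwise.
 */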
bool validate_subtensor_shape(const TensorInfo &parent_info, const TensorInfo &child_info, const Coordinates &coords)
{
    bool               is_valid     = true;
    const TensorShape &parent_shape = parent_info.tensor_shape();
    const TensorShape &child_shape  = child_info.tensor_shape();
    const size_t       parent_dims  = parent_info.num_dimensions();
    const size_t       child_dims   = child_info.num_dimensions();

    if(child_dims <= parent_dims)
    {
        for(size_t num_dimensions = child_dims; num_dimensions > 0; --num_dimensions)
        {
            const size_t child_dim_size = coords[num_dimensions - 1] + child_shape[num_dimensions - 1];

            if((coords[num_dimensions - 1] < 0) || (child_dim_size > parent_shape[num_dimensions - 1]))
            {
                is_valid = false;
                break;
            }
        }
    }
    else
    {
        is_valid = false;
    }

    return is_valid;
}
} // namespace

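/** Construct an allocator tied to @p owner, the memory-manageable object (the tensor itself)
 *  on whose behalf memory is requested from a memory group.
 */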
TensorAllocator::TensorAllocator(IMemoryManageable *owner)
    : _owner(owner), _associated_memory_group(nullptr), _memory()
{
}

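/** On destruction the backing memory goes away with the allocator, so mark the tensor info as resizable again. */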
TensorAllocator::~TensorAllocator()
{
    info().set_is_resizable(true);
}

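/** Move construction transfers the memory region, owner and group association,
 *  leaving the moved-from allocator empty.
 */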
TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept
    : ITensorAllocator(std::move(o)),
      _owner(o._owner),
      _associated_memory_group(o._associated_memory_group),
      _memory(std::move(o._memory))
{
    o._owner                   = nullptr;
    o._associated_memory_group = nullptr;
    o._memory                  = Memory();
}

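/** Move assignment mirrors the move constructor and is a no-op on self-assignment. */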
TensorAllocator &TensorAllocator::operator=(TensorAllocator &&o) noexcept
{
    if(&o != this)
    {
        _owner   = o._owner;
        o._owner = nullptr;

        _associated_memory_group   = o._associated_memory_group;
        o._associated_memory_group = nullptr;

        _memory   = std::move(o._memory);
        o._memory = Memory();

        ITensorAllocator::operator=(std::move(o));
    }
    return *this;
}

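/** Initialise this allocator as a view over a region of @p allocator (a sub-tensor).
 *
 * The parent's memory region is shared rather than copied, and @p sub_info is re-initialised
 * with the parent's strides and the byte offset of the anchor @p coords, so reads and writes
 * through the sub-tensor land directly in the parent's buffer.
 */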
void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
{
    // Get parent info
    const TensorInfo parent_info = allocator.info();

    // Check if coordinates and new shape are within the parent tensor
    ARM_COMPUTE_ERROR_ON(!validate_subtensor_shape(parent_info, sub_info, coords));
    ARM_COMPUTE_UNUSED(validate_subtensor_shape);

    // Copy pointer to buffer
    _memory = Memory(allocator._memory.region());

    // Init tensor info with new dimensions
    size_t total_size = parent_info.offset_element_in_bytes(coords) + sub_info.total_size() - sub_info.offset_first_element_in_bytes();
    sub_info.init(sub_info.tensor_shape(), sub_info.format(), parent_info.strides_in_bytes(), parent_info.offset_element_in_bytes(coords), total_size);

    // Set TensorInfo
    init(sub_info);
}

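/** Return a raw pointer to the backing buffer, or nullptr if no region has been set yet. */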
uint8_t *TensorAllocator::data() const
{
    return (_memory.region() == nullptr) ? nullptr : reinterpret_cast<uint8_t *>(_memory.region()->buffer());
}

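/** Allocate CPU memory for the tensor.
 *
 * Without an associated memory group, an owned region is created immediately (64-byte aligned
 * unless the caller requested a different alignment at init time). With a memory group, the
 * request is handed to the group, which fills in the region when the group is acquired. Either
 * way the tensor info is locked against resizing while memory is live.
 */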
void TensorAllocator::allocate()
{
    // Align to 64-byte boundaries by default if alignment is not specified
    const size_t alignment_to_use = (alignment() != 0) ? alignment() : 64;
    if(_associated_memory_group == nullptr)
    {
        _memory.set_owned_region(std::make_unique<MemoryRegion>(info().total_size(), alignment_to_use));
    }
    else
    {
        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment_to_use);
    }
    info().set_is_resizable(false);
}

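/** Drop the backing region (freeing it if owned) and make the tensor info resizable again. */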
void TensorAllocator::free()
{
    _memory.set_region(nullptr);
    info().set_is_resizable(true);
}

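/** Wrap an externally allocated buffer as this tensor's backing memory.
 *
 * The pointer must be non-null and satisfy the allocator's alignment requirement (if any),
 * importing is rejected while a memory group manages this allocator, and ownership is not
 * transferred: the caller must keep the buffer alive and large enough (info().total_size()
 * bytes) for as long as the tensor uses it. A minimal usage sketch, assuming a caller-managed
 * buffer `buf`:
 *
 * @code
 * Tensor t;
 * t.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 * const Status s = t.allocator()->import_memory(buf); // t now reads/writes buf directly
 * @endcode
 */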
Status TensorAllocator::import_memory(void *memory)
{
    ARM_COMPUTE_RETURN_ERROR_ON(memory == nullptr);
    ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
    ARM_COMPUTE_RETURN_ERROR_ON(alignment() != 0 && !arm_compute::utility::check_aligned(memory, alignment()));

    _memory.set_owned_region(std::make_unique<MemoryRegion>(memory, info().total_size()));
    info().set_is_resizable(false);

    return Status{};
}

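/** Defer allocation to @p associated_memory_group.
 *
 * Only valid before any backing memory exists, and the group, once set, cannot be changed
 * to a different one.
 */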
void TensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
{
    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
    ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.region()->buffer() != nullptr);

    _associated_memory_group = associated_memory_group;
}

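/** Return a pointer to the CPU buffer for map-style access; a backing region must already exist. */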
uint8_t *TensorAllocator::lock()
{
    ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
    return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
}

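/** No-op on CPU: the pointer returned by lock() stays valid, so there is nothing to unmap. */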
void TensorAllocator::unlock()
{
}