//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/MemorySources.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

namespace armnn
{

class TensorShape;

class ITensorHandle
{
public:
    virtual ~ITensorHandle() {}

    /// Indicate to the memory manager that this resource is active.
    /// This is used to compute overlapping lifetimes of resources.
    virtual void Manage() = 0;

    /// Indicate to the memory manager that this resource is no longer active.
    /// This is used to compute overlapping lifetimes of resources.
    virtual void Allocate() = 0;

    /// Get the parent tensor if this is a subtensor.
    /// \return a pointer to the parent tensor, or nullptr if this is not a subtensor.
    virtual ITensorHandle* GetParent() const = 0;

    /// Map the tensor data for access.
    /// \param blocking hint to block the calling thread until all other accesses are complete (backend dependent).
    /// \return pointer to the first element of the mapped data.
    virtual const void* Map(bool blocking=true) const = 0;

    /// Unmap the tensor data.
    virtual void Unmap() const = 0;

    /// Map the tensor data for access. Must be paired with a call to Unmap();
    /// see the usage sketch after this class.
    /// \param blocking hint to block the calling thread until all other accesses are complete (backend dependent).
    /// \return pointer to the first element of the mapped data.
    void* Map(bool blocking=true)
    {
        return const_cast<void*>(static_cast<const ITensorHandle*>(this)->Map(blocking));
    }

    /// Unmap the tensor data that was previously mapped with a call to Map().
    void Unmap()
    {
        return static_cast<const ITensorHandle*>(this)->Unmap();
    }

    /// Get the strides for each dimension, ordered from largest to smallest, where
    /// the smallest value is the same as the size of a single element in the tensor.
    /// \return a TensorShape filled with the strides for each dimension.
    virtual TensorShape GetStrides() const = 0;

    /// Get the number of elements for each dimension, ordered from slowest iterating dimension
    /// to fastest iterating dimension.
    /// \return a TensorShape filled with the number of elements for each dimension.
    virtual TensorShape GetShape() const = 0;

    /// Testing support, to verify and set tensor data content.
    virtual void CopyOutTo(void* memory) const = 0;
    virtual void CopyInFrom(const void* memory) = 0;

    /// Get flags describing supported import sources.
    virtual unsigned int GetImportFlags() const { return 0; }

    /// Import externally allocated memory; the intended call pattern is sketched at the end of this file.
    /// \param memory base address of the memory being imported.
    /// \param source source of the allocation for the memory being imported.
    /// \return true on success or false on failure.
    virtual bool Import(void* memory, MemorySource source)
    {
        IgnoreUnused(memory, source);
        return false;
    }

    /// Implementations must determine whether this memory block can be imported.
    /// This might be based on alignment or the memory source type.
    /// \return true if this memory can be imported.
    /// \return false by default, i.e. the memory cannot be imported.
    virtual bool CanBeImported(void* memory, MemorySource source)
    {
        IgnoreUnused(memory, source);
        return false;
    }

    /// Unimport externally allocated memory.
    virtual void Unimport()
    {}
};
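
// Usage sketch (illustrative only, not part of the interface above): the expected
// Map()/Unmap() pairing on a concrete backend handle. The free function, the float
// element type and the numElements parameter are assumptions made for this example;
// real code would obtain the handle from a backend-specific factory and use the
// tensor's actual data type and shape.
//
//     void ZeroFill(ITensorHandle& handle, unsigned int numElements)
//     {
//         // Map() with blocking=true (the default) hints the backend to wait until
//         // all other accesses to the tensor are complete before returning.
//         float* data = static_cast<float*>(handle.Map());
//         for (unsigned int i = 0; i < numElements; ++i)
//         {
//             data[i] = 0.0f;
//         }
//         // Every successful Map() must be paired with an Unmap().
//         handle.Unmap();
//     }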

} // namespace armnn
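
// Import sketch (illustrative only): the call pattern the import hooks above are designed
// for, checking GetImportFlags() and CanBeImported() before calling Import(). The free
// function, the buffer argument and the choice of MemorySource::Malloc are assumptions
// made for this example; a handle that does not override these methods advertises no
// import support, so the caller falls back to Map() and copying.
//
//     bool TryImportMallocBuffer(armnn::ITensorHandle& handle, void* buffer)
//     {
//         using armnn::MemorySource;
//         const unsigned int mallocFlag = static_cast<unsigned int>(MemorySource::Malloc);
//         if ((handle.GetImportFlags() & mallocFlag) != 0 &&
//             handle.CanBeImported(buffer, MemorySource::Malloc))
//         {
//             return handle.Import(buffer, MemorySource::Malloc);
//         }
//         return false; // not importable: fall back to Map() + copy
//     }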