/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/framework/allocator.h"

#include <atomic>

#include "tensorflow/core/framework/allocator_registry.h"
#include "tensorflow/core/framework/tracking_allocator.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

string AllocatorStats::DebugString() const {
  return strings::Printf(
      "Limit:            %20lld\n"
      "InUse:            %20lld\n"
      "MaxInUse:         %20lld\n"
      "NumAllocs:        %20lld\n"
      "MaxAllocSize:     %20lld\n"
      "Reserved:         %20lld\n"
      "PeakReserved:     %20lld\n"
      "LargestFreeBlock: %20lld\n",
      static_cast<long long>(this->bytes_limit ? *this->bytes_limit : 0),
      static_cast<long long>(this->bytes_in_use),
      static_cast<long long>(this->peak_bytes_in_use),
      static_cast<long long>(this->num_allocs),
      static_cast<long long>(this->largest_alloc_size),
      static_cast<long long>(this->bytes_reserved),
      static_cast<long long>(this->peak_bytes_reserved),
      static_cast<long long>(this->largest_free_block_bytes));
}
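
// Example (illustrative sketch, not part of the original file): an allocator
// that tracks allocation sizes can surface these stats via GetStats(), e.g.
//
//   absl::optional<AllocatorStats> stats = allocator->GetStats();
//   if (stats) LOG(INFO) << stats->DebugString();
//
// GetStats() returns absl::nullopt for allocators that do not record stats.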

constexpr size_t Allocator::kAllocatorAlignment;

Allocator::~Allocator() {}

// If true, cpu allocator collects full stats.
static bool cpu_allocator_collect_full_stats = false;

void EnableCPUAllocatorFullStats() { cpu_allocator_collect_full_stats = true; }
bool CPUAllocatorFullStatsEnabled() { return cpu_allocator_collect_full_stats; }
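
// Example (illustrative sketch, not part of the original file): full-stats
// collection is typically switched on once, early in the process, before the
// CPU allocator is first used, e.g.
//
//   tensorflow::EnableCPUAllocatorFullStats();
//   Allocator* a = tensorflow::cpu_allocator();  // wrapped for tracking
//
// See cpu_allocator_base() below for how the flag takes effect.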

string AllocatorAttributes::DebugString() const {
  return strings::StrCat("AllocatorAttributes(on_host=", on_host(),
                         " nic_compatible=", nic_compatible(),
                         " gpu_compatible=", gpu_compatible(), ")");
}
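
// Example (illustrative sketch, not part of the original file): attributes
// are set via the set_* accessors declared in allocator.h, e.g.
//
//   AllocatorAttributes attr;
//   attr.set_on_host(true);
//   attr.set_gpu_compatible(true);
//   LOG(INFO) << attr.DebugString();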

Allocator* cpu_allocator_base() {
  static Allocator* cpu_alloc =
      AllocatorFactoryRegistry::singleton()->GetAllocator();
  // TODO(tucker): This really seems wrong.  It's only going to be effective on
  // the first call in a process (but the desired effect is associated with a
  // session), and we probably ought to be tracking the highest level Allocator,
  // not the lowest.  Revisit the advertised semantics of the triggering option.
  if (cpu_allocator_collect_full_stats && !cpu_alloc->TracksAllocationSizes()) {
    cpu_alloc = new TrackingAllocator(cpu_alloc, true);
  }
  return cpu_alloc;
}

Allocator* cpu_allocator(int numa_node) {
  // Correctness relies on devices being created prior to the first call
  // to cpu_allocator, if devices are ever to be created in the process.
  // Device creation in turn triggers ProcessState creation and the
  // availability of the correct access pointer via this function call.
  static ProcessStateInterface* ps =
      AllocatorFactoryRegistry::singleton()->process_state();
  if (ps) {
    return ps->GetCPUAllocator(numa_node);
  } else {
    return cpu_allocator_base();
  }
}
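
// Example (illustrative sketch, not part of the original file): typical raw
// use of the process-wide CPU allocator; numa_node has a no-affinity default
// in the declaration in allocator.h.
//
//   Allocator* a = tensorflow::cpu_allocator();
//   void* buf = a->AllocateRaw(Allocator::kAllocatorAlignment, 1024);
//   // ... use buf ...
//   a->DeallocateRaw(buf);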
92 
SubAllocator(const std::vector<Visitor> & alloc_visitors,const std::vector<Visitor> & free_visitors)93 SubAllocator::SubAllocator(const std::vector<Visitor>& alloc_visitors,
94                            const std::vector<Visitor>& free_visitors)
95     : alloc_visitors_(alloc_visitors), free_visitors_(free_visitors) {}
96 
VisitAlloc(void * ptr,int index,size_t num_bytes)97 void SubAllocator::VisitAlloc(void* ptr, int index, size_t num_bytes) {
98   for (const auto& v : alloc_visitors_) {
99     v(ptr, index, num_bytes);
100   }
101 }
102 
VisitFree(void * ptr,int index,size_t num_bytes)103 void SubAllocator::VisitFree(void* ptr, int index, size_t num_bytes) {
104   // Although we don't guarantee any order of visitor application, strive
105   // to apply free visitors in reverse order of alloc visitors.
106   for (int i = free_visitors_.size() - 1; i >= 0; --i) {
107     free_visitors_[i](ptr, index, num_bytes);
108   }
109 }
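
// Example (illustrative sketch, not part of the original file): a Visitor is
// a std::function<void(void*, int, size_t)> (SubAllocator::Visitor), commonly
// used to observe memory regions as a sub-allocator acquires or releases
// them, e.g.
//
//   SubAllocator::Visitor logger = [](void* ptr, int index, size_t n) {
//     VLOG(1) << "region of " << n << " bytes at " << ptr
//             << " (index " << index << ")";
//   };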

}  // namespace tensorflow