1 // Copyright 2023 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifdef PARTITION_ALLOC_SHIM_SHIM_ALLOC_FUNCTIONS_H_
6 #error This header is meant to be included only once by allocator_shim*.cc
7 #endif
8 
9 #ifndef PARTITION_ALLOC_SHIM_SHIM_ALLOC_FUNCTIONS_H_
10 #define PARTITION_ALLOC_SHIM_SHIM_ALLOC_FUNCTIONS_H_
11 
12 #include <bit>
13 #include <cerrno>
14 
15 #include "build/build_config.h"
16 #include "partition_alloc/partition_alloc_base/bits.h"
17 #include "partition_alloc/partition_alloc_base/compiler_specific.h"
18 #include "partition_alloc/partition_alloc_base/memory/page_size.h"
19 #include "partition_alloc/partition_alloc_buildflags.h"
20 #include "partition_alloc/partition_alloc_check.h"
21 
22 namespace {
23 
GetCachedPageSize()24 PA_ALWAYS_INLINE size_t GetCachedPageSize() {
25   static size_t pagesize = 0;
26   if (!pagesize) {
27     pagesize = partition_alloc::internal::base::GetPageSize();
28   }
29   return pagesize;
30 }
31 
32 }  // namespace
33 
34 // The Shim* functions below are the entry-points into the shim-layer and
35 // are supposed to be invoked by the allocator_shim_override_*
36 // headers to route the malloc / new symbols through the shim layer.
37 // They are defined as ALWAYS_INLINE in order to remove a level of indirection
38 // between the system-defined entry points and the shim implementations.
39 extern "C" {
40 
41 // The general pattern for allocations is:
42 // - Try to allocate, if succeeded return the pointer.
43 // - If the allocation failed:
44 //   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND allocator_shim::internal::SetCallNewHandlerOnMallocFailure(true)
//     has been called.
47 //   - If the std::new_handler is NOT set just return nullptr.
48 //   - If the std::new_handler is set:
49 //     - Assume it will abort() if it fails (very likely the new_handler will
50 //       just suicide printing a message).
51 //     - Assume it did succeed if it returns, in which case reattempt the alloc.
52 
ShimCppNew(size_t size)53 PA_ALWAYS_INLINE void* ShimCppNew(size_t size) {
54   const allocator_shim::AllocatorDispatch* const chain_head =
55       allocator_shim::internal::GetChainHead();
56   void* ptr;
57   do {
58     void* context = nullptr;
59 #if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
60     context = malloc_default_zone();
61 #endif
62     ptr = chain_head->alloc_function(chain_head, size, context);
63   } while (!ptr && allocator_shim::internal::CallNewHandler(size));
64   return ptr;
65 }
66 
ShimCppNewNoThrow(size_t size)67 PA_ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
68   void* context = nullptr;
69 #if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
70   context = malloc_default_zone();
71 #endif
72   const allocator_shim::AllocatorDispatch* const chain_head =
73       allocator_shim::internal::GetChainHead();
74   return chain_head->alloc_unchecked_function(chain_head, size, context);
75 }
76 
ShimCppAlignedNew(size_t size,size_t alignment)77 PA_ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
78   const allocator_shim::AllocatorDispatch* const chain_head =
79       allocator_shim::internal::GetChainHead();
80   void* ptr;
81   do {
82     void* context = nullptr;
83 #if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
84     context = malloc_default_zone();
85 #endif
86     ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
87                                              context);
88   } while (!ptr && allocator_shim::internal::CallNewHandler(size));
89   return ptr;
90 }
91 
ShimCppDelete(void * address)92 PA_ALWAYS_INLINE void ShimCppDelete(void* address) {
93   void* context = nullptr;
94 #if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
95   context = malloc_default_zone();
96 #endif
97   const allocator_shim::AllocatorDispatch* const chain_head =
98       allocator_shim::internal::GetChainHead();
99   return chain_head->free_function(chain_head, address, context);
100 }
101 
ShimMalloc(size_t size,void * context)102 PA_ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
103   const allocator_shim::AllocatorDispatch* const chain_head =
104       allocator_shim::internal::GetChainHead();
105   void* ptr;
106   do {
107     ptr = chain_head->alloc_function(chain_head, size, context);
108   } while (!ptr &&
109            allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
110            allocator_shim::internal::CallNewHandler(size));
111   return ptr;
112 }
113 
ShimCalloc(size_t n,size_t size,void * context)114 PA_ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
115   const allocator_shim::AllocatorDispatch* const chain_head =
116       allocator_shim::internal::GetChainHead();
117   void* ptr;
118   do {
119     ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
120                                                       context);
121   } while (!ptr &&
122            allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
123            allocator_shim::internal::CallNewHandler(size));
124   return ptr;
125 }
126 
ShimRealloc(void * address,size_t size,void * context)127 PA_ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
128   // realloc(size == 0) means free() and might return a nullptr. We should
129   // not call the std::new_handler in that case, though.
130   const allocator_shim::AllocatorDispatch* const chain_head =
131       allocator_shim::internal::GetChainHead();
132   void* ptr;
133   do {
134     ptr = chain_head->realloc_function(chain_head, address, size, context);
135   } while (!ptr && size &&
136            allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
137            allocator_shim::internal::CallNewHandler(size));
138   return ptr;
139 }
140 
ShimMemalign(size_t alignment,size_t size,void * context)141 PA_ALWAYS_INLINE void* ShimMemalign(size_t alignment,
142                                     size_t size,
143                                     void* context) {
144   const allocator_shim::AllocatorDispatch* const chain_head =
145       allocator_shim::internal::GetChainHead();
146   void* ptr;
147   do {
148     ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
149                                              context);
150   } while (!ptr &&
151            allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
152            allocator_shim::internal::CallNewHandler(size));
153   return ptr;
154 }
155 
ShimPosixMemalign(void ** res,size_t alignment,size_t size)156 PA_ALWAYS_INLINE int ShimPosixMemalign(void** res,
157                                        size_t alignment,
158                                        size_t size) {
159   // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
160   // in tc_malloc.cc.
161   if (((alignment % sizeof(void*)) != 0) || !std::has_single_bit(alignment)) {
162     return EINVAL;
163   }
164   void* ptr = ShimMemalign(alignment, size, nullptr);
165   *res = ptr;
166   return ptr ? 0 : ENOMEM;
167 }
168 
ShimValloc(size_t size,void * context)169 PA_ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
170   return ShimMemalign(GetCachedPageSize(), size, context);
171 }
172 
ShimPvalloc(size_t size)173 PA_ALWAYS_INLINE void* ShimPvalloc(size_t size) {
174   // pvalloc(0) should allocate one page, according to its man page.
175   if (size == 0) {
176     size = GetCachedPageSize();
177   } else {
178     size = partition_alloc::internal::base::bits::AlignUp(size,
179                                                           GetCachedPageSize());
180   }
181   // The third argument is nullptr because pvalloc is glibc only and does not
182   // exist on OSX/BSD systems.
183   return ShimMemalign(GetCachedPageSize(), size, nullptr);
184 }
185 
ShimFree(void * address,void * context)186 PA_ALWAYS_INLINE void ShimFree(void* address, void* context) {
187   const allocator_shim::AllocatorDispatch* const chain_head =
188       allocator_shim::internal::GetChainHead();
189   return chain_head->free_function(chain_head, address, context);
190 }
191 
ShimGetSizeEstimate(const void * address,void * context)192 PA_ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address,
193                                             void* context) {
194   const allocator_shim::AllocatorDispatch* const chain_head =
195       allocator_shim::internal::GetChainHead();
196   return chain_head->get_size_estimate_function(
197       chain_head, const_cast<void*>(address), context);
198 }
199 
ShimGoodSize(size_t size,void * context)200 PA_ALWAYS_INLINE size_t ShimGoodSize(size_t size, void* context) {
201   const allocator_shim::AllocatorDispatch* const chain_head =
202       allocator_shim::internal::GetChainHead();
203   return chain_head->good_size_function(chain_head, size, context);
204 }
205 
ShimClaimedAddress(void * address,void * context)206 PA_ALWAYS_INLINE bool ShimClaimedAddress(void* address, void* context) {
207   const allocator_shim::AllocatorDispatch* const chain_head =
208       allocator_shim::internal::GetChainHead();
209   return chain_head->claimed_address_function(chain_head, address, context);
210 }
211 
ShimBatchMalloc(size_t size,void ** results,unsigned num_requested,void * context)212 PA_ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
213                                           void** results,
214                                           unsigned num_requested,
215                                           void* context) {
216   const allocator_shim::AllocatorDispatch* const chain_head =
217       allocator_shim::internal::GetChainHead();
218   return chain_head->batch_malloc_function(chain_head, size, results,
219                                            num_requested, context);
220 }
221 
ShimBatchFree(void ** to_be_freed,unsigned num_to_be_freed,void * context)222 PA_ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
223                                     unsigned num_to_be_freed,
224                                     void* context) {
225   const allocator_shim::AllocatorDispatch* const chain_head =
226       allocator_shim::internal::GetChainHead();
227   return chain_head->batch_free_function(chain_head, to_be_freed,
228                                          num_to_be_freed, context);
229 }
230 
ShimFreeDefiniteSize(void * ptr,size_t size,void * context)231 PA_ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr,
232                                            size_t size,
233                                            void* context) {
234   const allocator_shim::AllocatorDispatch* const chain_head =
235       allocator_shim::internal::GetChainHead();
236   return chain_head->free_definite_size_function(chain_head, ptr, size,
237                                                  context);
238 }
239 
ShimTryFreeDefault(void * ptr,void * context)240 PA_ALWAYS_INLINE void ShimTryFreeDefault(void* ptr, void* context) {
241   const allocator_shim::AllocatorDispatch* const chain_head =
242       allocator_shim::internal::GetChainHead();
243   return chain_head->try_free_default_function(chain_head, ptr, context);
244 }
245 
ShimAlignedMalloc(size_t size,size_t alignment,void * context)246 PA_ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
247                                          size_t alignment,
248                                          void* context) {
249   const allocator_shim::AllocatorDispatch* const chain_head =
250       allocator_shim::internal::GetChainHead();
251   void* ptr = nullptr;
252   do {
253     ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
254                                               context);
255   } while (!ptr &&
256            allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
257            allocator_shim::internal::CallNewHandler(size));
258   return ptr;
259 }
260 
ShimAlignedRealloc(void * address,size_t size,size_t alignment,void * context)261 PA_ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
262                                           size_t size,
263                                           size_t alignment,
264                                           void* context) {
265   // _aligned_realloc(size == 0) means _aligned_free() and might return a
266   // nullptr. We should not call the std::new_handler in that case, though.
267   const allocator_shim::AllocatorDispatch* const chain_head =
268       allocator_shim::internal::GetChainHead();
269   void* ptr = nullptr;
270   do {
271     ptr = chain_head->aligned_realloc_function(chain_head, address, size,
272                                                alignment, context);
273   } while (!ptr && size &&
274            allocator_shim::internal::g_call_new_handler_on_malloc_failure &&
275            allocator_shim::internal::CallNewHandler(size));
276   return ptr;
277 }
278 
ShimAlignedFree(void * address,void * context)279 PA_ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
280   const allocator_shim::AllocatorDispatch* const chain_head =
281       allocator_shim::internal::GetChainHead();
282   return chain_head->aligned_free_function(chain_head, address, context);
283 }
284 
285 #undef PA_ALWAYS_INLINE
286 
287 }  // extern "C"
288 
289 #endif  // PARTITION_ALLOC_SHIM_SHIM_ALLOC_FUNCTIONS_H_
290