1// Copyright 2017 The Chromium Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// This file contains all the logic necessary to intercept allocations on
6// macOS. "malloc zones" are an abstraction that allows the process to intercept
7// all malloc-related functions.  There is no good mechanism [short of
// interposition] to determine when new malloc zones are added, so there's no
9// mechanism to intercept all malloc zones. This file contains logic to
10// intercept the default and purgeable zones, which always exist. A cursory
11// review of Chrome seems to imply that non-default zones are almost never used.
12//
13// This file also contains logic to intercept Core Foundation and Objective-C
14// allocations. The implementations forward to the default malloc zone, so the
15// only reason to intercept these calls is to re-label OOM crashes with slightly
16// more details.
17
18#include "partition_alloc/shim/allocator_interception_apple.h"
19
20#include "partition_alloc/partition_alloc_buildflags.h"
21
22#if BUILDFLAG(USE_ALLOCATOR_SHIM)
23#include <CoreFoundation/CoreFoundation.h>
24#import <Foundation/Foundation.h>
25#include <mach/mach.h>
26#import <objc/runtime.h>
27
28#include <algorithm>
29#include <bit>
30#include <cerrno>
31#include <cstddef>
32#include <new>
33
34#include "build/build_config.h"
35#include "partition_alloc/oom.h"
36#include "partition_alloc/partition_alloc_base/apple/mach_logging.h"
37#include "partition_alloc/partition_alloc_base/compiler_specific.h"
38#include "partition_alloc/partition_alloc_base/logging.h"
39#include "partition_alloc/partition_alloc_check.h"
40#include "partition_alloc/shim/malloc_zone_functions_apple.h"
41#include "partition_alloc/third_party/apple_apsl/CFBase.h"
42
43#if BUILDFLAG(IS_IOS)
44#include "partition_alloc/partition_alloc_base/ios/ios_util.h"
45#else
46#include "partition_alloc/partition_alloc_base/mac/mac_util.h"
47#endif
48
49// The patching of Objective-C runtime bits must be done without any
50// interference from the ARC machinery.
51#if PA_HAS_FEATURE(objc_arc)
52#error "This file must not be compiled with ARC."
53#endif
54
55namespace allocator_shim {
56
57bool g_replaced_default_zone = false;
58
59namespace {
60
61bool g_oom_killer_enabled;
62bool g_allocator_shims_failed_to_install;
63
64// Starting with Mac OS X 10.7, the zone allocators set up by the system are
65// read-only, to prevent them from being overwritten in an attack. However,
66// blindly unprotecting and reprotecting the zone allocators fails with
67// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
68// memory in its bss. Explicit saving/restoring of the protection is required.
69//
70// This function takes a pointer to a malloc zone, de-protects it if necessary,
71// and returns (in the out parameters) a region of memory (if any) to be
72// re-protected when modifications are complete. This approach assumes that
73// there is no contention for the protection of this memory.
74//
75// Returns true if the malloc zone was properly de-protected, or false
76// otherwise. If this function returns false, the out parameters are invalid and
77// the region does not need to be re-protected.
bool DeprotectMallocZone(ChromeMallocZone* default_zone,
                         vm_address_t* reprotection_start,
                         vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  // vm_region_64() rounds *reprotection_start down to the base of the
  // enclosing VM region and fills in that region's size and protection.
  kern_return_t result =
      vm_region_64(mach_task_self(), reprotection_start, reprotection_length,
                   VM_REGION_BASIC_INFO_64,
                   reinterpret_cast<vm_region_info_t>(&info), &count, &unused);
  if (result != KERN_SUCCESS) {
    PA_MACH_LOG(ERROR, result) << "vm_region_64";
    return false;
  }

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See
  // the VM_REGION_BASIC_INFO_64 case in vm_map_region() in 10.15's
  // https://opensource.apple.com/source/xnu/xnu-6153.11.26/osfmk/vm/vm_map.c .
  mach_port_deallocate(mach_task_self(), unused);

  // If the region's maximum protection excludes write access, no vm_protect()
  // call can ever make it writable, so bail out early.
  if (!(info.max_protection & VM_PROT_WRITE)) {
    PA_LOG(ERROR) << "Invalid max_protection " << info.max_protection;
    return false;
  }

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 10 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 10.
  PA_DCHECK(*reprotection_start <=
            reinterpret_cast<vm_address_t>(default_zone));
  vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) -
                          reinterpret_cast<vm_address_t>(*reprotection_start);
  PA_DCHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable. Signal "nothing to
    // re-protect" via a zero start address.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    // Remember the current protection so the caller can restore it, then add
    // write access for the whole region.
    *reprotection_value = info.protection;
    result =
        vm_protect(mach_task_self(), *reprotection_start, *reprotection_length,
                   false, info.protection | VM_PROT_WRITE);
    if (result != KERN_SUCCESS) {
      PA_MACH_LOG(ERROR, result) << "vm_protect";
      return false;
    }
  }
  return true;
}
132
133#if !defined(ADDRESS_SANITIZER)
134
135MallocZoneFunctions g_old_zone;
136MallocZoneFunctions g_old_purgeable_zone;
137
138#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
139
// malloc() replacement for the default zone: forwards to the saved
// implementation and terminates the process instead of returning null for a
// non-empty request.
void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
  void* ptr = g_old_zone.malloc(zone, size);
  if (ptr == nullptr && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return ptr;
}
147
// calloc() replacement for the default zone: terminates the process when the
// saved implementation fails to satisfy a non-empty request.
void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* ptr = g_old_zone.calloc(zone, num_items, size);
  if (ptr == nullptr && num_items != 0 && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(num_items * size);
  }
  return ptr;
}
157
// valloc() replacement for the default zone: terminates the process instead
// of returning null for a non-empty request.
void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
  void* ptr = g_old_zone.valloc(zone, size);
  if (ptr == nullptr && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return ptr;
}
165
// free() for the default zone: nothing to intercept on failure, so simply
// forward to the saved implementation.
void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
  g_old_zone.free(zone, ptr);
}
169
// realloc() replacement for the default zone: terminates the process instead
// of returning null for a non-empty request.
void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
  void* new_ptr = g_old_zone.realloc(zone, ptr, size);
  if (new_ptr == nullptr && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return new_ptr;
}
177
// memalign() replacement for the default zone. A null return only counts as
// OOM when the alignment was valid in the first place.
void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* ptr = g_old_zone.memalign(zone, alignment, size);
  if (ptr != nullptr) {
    return ptr;
  }
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned (an invalid alignment yields
  // EINVAL instead). See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  const bool alignment_is_valid =
      alignment >= sizeof(void*) && std::has_single_bit(alignment);
  if (size != 0 && alignment_is_valid) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return ptr;
}
191
192#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
193
// malloc() replacement for the purgeable zone: terminates the process instead
// of returning null for a non-empty request.
void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* ptr = g_old_purgeable_zone.malloc(zone, size);
  if (ptr == nullptr && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return ptr;
}
201
// calloc() replacement for the purgeable zone: terminates the process when a
// non-empty request cannot be satisfied.
void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* ptr = g_old_purgeable_zone.calloc(zone, num_items, size);
  if (ptr == nullptr && num_items != 0 && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(num_items * size);
  }
  return ptr;
}
211
// valloc() replacement for the purgeable zone: terminates the process instead
// of returning null for a non-empty request.
void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  void* ptr = g_old_purgeable_zone.valloc(zone, size);
  if (ptr == nullptr && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return ptr;
}
219
// free() for the purgeable zone: nothing to intercept on failure, so simply
// forward to the saved implementation.
void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
  g_old_purgeable_zone.free(zone, ptr);
}
223
// realloc() replacement for the purgeable zone: terminates the process
// instead of returning null for a non-empty request.
void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* new_ptr = g_old_purgeable_zone.realloc(zone, ptr, size);
  if (new_ptr == nullptr && size != 0) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return new_ptr;
}
233
// memalign() replacement for the purgeable zone. A null return only counts as
// OOM when the alignment was valid in the first place.
void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* ptr = g_old_purgeable_zone.memalign(zone, alignment, size);
  if (ptr != nullptr) {
    return ptr;
  }
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why null might be returned (an invalid alignment yields
  // EINVAL instead). See posix_memalign() in 10.15's
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c .
  const bool alignment_is_valid =
      alignment >= sizeof(void*) && std::has_single_bit(alignment);
  if (size != 0 && alignment_is_valid) {
    partition_alloc::TerminateBecauseOutOfMemory(size);
  }
  return ptr;
}
247
248#endif  // !defined(ADDRESS_SANITIZER)
249
250#if !defined(ADDRESS_SANITIZER)
251
252// === Core Foundation CFAllocators ===
253
// Returns whether the default CFAllocators' internals can safely be modified
// on the running OS version. When this returns false the CFAllocator
// interception below is skipped entirely.
bool CanGetContextForCFAllocator() {
#if BUILDFLAG(IS_IOS)
  // NOTE(review): presumably iOS 17 made the allocators read-only, mirroring
  // the macOS 14 change below — confirm against the OS release notes.
  return !partition_alloc::internal::base::ios::IsRunningOnOrLater(17, 0, 0);
#else
  // As of macOS 14, the allocators are in read-only memory and can no longer be
  // altered.
  return partition_alloc::internal::base::mac::MacOSMajorVersion() < 14;
#endif
}
263
// Returns a writable pointer to the CFAllocatorContext embedded inside
// |allocator|, by reinterpreting it as the ChromeCFAllocatorLions struct
// (copied from Apple's open-source CFBase, see
// partition_alloc/third_party/apple_apsl/CFBase.h). Only meaningful when
// CanGetContextForCFAllocator() returns true.
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
  return &our_allocator->_context;
}
269
270CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
271CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
272CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
273
// Allocate callback installed on kCFAllocatorSystemDefault: terminates the
// process when the original callback returns null.
void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* ptr = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (ptr == nullptr) {
    partition_alloc::TerminateBecauseOutOfMemory(
        static_cast<size_t>(alloc_size));
  }
  return ptr;
}
284
// Allocate callback installed on kCFAllocatorMalloc: terminates the process
// when the original callback returns null.
void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* ptr = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (ptr == nullptr) {
    partition_alloc::TerminateBecauseOutOfMemory(
        static_cast<size_t>(alloc_size));
  }
  return ptr;
}
295
// Allocate callback installed on kCFAllocatorMallocZone: terminates the
// process when the original callback returns null.
void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* ptr = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (ptr == nullptr) {
    partition_alloc::TerminateBecauseOutOfMemory(
        static_cast<size_t>(alloc_size));
  }
  return ptr;
}
306
307#endif  // !defined(ADDRESS_SANITIZER)
308
309// === Cocoa NSObject allocation ===
310
311typedef id (*allocWithZone_t)(id, SEL, NSZone*);
312allocWithZone_t g_old_allocWithZone;
313
// Replacement IMP for +[NSObject allocWithZone:]: forwards to the original
// implementation and terminates the process if it returns nil. The requested
// size is unknown here, so 0 is reported.
id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id object = g_old_allocWithZone(self, _cmd, zone);
  if (object == nil) {
    partition_alloc::TerminateBecauseOutOfMemory(0);
  }
  return object;
}
321
// Restores |zone|'s originally-stored function table, undoing interception.
// No-op if the zone's functions were never stored.
void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
  if (!IsMallocZoneAlreadyStored(chrome_zone)) {
    return;
  }
  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
  ReplaceZoneFunctions(chrome_zone, &functions);
}
330
331}  // namespace
332
// malloc() that reports failure to the caller instead of triggering the OOM
// killer: calls the saved (pre-interception) zone function when one exists,
// bypassing the oom_killer_* wrappers. Returns true iff the allocation
// succeeded, with the block in *result.
bool UncheckedMallocMac(size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  // ASan's zones are not intercepted, so plain malloc() is already unchecked.
  *result = malloc(size);
#else
  if (g_old_zone.malloc) {
    *result = g_old_zone.malloc(malloc_default_zone(), size);
  } else {
    *result = malloc(size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}
346
// calloc() that reports failure to the caller instead of triggering the OOM
// killer: calls the saved (pre-interception) zone function when one exists,
// bypassing the oom_killer_* wrappers. Returns true iff the allocation
// succeeded, with the block in *result.
bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
#if defined(ADDRESS_SANITIZER)
  // ASan's zones are not intercepted, so plain calloc() is already unchecked.
  *result = calloc(num_items, size);
#else
  if (g_old_zone.calloc) {
    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
  } else {
    *result = calloc(num_items, size);
  }
#endif  // defined(ADDRESS_SANITIZER)

  return *result != NULL;
}
360
// Records the system allocator routines for every zone so the shim can
// forward to them later.
void InitializeDefaultDispatchToMacAllocator() {
  StoreFunctionsForAllZones();
}
364
// Snapshots the current function table of the process default malloc zone.
void StoreFunctionsForDefaultZone() {
  StoreMallocZone(reinterpret_cast<ChromeMallocZone*>(malloc_default_zone()));
}
370
// Snapshots the function tables of the default zone and of every zone
// registered with the system, so they can later be restored or forwarded to.
void StoreFunctionsForAllZones() {
  // This ensures that the default zone is always at the front of the array,
  // which is important for performance.
  StoreFunctionsForDefaultZone();

  vm_address_t* zones;
  unsigned int count;
  // Pass nullptr for the memory reader (in-process enumeration), matching
  // the malloc_get_all_zones() call in ReplaceFunctionsForStoredZones().
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS) {
    // Best-effort: the default zone was already stored above.
    return;
  }
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    StoreMallocZone(zone);
  }
}
387
// Installs |functions| into the default zone and every zone returned by
// malloc_get_all_zones() whose current functions differ from |functions|.
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
  // The default zone does not get returned in malloc_get_all_zones().
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
    ReplaceZoneFunctions(default_zone, functions);
  }

  vm_address_t* zones;
  unsigned int count;
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  if (kr != KERN_SUCCESS) {
    // Note: g_replaced_default_zone is deliberately left untouched on this
    // early-return path.
    return;
  }
  for (unsigned int i = 0; i < count; ++i) {
    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
    if (DoesMallocZoneNeedReplacing(zone, functions)) {
      ReplaceZoneFunctions(zone, functions);
    }
  }
  g_replaced_default_zone = true;
}
411
// Installs the OOM killer: replaces the entry points of the default and
// purgeable malloc zones, the allocate callbacks of the three default
// CFAllocators, and +[NSObject allocWithZone:] with wrappers that terminate
// the process when an allocation fails. Idempotent — subsequent calls are
// no-ops.
void InterceptAllocationsMac() {
  if (g_oom_killer_enabled) {
    return;
  }

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger than
  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
  // still fail with a NULL rather than dying (see malloc_zone_malloc() in
  // https://opensource.apple.com/source/libmalloc/libmalloc-283/src/malloc.c
  // for details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // The malloc zone backed by PartitionAlloc crashes by default, so there is
  // no need to install the OOM killer.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  if (!IsMallocZoneAlreadyStored(default_zone)) {
    // Save the original functions in g_old_zone (the oom_killer_* wrappers
    // forward to them), then swap in the wrappers.
    StoreZoneFunctions(default_zone, &g_old_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc;
    new_functions.calloc = oom_killer_calloc;
    new_functions.valloc = oom_killer_valloc;
    new_functions.free = oom_killer_free;
    new_functions.realloc = oom_killer_realloc;
    new_functions.memalign = oom_killer_memalign;

    ReplaceZoneFunctions(default_zone, &new_functions);
    g_replaced_default_zone = true;
  }
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  // The purgeable zone is intercepted even when PartitionAlloc backs malloc,
  // since it is a distinct zone. malloc_default_purgeable_zone() may create
  // the zone on first call.
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
    MallocZoneFunctions new_functions = {};
    new_functions.malloc = oom_killer_malloc_purgeable;
    new_functions.calloc = oom_killer_calloc_purgeable;
    new_functions.valloc = oom_killer_valloc_purgeable;
    new_functions.free = oom_killer_free_purgeable;
    new_functions.realloc = oom_killer_realloc_purgeable;
    new_functions.memalign = oom_killer_memalign_purgeable;
    ReplaceZoneFunctions(purgeable_zone, &new_functions);
  }
#endif

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free list
  // means that it's likely that a failure will not be due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must always
  // be expecting to receive less memory than was requested, even in situations
  // where memory pressure is not a concern. Finally, the only public interface
  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

#ifndef ADDRESS_SANITIZER
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  PA_CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
           !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  // Skip the CFAllocator interception entirely on OS versions where the
  // allocator internals are read-only or unknown.
  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    PA_CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    PA_CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    PA_CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    PA_CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    PA_CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    PA_CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  }
#endif

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  PA_CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";

  // Swap the class method's IMP via the Objective-C runtime, keeping the
  // original in g_old_allocWithZone so the wrapper can forward to it.
  Class nsobject_class = [NSObject class];
  Method orig_method =
      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
  g_old_allocWithZone =
      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
  PA_CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
538
// Restores the stored (pre-interception) functions on the default zone and
// every registered zone, then clears the stored tables.
void UninterceptMallocZonesForTesting() {
  UninterceptMallocZoneForTesting(malloc_default_zone());  // IN-TEST
  vm_address_t* zones;
  unsigned int count;
  // Pass nullptr for the memory reader (in-process enumeration), matching
  // the malloc_get_all_zones() call in ReplaceFunctionsForStoredZones().
  kern_return_t kr =
      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
  PA_CHECK(kr == KERN_SUCCESS);
  for (unsigned int i = 0; i < count; ++i) {
    UninterceptMallocZoneForTesting(  // IN-TEST
        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
  }

  ClearAllMallocZonesForTesting();  // IN-TEST
}
552
// Returns false iff any zone replacement failed (see the de-protection
// failure path in ReplaceZoneFunctions()).
bool AreMallocZonesIntercepted() {
  return !g_allocator_shims_failed_to_install;
}
556
// Picks up malloc zones created since interception was installed: stores
// their original functions, then copies the (already shimmed) default zone's
// functions onto every stored zone that differs.
void ShimNewMallocZones() {
  StoreFunctionsForAllZones();

  // Use the functions for the default zone as a template to replace those
  // new zones.
  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  PA_DCHECK(IsMallocZoneAlreadyStored(default_zone));

  MallocZoneFunctions new_functions;
  StoreZoneFunctions(default_zone, &new_functions);
  ReplaceFunctionsForStoredZones(&new_functions);
}
570
// Overwrites |zone|'s function pointers with |functions|, temporarily making
// the zone's memory writable if needed. On de-protection failure, records the
// failure in g_allocator_shims_failed_to_install and leaves the zone alone.
void ReplaceZoneFunctions(ChromeMallocZone* zone,
                          const MallocZoneFunctions* functions) {
  // Remove protection.
  vm_address_t reprotection_start = 0;
  vm_size_t reprotection_length = 0;
  vm_prot_t reprotection_value = VM_PROT_NONE;
  bool success = DeprotectMallocZone(zone, &reprotection_start,
                                     &reprotection_length, &reprotection_value);
  if (!success) {
    g_allocator_shims_failed_to_install = true;
    return;
  }

  // These five entry points exist in every zone version and must always be
  // provided by the replacement.
  PA_CHECK(functions->malloc && functions->calloc && functions->valloc &&
           functions->free && functions->realloc);
  zone->malloc = functions->malloc;
  zone->calloc = functions->calloc;
  zone->valloc = functions->valloc;
  zone->free = functions->free;
  zone->realloc = functions->realloc;
  // The remaining fields are optional and/or only present at or above a
  // particular zone version; replace each only when both sides support it.
  if (functions->batch_malloc) {
    zone->batch_malloc = functions->batch_malloc;
  }
  if (functions->batch_free) {
    zone->batch_free = functions->batch_free;
  }
  if (functions->size) {
    zone->size = functions->size;
  }
  if (zone->version >= 5 && functions->memalign) {
    zone->memalign = functions->memalign;
  }
  if (zone->version >= 6 && functions->free_definite_size) {
    zone->free_definite_size = functions->free_definite_size;
  }
  if (zone->version >= 10 && functions->claimed_address) {
    zone->claimed_address = functions->claimed_address;
  }
  if (zone->version >= 13 && functions->try_free_default) {
    zone->try_free_default = functions->try_free_default;
  }

  // Cap the version to the max supported to ensure malloc doesn't try to call
  // functions that weren't replaced.
#if (__MAC_OS_X_VERSION_MAX_ALLOWED >= 130000) || \
    (__IPHONE_OS_VERSION_MAX_ALLOWED >= 160100)
  zone->version = std::min(zone->version, 13U);
#else
  zone->version = std::min(zone->version, 12U);
#endif

  // Restore protection if it was active.
  if (reprotection_start) {
    kern_return_t result =
        vm_protect(mach_task_self(), reprotection_start, reprotection_length,
                   false, reprotection_value);
    PA_MACH_DCHECK(result == KERN_SUCCESS, result) << "vm_protect";
  }
}
630
631}  // namespace allocator_shim
632
633#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
634