1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/metrics/persistent_memory_allocator.h"
6 
7 #include <assert.h>
8 
9 #include <algorithm>
10 #include <atomic>
11 #include <optional>
12 #include <string_view>
13 
14 #include "base/bits.h"
15 #include "base/containers/contains.h"
16 #include "base/debug/alias.h"
17 #include "base/debug/crash_logging.h"
18 #include "base/debug/dump_without_crashing.h"
19 #include "base/files/memory_mapped_file.h"
20 #include "base/logging.h"
21 #include "base/metrics/histogram_functions.h"
22 #include "base/metrics/persistent_histogram_allocator.h"
23 #include "base/metrics/sparse_histogram.h"
24 #include "base/notreached.h"
25 #include "base/numerics/checked_math.h"
26 #include "base/numerics/safe_conversions.h"
27 #include "base/strings/strcat.h"
28 #include "base/system/sys_info.h"
29 #include "base/threading/scoped_blocking_call.h"
30 #include "build/build_config.h"
31 
32 #if BUILDFLAG(IS_WIN)
33 #include <windows.h>
34 // Must be after <windows.h>
35 #include <winbase.h>
36 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
37 #include <sys/mman.h>
38 #if BUILDFLAG(IS_ANDROID)
39 #include <sys/prctl.h>
40 #endif
41 #endif
42 
43 namespace {
44 
45 // Limit of memory segment size. It has to fit in an unsigned 32-bit number
46 // and should be a power of 2 in order to accommodate almost any page size.
47 constexpr uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
48 
49 // A constant (random) value placed in the shared metadata to identify
50 // an already initialized memory segment.
51 constexpr uint32_t kGlobalCookie = 0x408305DC;
52 
53 // The current version of the metadata. If updates are made that change
54 // the metadata, the version number can be queried to operate in a backward-
55 // compatible manner until the memory segment is completely re-initialized.
56 // Note: If you update the metadata in a non-backwards compatible way, reset
57 // |kOldCompatibleVersions|. Otherwise, add the previous version.
58 constexpr uint32_t kGlobalVersion = 3;
59 static constexpr uint32_t kOldCompatibleVersions[] = {2};
60 
61 // Constant values placed in the block headers to indicate a block's state.
62 constexpr uint32_t kBlockCookieFree = 0;
63 constexpr uint32_t kBlockCookieQueue = 1;
64 constexpr uint32_t kBlockCookieWasted = 0x4B594F52;
65 constexpr uint32_t kBlockCookieAllocated = 0xC8799269;
66 
67 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
68 // types rather than combined bitfield.
69 
70 // Flags stored in the flags_ field of the SharedMetadata structure below.
71 constexpr uint32_t kFlagCorrupt = 1 << 0;
72 constexpr uint32_t kFlagFull = 1 << 1;
73 
74 // Errors that are logged in "errors" histogram.
75 // These values are persisted to logs. Entries should not be renumbered and
76 // numeric values should never be reused.
77 enum AllocatorError : int {
78   kMemoryIsCorrupt = 1,
79   kMaxValue = kMemoryIsCorrupt,
80 };
81 
82 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, uint32_t flag) {
83   uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
84   return (loaded_flags & flag) != 0;
85 }
86 
87 void SetFlag(volatile std::atomic<uint32_t>* flags, uint32_t flag) {
88   uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
89   for (;;) {
90     uint32_t new_flags = (loaded_flags & ~flag) | flag;
91     // In the failure case, the actual "flags" value is stored in
92     // loaded_flags. These accesses are "relaxed" because they are
93     // completely independent of all other values.
94     if (flags->compare_exchange_weak(loaded_flags, new_flags,
95                                      std::memory_order_relaxed,
96                                      std::memory_order_relaxed)) {
97       break;
98     }
99   }
100 }
101 
102 }  // namespace
103 
104 namespace base {
105 
106 // The block-header is placed at the top of every allocation within the
107 // segment to describe the data that follows it.
108 struct PersistentMemoryAllocator::BlockHeader {
109   uint32_t size;       // Number of bytes in this block, including header.
110   uint32_t cookie;     // Constant value indicating completed allocation.
111   std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
112   std::atomic<uint32_t> next;     // Pointer to the next block when iterating.
113 };
114 
115 // The shared metadata exists once at the top of the memory segment to
116 // describe the state of the allocator to all processes. The size of this
117 // structure must be a multiple of 64-bits to ensure compatibility between
118 // architectures.
119 struct PersistentMemoryAllocator::SharedMetadata {
120   uint32_t cookie;     // Some value that indicates complete initialization.
121   uint32_t size;       // Total size of memory segment.
122   uint32_t page_size;  // Paging size within memory segment.
123   uint32_t version;    // Version code so upgrades don't break.
124   uint64_t id;         // Arbitrary ID number given by creator.
125   uint32_t name;       // Reference to stored name string.
126   uint32_t padding1;   // Pad-out read-only data to 64-bit alignment.
127 
128   // Above is read-only after first construction. Below may be changed and
129   // so must be marked "volatile" to provide correct inter-process behavior.
130 
131   // State of the memory, plus some padding to keep alignment.
132   volatile std::atomic<uint8_t> memory_state;  // MemoryState enum values.
133   uint8_t padding2[3];
134 
135   // Bitfield of information flags. Access to this should be done through
136   // the CheckFlag() and SetFlag() methods defined above.
137   volatile std::atomic<uint32_t> flags;
138 
139   // Offset/reference to first free space in segment.
140   volatile std::atomic<uint32_t> freeptr;
141 
142   // The "iterable" queue is an M&S Queue as described here, append-only:
143   // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
144   // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
145   volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
146   volatile BlockHeader queue;   // Empty block for linked-list head/tail.
147 };
148 
149 // The "queue" block header is used to detect "last node" so that zero/null
150 // can be used to indicate that it hasn't been added at all. It is part of
151 // the SharedMetadata structure which itself is always located at offset zero.
152 const PersistentMemoryAllocator::Reference
153     PersistentMemoryAllocator::kReferenceQueue =
154         offsetof(SharedMetadata, queue);
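// Rough segment layout (illustrative only; the struct definitions above and
// the header file are authoritative):
//
//   offset 0                sizeof(SharedMetadata)                  freeptr
//   |<---- SharedMetadata ---->|<-- block -->|<-- block -->| ... |  unused
//        (contains |queue|)     ^BlockHeader  ^BlockHeader
//
// A Reference is simply the byte offset of a block's header from the start
// of the segment, so kReferenceQueue is the offset of the |queue| node
// embedded in SharedMetadata and doubles as the head/terminator of the
// "iterable" list.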
155 
156 const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
157     FILE_PATH_LITERAL(".pma");
158 
159 
160 PersistentMemoryAllocator::Iterator::Iterator(
161     const PersistentMemoryAllocator* allocator)
162     : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
163 
164 PersistentMemoryAllocator::Iterator::Iterator(
165     const PersistentMemoryAllocator* allocator,
166     Reference starting_after)
167     : allocator_(allocator), last_record_(0), record_count_(0) {
168   Reset(starting_after);
169 }
170 
171 PersistentMemoryAllocator::Iterator::~Iterator() = default;
172 
173 void PersistentMemoryAllocator::Iterator::Reset() {
174   last_record_.store(kReferenceQueue, std::memory_order_relaxed);
175   record_count_.store(0, std::memory_order_relaxed);
176 }
177 
178 void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
179   if (starting_after == 0) {
180     Reset();
181     return;
182   }
183 
184   last_record_.store(starting_after, std::memory_order_relaxed);
185   record_count_.store(0, std::memory_order_relaxed);
186 
187   // Ensure that the starting point is a valid, iterable block (meaning it can
188   // be read and has a non-zero "next" pointer).
189   const volatile BlockHeader* block =
190       allocator_->GetBlock(starting_after, 0, 0, false, false);
191   if (!block || block->next.load(std::memory_order_relaxed) == 0) {
192     NOTREACHED();
193     last_record_.store(kReferenceQueue, std::memory_order_release);
194   }
195 }
196 
197 PersistentMemoryAllocator::Reference
198 PersistentMemoryAllocator::Iterator::GetLast() {
199   Reference last = last_record_.load(std::memory_order_relaxed);
200   if (last == kReferenceQueue)
201     return kReferenceNull;
202   return last;
203 }
204 
205 PersistentMemoryAllocator::Reference
206 PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
207   // Make a copy of the existing count of found-records, acquiring all changes
208   // made to the allocator, notably "freeptr" (see comment in loop for why
209   // the load of that value cannot be moved above here) that occurred during
210   // any previous runs of this method, including those by parallel threads
211   // that interrupted it. It pairs with the Release at the end of this method.
212   //
213   // Otherwise, if the compiler were to arrange the two loads such that
214   // "count" was fetched _after_ "freeptr" then it would be possible for
215   // this thread to be interrupted between them and other threads perform
216   // multiple allocations, make-iterables, and iterations (with the included
217   // increment of |record_count_|) culminating in the check at the bottom
218   // mistakenly determining that a loop exists. Isn't this stuff fun?
219   uint32_t count = record_count_.load(std::memory_order_acquire);
220 
221   Reference last = last_record_.load(std::memory_order_acquire);
222   Reference next;
223   while (true) {
224     const volatile BlockHeader* block =
225         allocator_->GetBlock(last, 0, 0, true, false);
226     if (!block)  // Invalid iterator state.
227       return kReferenceNull;
228 
229     // The compiler and CPU can freely reorder all memory accesses on which
230     // there are no dependencies. It could, for example, move the load of
231     // "freeptr" to above this point because there are no explicit dependencies
232     // between it and "next". If it did, however, then another block could
233     // be queued after that but before the following load meaning there is
234     // one more queued block than the future "detect loop by having more
235 // blocks than could fit before freeptr" will allow.
236     //
237     // By "acquiring" the "next" value here, it's synchronized to the enqueue
238     // of the node which in turn is synchronized to the allocation (which sets
239     // freeptr). Thus, the scenario above cannot happen.
240     next = block->next.load(std::memory_order_acquire);
241     if (next == kReferenceQueue)  // No next allocation in queue.
242       return kReferenceNull;
243     block = allocator_->GetBlock(next, 0, 0, false, false);
244     if (!block) {  // Memory is corrupt.
245       allocator_->SetCorrupt();
246       return kReferenceNull;
247     }
248 
249     // Update the "last_record" pointer to be the reference being returned.
250     // If it fails then another thread has already iterated past it so loop
251     // again. Failing will also load the existing value into "last" so there
252     // is no need to do another such load when the while-loop restarts. A
253     // "strong" compare-exchange is used because failing unnecessarily would
254     // mean repeating some fairly costly validations above.
255     if (last_record_.compare_exchange_strong(
256             last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
257       *type_return = block->type_id.load(std::memory_order_relaxed);
258       break;
259     }
260   }
261 
262   // Memory corruption could cause a loop in the list. Such must be detected
263   // so as to not cause an infinite loop in the caller. This is done by simply
264   // making sure it doesn't iterate more times than the absolute maximum
265   // number of allocations that could have been made. Callers are likely
266   // to loop multiple times before it is detected but at least it stops.
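  // The divisor used below is the smallest block that can be allocated (a
  // BlockHeader plus one kAllocAlignment-sized payload), so |max_records| is
  // an upper bound on how many records could legitimately have been created.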
267   const uint32_t freeptr = std::min(
268       allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
269       allocator_->mem_size_);
270   const uint32_t max_records =
271       freeptr / (sizeof(BlockHeader) + kAllocAlignment);
272   if (count > max_records) {
273     allocator_->SetCorrupt();
274     return kReferenceNull;
275   }
276 
277   // Increment the count and release the changes made above. It pairs with
278   // the Acquire at the top of this method. Note that this operation is not
279   // strictly synchronized with fetching of the object to return, which would
280   // have to be done inside the loop and is somewhat complicated to achieve.
281   // It does not matter if it falls behind temporarily so long as it never
282   // gets ahead.
283   record_count_.fetch_add(1, std::memory_order_release);
284   return next;
285 }
286 
287 PersistentMemoryAllocator::Reference
288 PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
289   Reference ref;
290   uint32_t type_found;
291   while ((ref = GetNext(&type_found)) != 0) {
292     if (type_found == type_match)
293       return ref;
294   }
295   return kReferenceNull;
296 }
297 
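// Illustrative use of the iterator (a sketch only, not code from this file;
// GetAsObject<T>() is declared in the header):
//
//   PersistentMemoryAllocator::Iterator iter(allocator);
//   uint32_t type_id;
//   for (PersistentMemoryAllocator::Reference ref = iter.GetNext(&type_id);
//        ref != PersistentMemoryAllocator::kReferenceNull;
//        ref = iter.GetNext(&type_id)) {
//     // Resolve |ref| to memory, e.g. via allocator->GetAsObject<T>(ref).
//   }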
298 
299 // static
300 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
301                                                    size_t size,
302                                                    size_t page_size,
303                                                    bool readonly) {
304   return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
305           (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
306           (size % kAllocAlignment == 0 || readonly) &&
307           (page_size == 0 || size % page_size == 0 || readonly));
308 }
309 
310 PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
311                                                      size_t size,
312                                                      size_t page_size,
313                                                      uint64_t id,
314                                                      std::string_view name,
315                                                      AccessMode access_mode)
316     : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
317                                 size,
318                                 page_size,
319                                 id,
320                                 name,
321                                 access_mode) {}
322 
323 PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
324                                                      size_t size,
325                                                      size_t page_size,
326                                                      uint64_t id,
327                                                      std::string_view name,
328                                                      AccessMode access_mode)
329     : mem_base_(static_cast<char*>(memory.base)),
330       mem_type_(memory.type),
331       mem_size_(checked_cast<uint32_t>(size)),
332       mem_page_(checked_cast<uint32_t>((page_size ? page_size : size))),
333 #if BUILDFLAG(IS_NACL)
334       vm_page_size_(4096U),  // SysInfo is not built for NACL.
335 #else
336       vm_page_size_(SysInfo::VMAllocationGranularity()),
337 #endif
338       access_mode_(access_mode) {
339   // These asserts ensure that the structures are 32/64-bit agnostic and meet
340   // all the requirements of use within the allocator. They access private
341   // definitions and so cannot be moved to the global scope.
342   static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
343                 "struct is not portable across different natural word widths");
344   static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
345                 "struct is not portable across different natural word widths");
346 
347   static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
348                 "BlockHeader is not a multiple of kAllocAlignment");
349   static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
350                 "SharedMetadata is not a multiple of kAllocAlignment");
351   static_assert(kReferenceQueue % kAllocAlignment == 0,
352                 "\"queue\" is not aligned properly; must be at end of struct");
353 
354   // Ensure that memory segment is of acceptable size.
355   const bool readonly = access_mode == kReadOnly;
356   CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
357 
358   // These atomics operate inter-process and so must be lock-free.
359   DCHECK(SharedMetadata().freeptr.is_lock_free());
360   DCHECK(SharedMetadata().flags.is_lock_free());
361   DCHECK(BlockHeader().next.is_lock_free());
362   CHECK(corrupt_.is_lock_free());
363 
364   // When calling SetCorrupt() during initialization, don't write to the memory
365   // in kReadOnly and kReadWriteExisting modes.
366   const bool allow_write_for_set_corrupt = (access_mode == kReadWrite);
367   if (shared_meta()->cookie != kGlobalCookie) {
368     if (access_mode != kReadWrite) {
369       SetCorrupt(allow_write_for_set_corrupt);
370       return;
371     }
372 
373     // This block is only executed when a completely new memory segment is
374     // being initialized. It's unshared and single-threaded...
375     volatile BlockHeader* const first_block =
376         reinterpret_cast<volatile BlockHeader*>(mem_base_ +
377                                                 sizeof(SharedMetadata));
378     if (shared_meta()->cookie != 0 ||
379         shared_meta()->size != 0 ||
380         shared_meta()->version != 0 ||
381         shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
382         shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
383         shared_meta()->id != 0 ||
384         shared_meta()->name != 0 ||
385         shared_meta()->tailptr != 0 ||
386         shared_meta()->queue.cookie != 0 ||
387         shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
388         first_block->size != 0 ||
389         first_block->cookie != 0 ||
390         first_block->type_id.load(std::memory_order_relaxed) != 0 ||
391         first_block->next != 0) {
392       // ...or something malicious has been playing with the metadata.
393       CHECK(allow_write_for_set_corrupt);
394       SetCorrupt(allow_write_for_set_corrupt);
395     }
396 
397     // This is still safe to do even if corruption has been detected.
398     shared_meta()->cookie = kGlobalCookie;
399     shared_meta()->size = mem_size_;
400     shared_meta()->page_size = mem_page_;
401     shared_meta()->version = kGlobalVersion;
402     shared_meta()->id = id;
403     // Don't overwrite `freeptr` if it is set since we could have raced with
404     // another allocator. In such a case, `freeptr` would get "rewinded", and
405     // new objects would be allocated on top of already allocated objects.
406     uint32_t empty_freeptr = 0;
407     shared_meta()->freeptr.compare_exchange_strong(
408         /*expected=*/empty_freeptr, /*desired=*/sizeof(SharedMetadata),
409         /*success=*/std::memory_order_release,
410         /*failure=*/std::memory_order_relaxed);
411 
412     // Set up the queue of iterable allocations.
413     shared_meta()->queue.size = sizeof(BlockHeader);
414     shared_meta()->queue.cookie = kBlockCookieQueue;
415     shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
416     shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
417 
418     // Allocate space for the name so other processes can learn it.
419     if (!name.empty()) {
420       const size_t name_length = name.length() + 1;
421       shared_meta()->name = Allocate(name_length, 0);
422       char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
423       if (name_cstr)
424         memcpy(name_cstr, name.data(), name.length());
425     }
426 
427     shared_meta()->memory_state.store(MEMORY_INITIALIZED,
428                                       std::memory_order_release);
429   } else {
430     if (shared_meta()->size == 0 ||
431         (shared_meta()->version != kGlobalVersion &&
432          !Contains(kOldCompatibleVersions, shared_meta()->version)) ||
433         shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
434         shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
435         shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
436       SetCorrupt(allow_write_for_set_corrupt);
437     }
438     if (!readonly) {
439       // The allocator is attaching to a previously initialized segment of
440       // memory. If the initialization parameters differ, make the best of it
441       // by reducing the local construction parameters to match those of the
442       // actual memory area. This ensures that the local object never tries to
443       // write outside of the original bounds.
444       // Because the fields are const to ensure that no code other than the
445       // constructor makes changes to them as well as to give optimization hints
446       // to the compiler, it's necessary to const-cast them for changes here.
447       if (shared_meta()->size < mem_size_)
448         *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
449       if (shared_meta()->page_size < mem_page_)
450         *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
451 
452       // Ensure that settings are still valid after the above adjustments.
453       if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly)) {
454         SetCorrupt(allow_write_for_set_corrupt);
455       }
456     }
457   }
458 }
459 
460 PersistentMemoryAllocator::~PersistentMemoryAllocator() {
461   // It's strictly forbidden to do any memory access here in case there is
462   // some issue with the underlying memory segment. The "Local" allocator
463   // makes use of this to allow deletion of the segment on the heap from
464   // within its destructor.
465 }
466 
467 uint64_t PersistentMemoryAllocator::Id() const {
468   return shared_meta()->id;
469 }
470 
471 const char* PersistentMemoryAllocator::Name() const {
472   Reference name_ref = shared_meta()->name;
473   const char* name_cstr =
474       GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
475   if (!name_cstr)
476     return "";
477 
478   size_t name_length = GetAllocSize(name_ref);
479   if (name_cstr[name_length - 1] != '\0') {
480     NOTREACHED();
481     SetCorrupt();
482     return "";
483   }
484 
485   return name_cstr;
486 }
487 
488 void PersistentMemoryAllocator::CreateTrackingHistograms(
489     std::string_view name) {
490   if (name.empty() || access_mode_ == kReadOnly) {
491     return;
492   }
493   std::string name_string(name);
494 
495 #if 0
496   // This histogram wasn't being used so has been disabled. It is left here
497   // in case development of a new use of the allocator could benefit from
498   // recording (temporarily and locally) the allocation sizes.
499   DCHECK(!allocs_histogram_);
500   allocs_histogram_ = Histogram::FactoryGet(
501       "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
502       HistogramBase::kUmaTargetedHistogramFlag);
503 #endif
504 
505   DCHECK(!used_histogram_);
506   used_histogram_ = LinearHistogram::FactoryGet(
507       "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
508       HistogramBase::kUmaTargetedHistogramFlag);
509 
510   DCHECK(!errors_histogram_);
511   errors_histogram_ = LinearHistogram::FactoryGet(
512       "UMA.PersistentAllocator." + name_string + ".Errors", 1,
513       AllocatorError::kMaxValue + 1, AllocatorError::kMaxValue + 2,
514       HistogramBase::kUmaTargetedHistogramFlag);
515 }
516 
517 void PersistentMemoryAllocator::Flush(bool sync) {
518   FlushPartial(used(), sync);
519 }
520 
521 void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
522   shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
523   FlushPartial(sizeof(SharedMetadata), false);
524 }
525 
526 uint8_t PersistentMemoryAllocator::GetMemoryState() const {
527   return shared_meta()->memory_state.load(std::memory_order_relaxed);
528 }
529 
530 size_t PersistentMemoryAllocator::used() const {
531   return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
532                   mem_size_);
533 }
534 
535 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
536     const void* memory,
537     uint32_t type_id) const {
538   uintptr_t address = reinterpret_cast<uintptr_t>(memory);
539   if (address < reinterpret_cast<uintptr_t>(mem_base_))
540     return kReferenceNull;
541 
542   uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
543   if (offset >= mem_size_ || offset < sizeof(BlockHeader))
544     return kReferenceNull;
545 
546   Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
547   if (!GetBlockData(ref, type_id, kSizeAny))
548     return kReferenceNull;
549 
550   return ref;
551 }
552 
553 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
554   const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
555   if (!block)
556     return 0;
557   uint32_t size = block->size;
558   // Header was verified by GetBlock() but a malicious actor could change
559   // the value between there and here. Check it again.
560   uint32_t total_size;
561   if (size <= sizeof(BlockHeader) ||
562       !base::CheckAdd(ref, size).AssignIfValid(&total_size) ||
563       total_size > mem_size_) {
564     SetCorrupt();
565     return 0;
566   }
567   return size - sizeof(BlockHeader);
568 }
569 
570 uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
571   const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
572   if (!block)
573     return 0;
574   return block->type_id.load(std::memory_order_relaxed);
575 }
576 
577 bool PersistentMemoryAllocator::ChangeType(Reference ref,
578                                            uint32_t to_type_id,
579                                            uint32_t from_type_id,
580                                            bool clear) {
581   DCHECK_NE(access_mode_, kReadOnly);
582   volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
583   if (!block)
584     return false;
585 
586   // "Strong" exchanges are used below because there is no loop that can retry
587   // in the wake of spurious failures possible with "weak" exchanges. It is,
588   // in aggregate, an "acquire-release" operation so no memory accesses can be
589   // reordered either before or after this method (since changes based on type
590   // could happen on either side).
591 
592   if (clear) {
593     // If clearing the memory, first change it to the "transitioning" type so
594     // there can be no confusion by other threads. After the memory is cleared,
595     // it can be changed to its final type.
596     if (!block->type_id.compare_exchange_strong(
597             from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
598             std::memory_order_acquire)) {
599       // Existing type wasn't what was expected: fail (with no changes)
600       return false;
601     }
602 
603     // Clear the memory in an atomic manner. Using "release" stores forces
604     // every write to be done after the ones before it. This is better than
605     // using memset because (a) it supports "volatile" and (b) it creates a
606     // reliable pattern upon which other threads may rely.
607     volatile std::atomic<int>* data =
608         reinterpret_cast<volatile std::atomic<int>*>(
609             reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
610     const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
611     DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
612     for (uint32_t i = 0; i < words; ++i) {
613       data->store(0, std::memory_order_release);
614       ++data;
615     }
616 
617     // If the destination type is "transitioning" then skip the final exchange.
618     if (to_type_id == kTypeIdTransitioning)
619       return true;
620 
621     // Finish the change to the desired type.
622     from_type_id = kTypeIdTransitioning;  // Exchange needs modifiable original.
623     bool success = block->type_id.compare_exchange_strong(
624         from_type_id, to_type_id, std::memory_order_release,
625         std::memory_order_relaxed);
626     DCHECK(success);  // Should never fail.
627     return success;
628   }
629 
630   // One step change to the new type. Will return false if the existing value
631   // doesn't match what is expected.
632   return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
633                                                 std::memory_order_acq_rel,
634                                                 std::memory_order_acquire);
635 }
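// Illustrative use of ChangeType() (a sketch only; kTypeIdOld/kTypeIdNew are
// placeholder type ids): atomically claim a block of one type, zero its
// contents, and re-label it, failing with no changes if another thread got
// there first.
//
//   if (allocator->ChangeType(ref, kTypeIdNew, kTypeIdOld, /*clear=*/true)) {
//     // This thread now owns the zeroed block, re-labeled as kTypeIdNew.
//   }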
636 
637 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
638     size_t req_size,
639     uint32_t type_id) {
640   Reference ref = AllocateImpl(req_size, type_id);
641   if (ref) {
642     // Success: Record this allocation in usage stats (if active).
643     if (allocs_histogram_)
644       allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
645   } else {
646     // Failure: Record an allocation of zero for tracking.
647     if (allocs_histogram_)
648       allocs_histogram_->Add(0);
649   }
650   return ref;
651 }
652 
653 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
654     size_t req_size,
655     uint32_t type_id) {
656   DCHECK_NE(access_mode_, kReadOnly);
657 
658   // Validate req_size to ensure it won't overflow when used as 32-bit value.
659   if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
660     NOTREACHED();
661     return kReferenceNull;
662   }
663 
664   // Round up the requested size, plus header, to the next allocation alignment.
665   size_t size = bits::AlignUp(req_size + sizeof(BlockHeader), kAllocAlignment);
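  // As an illustration of the rounding above: if kAllocAlignment is 8, a
  // 20-byte request becomes 20 + sizeof(BlockHeader) (16) = 36 bytes, which
  // is rounded up to 40.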
666   if (size <= sizeof(BlockHeader) || size > mem_page_) {
667     // This shouldn't be reached through normal means.
668     debug::DumpWithoutCrashing();
669     return kReferenceNull;
670   }
671 
672   // Get the current start of unallocated memory. Other threads may
673   // update this at any time and cause us to retry these operations.
674   // This value should be treated as "const" to avoid confusion through
675   // the code below but recognize that any failed compare-exchange operation
676   // involving it will cause it to be loaded with a more recent value. The
677   // code should either exit or restart the loop in that case.
678   /* const */ uint32_t freeptr =
679       shared_meta()->freeptr.load(std::memory_order_acquire);
680 
681   // Allocation is lockless so we do all our calculation and then, if saving
682   // indicates a change has occurred since we started, scrap everything and
683   // start over.
684   for (;;) {
685     if (IsCorrupt())
686       return kReferenceNull;
687 
688     if (freeptr + size > mem_size_) {
689       SetFlag(&shared_meta()->flags, kFlagFull);
690       return kReferenceNull;
691     }
692 
693     // Get pointer to the "free" block. If something has been allocated since
694     // the load of freeptr above, it is still safe as nothing will be written
695     // to that location until after the compare-exchange below.
696     volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
697     if (!block) {
698       SetCorrupt();
699       return kReferenceNull;
700     }
701 
702     // An allocation cannot cross page boundaries. If it would, create a
703     // "wasted" block and begin again at the top of the next page. This
704     // area could just be left empty but we fill in the block header just
705   // for completeness' sake.
706     const uint32_t page_free = mem_page_ - freeptr % mem_page_;
707     if (size > page_free) {
708       if (page_free <= sizeof(BlockHeader)) {
709         SetCorrupt();
710         return kReferenceNull;
711       }
712 
713 #if !BUILDFLAG(IS_NACL)
714       // In production, with the current state of the code, this code path
715       // should not be reached. However, crash reports have been hinting that it
716       // is. Add crash keys to investigate this.
717       // TODO(crbug.com/1432981): Remove them once done.
718       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "mem_size_",
719                               mem_size_);
720       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "mem_page_",
721                               mem_page_);
722       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "freeptr", freeptr);
723       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "page_free",
724                               page_free);
725       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size", size);
726       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "req_size",
727                               req_size);
728       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_id", type_id);
729       std::string persistent_file_name = "N/A";
730       auto* allocator = GlobalHistogramAllocator::Get();
731       if (allocator && allocator->HasPersistentLocation()) {
732         persistent_file_name =
733             allocator->GetPersistentLocation().BaseName().AsUTF8Unsafe();
734       }
735       SCOPED_CRASH_KEY_STRING256("PersistentMemoryAllocator", "file_name",
736                                  persistent_file_name);
737       debug::DumpWithoutCrashing();
738 #endif  // !BUILDFLAG(IS_NACL)
739 
740       const uint32_t new_freeptr = freeptr + page_free;
741       if (shared_meta()->freeptr.compare_exchange_strong(
742               freeptr, new_freeptr, std::memory_order_acq_rel,
743               std::memory_order_acquire)) {
744         block->size = page_free;
745         block->cookie = kBlockCookieWasted;
746       }
747       continue;
748     }
749 
750     // Don't leave a slice at the end of a page too small for anything. This
751     // can result in an allocation up to two alignment-sizes greater than the
752     // minimum required by requested-size + header + alignment.
753     if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) {
754       size = page_free;
755       if (freeptr + size > mem_size_) {
756         SetCorrupt();
757         return kReferenceNull;
758       }
759     }
760 
761     // This cast is safe because (freeptr + size) <= mem_size_.
762     const uint32_t new_freeptr = static_cast<uint32_t>(freeptr + size);
763 
764     // Save our work. Try again if another thread has completed an allocation
765   // while we were processing. A "weak" exchange would be permissible here
766     // because the code will just loop and try again but the above processing
767     // is significant so make the extra effort of a "strong" exchange.
768     if (!shared_meta()->freeptr.compare_exchange_strong(
769             freeptr, new_freeptr, std::memory_order_acq_rel,
770             std::memory_order_acquire)) {
771       continue;
772     }
773 
774     // Given that all memory was zeroed before ever being given to an instance
775   // of this class and given that we only allocate in a monotonic fashion
776     // going forward, it must be that the newly allocated block is completely
777     // full of zeros. If we find anything in the block header that is NOT a
778     // zero then something must have previously run amuck through memory,
779     // writing beyond the allocated space and into unallocated space.
780     if (block->size != 0 ||
781         block->cookie != kBlockCookieFree ||
782         block->type_id.load(std::memory_order_relaxed) != 0 ||
783         block->next.load(std::memory_order_relaxed) != 0) {
784       SetCorrupt();
785       return kReferenceNull;
786     }
787 
788     // Make sure the memory exists by writing to the first byte of every memory
789     // page it touches beyond the one containing the block header itself.
790     // As the underlying storage is often memory mapped from disk or shared
791   // space, sometimes things go wrong and those addresses don't actually exist
792     // leading to a SIGBUS (or Windows equivalent) at some arbitrary location
793     // in the code. This should concentrate all those failures into this
794     // location for easy tracking and, eventually, proper handling.
795     volatile char* mem_end = reinterpret_cast<volatile char*>(block) + size;
796     volatile char* mem_begin = reinterpret_cast<volatile char*>(
797         (reinterpret_cast<uintptr_t>(block) + sizeof(BlockHeader) +
798          (vm_page_size_ - 1)) &
799         ~static_cast<uintptr_t>(vm_page_size_ - 1));
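    // The computation above rounds the first byte past the header up to the
    // next vm_page_size_ boundary using the usual (x + n - 1) & ~(n - 1)
    // trick, which assumes vm_page_size_ is a power of two.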
800     for (volatile char* memory = mem_begin; memory < mem_end;
801          memory += vm_page_size_) {
802       // It's required that a memory segment start as all zeros and thus the
803       // newly allocated block is all zeros at this point. Thus, writing a
804       // zero to it allows testing that the memory exists without actually
805       // changing its contents. The compiler doesn't know about the requirement
806       // and so cannot optimize-away these writes.
807       *memory = 0;
808     }
809 
810     // Load information into the block header. There is no "release" of the
811     // data here because this memory can, currently, be seen only by the thread
812     // performing the allocation. When it comes time to share this, the thread
813     // will call MakeIterable() which does the release operation.
814     // `size` is at most kSegmentMaxSize, so this cast is safe.
815     block->size = static_cast<uint32_t>(size);
816     block->cookie = kBlockCookieAllocated;
817     block->type_id.store(type_id, std::memory_order_relaxed);
818     return freeptr;
819   }
820 }
821 
822 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
823   uint32_t remaining = std::max(
824       mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
825       (uint32_t)sizeof(BlockHeader));
826   meminfo->total = mem_size_;
827   meminfo->free = remaining - sizeof(BlockHeader);
828 }
829 
830 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
831   DCHECK_NE(access_mode_, kReadOnly);
832   if (IsCorrupt())
833     return;
834   volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
835   if (!block)  // invalid reference
836     return;
837 
838   Reference empty_ref = 0;
839   if (!block->next.compare_exchange_strong(
840           /*expected=*/empty_ref, /*desired=*/kReferenceQueue,
841           /*success=*/std::memory_order_acq_rel,
842           /*failure=*/std::memory_order_acquire)) {
843     // Already iterable (or another thread is currently making this iterable).
844     return;
845   }
846 
847   // Try to add this block to the tail of the queue. May take multiple tries.
848   // If so, tail will be automatically updated with a more recent value during
849   // compare-exchange operations.
850   uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
851   for (;;) {
852     // Acquire the current tail-pointer released by previous call to this
853     // method and validate it.
854     block = GetBlock(tail, 0, 0, true, false);
855     if (!block) {
856       SetCorrupt();
857       return;
858     }
859 
860     // Try to insert the block at the tail of the queue. The tail node always
861     // has an existing value of kReferenceQueue; if that is somehow not the
862     // existing value then another thread has acted in the meantime. A "strong"
863     // exchange is necessary so the "else" block does not get executed when
864     // that is not actually the case (which can happen with a "weak" exchange).
865     uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
866     if (block->next.compare_exchange_strong(next, ref,
867                                             std::memory_order_acq_rel,
868                                             std::memory_order_acquire)) {
869       // Update the tail pointer to the new offset. If the "else" clause did
870       // not exist, then this could be a simple Release_Store to set the new
871       // value but because it does, it's possible that other threads could add
872       // one or more nodes at the tail before reaching this point. We don't
873       // have to check the return value because it either operates correctly
874       // or the exact same operation has already been done (by the "else"
875       // clause) on some other thread.
876       shared_meta()->tailptr.compare_exchange_strong(tail, ref,
877                                                      std::memory_order_release,
878                                                      std::memory_order_relaxed);
879       return;
880     }
881     // In the unlikely case that a thread crashed or was killed between the
882     // update of "next" and the update of "tailptr", it is necessary to
883     // perform the operation that would have been done. There's no explicit
884     // check for crash/kill which means that this operation may also happen
885     // even when the other thread is in perfect working order which is what
886     // necessitates the CompareAndSwap above.
887     shared_meta()->tailptr.compare_exchange_strong(
888         tail, next, std::memory_order_acq_rel, std::memory_order_acquire);
889   }
890 }
891 
892 // The "corrupted" state is held both locally and globally (shared). The
893 // shared flag can't be trusted since a malicious actor could overwrite it.
894 // Because corruption can be detected during read-only operations such as
895 // iteration, this method may be called by other "const" methods. In this
896 // case, it's safe to discard the constness and modify the local flag and
897 // maybe even the shared flag if the underlying data isn't actually read-only.
898 void PersistentMemoryAllocator::SetCorrupt(bool allow_write) const {
899   if (!corrupt_.load(std::memory_order_relaxed) &&
900       !CheckFlag(
901           const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
902           kFlagCorrupt)) {
903     LOG(ERROR) << "Corruption detected in shared-memory segment.";
904     RecordError(kMemoryIsCorrupt);
905   }
906 
907   corrupt_.store(true, std::memory_order_relaxed);
908   if (allow_write && access_mode_ != kReadOnly) {
909     SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
910             kFlagCorrupt);
911   }
912 }
913 
914 bool PersistentMemoryAllocator::IsCorrupt() const {
915   if (corrupt_.load(std::memory_order_relaxed)) {
916     return true;
917   }
918   if (CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
919     // Set the local flag if we found the flag in the data.
920     SetCorrupt(/*allow_write=*/false);
921     return true;
922   }
923   return false;
924 }
925 
926 bool PersistentMemoryAllocator::IsFull() const {
927   return CheckFlag(&shared_meta()->flags, kFlagFull);
928 }
929 
930 // Dereference a block |ref| and ensure that it's valid for the desired
931 // |type_id| and |size|. |special| indicates that we may try to access block
932 // headers not available to callers but still accessed by this module. By
933 // having internal dereferences go through this same function, the allocator
934 // is hardened against corruption.
935 const volatile PersistentMemoryAllocator::BlockHeader*
936 PersistentMemoryAllocator::GetBlock(Reference ref,
937                                     uint32_t type_id,
938                                     size_t size,
939                                     bool queue_ok,
940                                     bool free_ok) const {
941   // Handle special cases.
942   if (ref == kReferenceQueue && queue_ok)
943     return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
944 
945   // Validation of parameters.
946   if (ref < sizeof(SharedMetadata))
947     return nullptr;
948   if (ref % kAllocAlignment != 0)
949     return nullptr;
950   size += sizeof(BlockHeader);
951   uint32_t total_size;
952   if (!base::CheckAdd(ref, size).AssignIfValid(&total_size)) {
953     return nullptr;
954   }
955   if (total_size > mem_size_) {
956     return nullptr;
957   }
958 
959   // Validation of referenced block-header.
960   if (!free_ok) {
961     const volatile BlockHeader* const block =
962         reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
963     if (block->cookie != kBlockCookieAllocated)
964       return nullptr;
965     if (block->size < size)
966       return nullptr;
967     uint32_t block_size;
968     if (!base::CheckAdd(ref, block->size).AssignIfValid(&block_size)) {
969       return nullptr;
970     }
971     if (block_size > mem_size_) {
972       return nullptr;
973     }
974     if (type_id != 0 &&
975         block->type_id.load(std::memory_order_relaxed) != type_id) {
976       return nullptr;
977     }
978   }
979 
980   // Return pointer to block data.
981   return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
982 }
983 
984 void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
985   // Generally there is nothing to do as every write is done through volatile
986   // memory with atomic instructions to guarantee consistency. This (virtual)
987   // method exists so that derived classes can do special things, such as tell
988   // the OS to write changes to disk now rather than when convenient.
989 }
990 
991 void PersistentMemoryAllocator::RecordError(int error) const {
992   if (errors_histogram_)
993     errors_histogram_->Add(error);
994 }
995 
996 uint32_t PersistentMemoryAllocator::freeptr() const {
997   return shared_meta()->freeptr.load(std::memory_order_relaxed);
998 }
999 
1000 uint32_t PersistentMemoryAllocator::version() const {
1001   return shared_meta()->version;
1002 }
1003 
1004 const volatile void* PersistentMemoryAllocator::GetBlockData(
1005     Reference ref,
1006     uint32_t type_id,
1007     size_t size) const {
1008   DCHECK(size > 0);
1009   const volatile BlockHeader* block =
1010       GetBlock(ref, type_id, size, false, false);
1011   if (!block)
1012     return nullptr;
1013   return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
1014 }
1015 
1016 void PersistentMemoryAllocator::UpdateTrackingHistograms() {
1017   DCHECK_NE(access_mode_, kReadOnly);
1018   if (used_histogram_) {
1019     MemoryInfo meminfo;
1020     GetMemoryInfo(&meminfo);
1021     HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
1022         ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
1023     used_histogram_->Add(used_percent);
1024   }
1025 }
1026 
1027 
1028 //----- LocalPersistentMemoryAllocator -----------------------------------------
1029 
1030 LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
1031     size_t size,
1032     uint64_t id,
1033     std::string_view name)
1034     : PersistentMemoryAllocator(AllocateLocalMemory(size, name),
1035                                 size,
1036                                 0,
1037                                 id,
1038                                 name,
1039                                 kReadWrite) {}
1040 
1041 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
1042   DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
1043 }
1044 
1045 // static
1046 PersistentMemoryAllocator::Memory
1047 LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size,
1048                                                     std::string_view name) {
1049   void* address;
1050 
1051 #if BUILDFLAG(IS_WIN)
1052   address =
1053       ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1054   if (address)
1055     return Memory(address, MEM_VIRTUAL);
1056 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1057   // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
1058   // MAP_SHARED is not available on Linux <2.4 but required on Mac.
1059   address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
1060                    MAP_ANON | MAP_SHARED, -1, 0);
1061   if (address != MAP_FAILED) {
1062 #if BUILDFLAG(IS_ANDROID)
1063     // Allow the anonymous memory region allocated by mmap(MAP_ANON) to be
1064     // identified in /proc/$PID/smaps.  This helps improve visibility into
1065     // Chrome's memory usage on Android.
1066     const std::string arena_name = base::StrCat({"persistent:", name});
1067     prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, address, size, arena_name.c_str());
1068 #endif
1069     return Memory(address, MEM_VIRTUAL);
1070   }
1071 #else
1072 #error This architecture is not (yet) supported.
1073 #endif
1074 
1075   // As a last resort, just allocate the memory from the heap. This will
1076   // achieve the same basic result but the acquired memory has to be
1077   // explicitly zeroed and thus realized immediately (i.e. all pages are
1078   // added to the process now instead of only when first accessed).
1079   address = malloc(size);
1080   DPCHECK(address);
1081   memset(address, 0, size);
1082   return Memory(address, MEM_MALLOC);
1083 }
1084 
1085 // static
1086 void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
1087                                                            size_t size,
1088                                                            MemoryType type) {
1089   if (type == MEM_MALLOC) {
1090     free(memory);
1091     return;
1092   }
1093 
1094   DCHECK_EQ(MEM_VIRTUAL, type);
1095 #if BUILDFLAG(IS_WIN)
1096   BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
1097   DCHECK(success);
1098 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1099   int result = ::munmap(memory, size);
1100   DCHECK_EQ(0, result);
1101 #else
1102 #error This architecture is not (yet) supported.
1103 #endif
1104 }
1105 
1106 //----- WritableSharedPersistentMemoryAllocator --------------------------------
1107 
1108 WritableSharedPersistentMemoryAllocator::
1109     WritableSharedPersistentMemoryAllocator(
1110         base::WritableSharedMemoryMapping memory,
1111         uint64_t id,
1112         std::string_view name)
1113     : PersistentMemoryAllocator(Memory(memory.memory(), MEM_SHARED),
1114                                 memory.size(),
1115                                 0,
1116                                 id,
1117                                 name,
1118                                 kReadWrite),
1119       shared_memory_(std::move(memory)) {}
1120 
1121 WritableSharedPersistentMemoryAllocator::
1122     ~WritableSharedPersistentMemoryAllocator() = default;
1123 
1124 // static
1125 bool WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1126     const base::WritableSharedMemoryMapping& memory) {
1127   return IsMemoryAcceptable(memory.memory(), memory.size(), 0, false);
1128 }
1129 
1130 //----- ReadOnlySharedPersistentMemoryAllocator --------------------------------
1131 
1132 ReadOnlySharedPersistentMemoryAllocator::
1133     ReadOnlySharedPersistentMemoryAllocator(
1134         base::ReadOnlySharedMemoryMapping memory,
1135         uint64_t id,
1136         std::string_view name)
1137     : PersistentMemoryAllocator(
1138           Memory(const_cast<void*>(memory.memory()), MEM_SHARED),
1139           memory.size(),
1140           0,
1141           id,
1142           name,
1143           kReadOnly),
1144       shared_memory_(std::move(memory)) {}
1145 
1146 ReadOnlySharedPersistentMemoryAllocator::
1147     ~ReadOnlySharedPersistentMemoryAllocator() = default;
1148 
1149 // static
1150 bool ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1151     const base::ReadOnlySharedMemoryMapping& memory) {
1152   return IsMemoryAcceptable(memory.memory(), memory.size(), 0, true);
1153 }
1154 
1155 #if !BUILDFLAG(IS_NACL)
1156 //----- FilePersistentMemoryAllocator ------------------------------------------
1157 
1158 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
1159     std::unique_ptr<MemoryMappedFile> file,
1160     size_t max_size,
1161     uint64_t id,
1162     std::string_view name,
1163     AccessMode access_mode)
1164     : PersistentMemoryAllocator(
1165           Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
1166           max_size != 0 ? max_size : file->length(),
1167           0,
1168           id,
1169           name,
1170           access_mode),
1171       mapped_file_(std::move(file)) {}
1172 
1173 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
1174 
1175 // static
1176 bool FilePersistentMemoryAllocator::IsFileAcceptable(
1177     const MemoryMappedFile& file,
1178     bool readonly) {
1179   return IsMemoryAcceptable(file.data(), file.length(), 0, readonly);
1180 }
1181 
1182 void FilePersistentMemoryAllocator::Cache() {
1183   // Since this method is expected to load data from permanent storage
1184   // into memory, blocking I/O may occur.
1185   base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
1186                                                 base::BlockingType::MAY_BLOCK);
1187 
1188   // Calculate begin/end addresses so that the first byte of every page
1189   // in that range can be read. Keep within the used space. The |volatile|
1190   // keyword makes it so the compiler can't make assumptions about what is
1191   // in a given memory location and thus possibly avoid the read.
1192   const volatile char* mem_end = mem_base_ + used();
1193   const volatile char* mem_begin = mem_base_;
1194 
1195   // Iterate over the memory a page at a time, reading the first byte of
1196   // every page. The values are added to a |total| so that the compiler
1197   // can't omit the read.
1198   int total = 0;
1199   for (const volatile char* memory = mem_begin; memory < mem_end;
1200        memory += vm_page_size_) {
1201     total += *memory;
1202   }
1203 
1204   // Tell the compiler that |total| is used so that it can't optimize away
1205   // the memory accesses above.
1206   debug::Alias(&total);
1207 }
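
// For scale (illustrative arithmetic only): with a 4 KiB |vm_page_size_| and
// 1 MiB of used space, the loop above issues 256 single-byte reads, each of
// which faults its page into memory ahead of later use.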

void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
  if (IsReadonly())
    return;

  std::optional<base::ScopedBlockingCall> scoped_blocking_call;
  if (sync)
    scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);

#if BUILDFLAG(IS_WIN)
  // Windows doesn't support asynchronous flush.
  scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
  BOOL success = ::FlushViewOfFile(data(), length);
  DPCHECK(success);
#elif BUILDFLAG(IS_APPLE)
  // On OSX, "invalidate" removes all cached pages, forcing a re-read from
  // disk. That's not applicable to "flush" so omit it.
  int result =
      ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
  DCHECK_NE(EINVAL, result);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  // On POSIX, "invalidate" forces _other_ processes to recognize what has
  // been written to disk and so is applicable to "flush".
  int result = ::msync(const_cast<void*>(data()), length,
                       MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
  DCHECK_NE(EINVAL, result);
#else
#error Unsupported OS.
#endif
}
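
// Usage note (illustrative, not a definitive contract): callers normally go
// through the base class's Flush() rather than FlushPartial() directly;
// assuming Flush() forwards the currently used byte count, a write path might
// end with:
//
//   allocator.Flush(/*sync=*/false);  // Async msync on POSIX; always
//                                     // synchronous on Windows (see above).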
#endif  // !BUILDFLAG(IS_NACL)

//----- DelayedPersistentAllocation --------------------------------------------

DelayedPersistentAllocation::DelayedPersistentAllocation(
    PersistentMemoryAllocator* allocator,
    std::atomic<Reference>* ref,
    uint32_t type,
    size_t size,
    size_t offset)
    : allocator_(allocator),
      type_(type),
      size_(checked_cast<uint32_t>(size)),
      offset_(checked_cast<uint32_t>(offset)),
      reference_(ref) {
  DCHECK(allocator_);
  DCHECK_NE(0U, type_);
  DCHECK_LT(0U, size_);
  DCHECK(reference_);
}

DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
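
// Illustrative sketch only: a hypothetical owner places an atomic Reference
// slot in memory that is itself persistent, then defers the real allocation
// until first use. The allocator, type id, and size below are made-up values.
//
//   std::atomic<PersistentMemoryAllocator::Reference>* slot = /* persistent */;
//   DelayedPersistentAllocation delayed(allocator, slot, /*type=*/0x12345678,
//                                       /*size=*/24, /*offset=*/0);
//   span<uint8_t> bytes = delayed.GetUntyped();  // Allocates on first call.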

span<uint8_t> DelayedPersistentAllocation::GetUntyped() const {
  // An acquire load pairs with the release store below: if another thread has
  // already published a reference here, this thread sees both the reference
  // and the writes that preceded its publication.
  Reference ref = reference_->load(std::memory_order_acquire);

#if !BUILDFLAG(IS_NACL)
  // TODO(crbug/1432981): Remove these. They are used to investigate unexpected
  // failures.
  bool ref_found = (ref != 0);
  bool raced = false;
#endif  // !BUILDFLAG(IS_NACL)

  if (!ref) {
    ref = allocator_->Allocate(size_, type_);
    if (!ref) {
      return span<uint8_t>();
    }

    // Store the new reference in its proper location using compare-and-swap.
    // Use a "strong" exchange to ensure no false-negatives since the operation
    // cannot be retried.
    Reference existing = 0;  // Must be mutable; receives actual value.
    if (!reference_->compare_exchange_strong(existing, ref,
                                             std::memory_order_release,
                                             std::memory_order_relaxed)) {
      // Failure indicates that something else has raced ahead, performed the
      // allocation, and stored its reference. Purge the allocation that was
      // just done and use the other one instead.
      DCHECK_EQ(type_, allocator_->GetType(existing));
      DCHECK_LE(size_, allocator_->GetAllocSize(existing));
      allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
      ref = existing;
#if !BUILDFLAG(IS_NACL)
      raced = true;
#endif  // !BUILDFLAG(IS_NACL)
    }
  }

  uint8_t* mem = allocator_->GetAsArray<uint8_t>(ref, type_, size_);
  if (!mem) {
#if !BUILDFLAG(IS_NACL)
    // TODO(crbug/1432981): Remove these. They are used to investigate
    // unexpected failures.
    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "full",
                          allocator_->IsFull());
    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "corrupted",
                          allocator_->IsCorrupt());
    SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "freeptr",
                            allocator_->freeptr());
    // The allocator's cookie should always be `kGlobalCookie`. Add it to crash
    // keys to see if the file was corrupted externally, e.g. by a file
    // shredder. Cast to volatile to avoid compiler optimizations and ensure
    // that the actual value is read.
    SCOPED_CRASH_KEY_NUMBER(
        "PersistentMemoryAllocator", "cookie",
        static_cast<volatile PersistentMemoryAllocator::SharedMetadata*>(
            allocator_->shared_meta())
            ->cookie);
    SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "ref", ref);
    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "ref_found", ref_found);
    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "raced", raced);
    SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_", type_);
    SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size_", size_);
    if (ref == 0xC8799269) {
      // There are many crash reports containing the corrupted "0xC8799269"
      // value in |ref|. This value is actually a "magic" number to indicate
      // that a certain block in persistent memory was successfully allocated,
      // so it should not appear there. Include some extra crash keys to see if
      // the surrounding values were also corrupted. If so, the value before
      // would be the size of the allocated object, and the value after would be
      // the type id of the allocated object. If they are not corrupted, these
      // would contain |ranges_checksum| and the start of |samples_metadata|
      // respectively (see PersistentHistogramData struct). We do some pointer
      // arithmetic here -- it should theoretically be safe, unless something
      // went terribly wrong...
      SCOPED_CRASH_KEY_NUMBER(
          "PersistentMemoryAllocator", "ref_before",
          (reference_ - 1)->load(std::memory_order_relaxed));
      SCOPED_CRASH_KEY_NUMBER(
          "PersistentMemoryAllocator", "ref_after",
          (reference_ + 1)->load(std::memory_order_relaxed));
      DUMP_WILL_BE_NOTREACHED_NORETURN();
      return span<uint8_t>();
    }
#endif  // !BUILDFLAG(IS_NACL)
    // This should never happen but be tolerant if it does as corruption from
    // the outside is something to guard against.
    DUMP_WILL_BE_NOTREACHED_NORETURN();
    return span<uint8_t>();
  }
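  // The span returned below skips the first |offset_| bytes of the block: as
  // an illustrative example, size_ == 40 with offset_ == 8 yields a 32-byte
  // view starting 8 bytes into the allocation.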
  return make_span(mem + offset_, size_ - offset_);
}

}  // namespace base