1 // Copyright 2023 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
#include "partition_alloc/lightweight_quarantine.h"

#include <utility>

#include "partition_alloc/internal_allocator.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/partition_root.h"
10 
11 namespace partition_alloc::internal {
12 
CreateBranch(bool lock_required)13 LightweightQuarantineBranch LightweightQuarantineRoot::CreateBranch(
14     bool lock_required) {
15   return LightweightQuarantineBranch(*this, lock_required);
16 }
17 
// Binds this branch to `root` (shared accounting) and records whether
// operations on this branch must take `lock_`.
LightweightQuarantineBranch::LightweightQuarantineBranch(Root& root,
                                                         bool lock_required)
    : root_(root), lock_required_(lock_required) {}
21 
LightweightQuarantineBranch(LightweightQuarantineBranch && b)22 LightweightQuarantineBranch::LightweightQuarantineBranch(
23     LightweightQuarantineBranch&& b)
24     : root_(b.root_),
25       lock_required_(b.lock_required_),
26       slots_(std::move(b.slots_)),
27       branch_size_in_bytes_(b.branch_size_in_bytes_) {
28   b.branch_size_in_bytes_ = 0;
29 }
30 
// Releases every quarantined entry back to the allocator before the branch
// goes away; a destroyed branch must not keep slots out of circulation.
LightweightQuarantineBranch::~LightweightQuarantineBranch() {
  Purge();
  slots_.clear();
}
35 
// Attempts to place the freed `object` into quarantine instead of releasing
// it immediately. Returns true if the object was quarantined; returns false
// if it could not fit within the root's capacity, in which case the object
// is freed immediately and the miss is counted.
bool LightweightQuarantineBranch::Quarantine(void* object,
                                             SlotSpanMetadata* slot_span,
                                             uintptr_t slot_start) {
  const auto usable_size = root_.allocator_root_.GetSlotUsableSize(slot_span);

  // Snapshot of the root's capacity; relaxed load because capacity is a soft
  // limit, not a synchronization point.
  const size_t capacity_in_bytes =
      root_.capacity_in_bytes_.load(std::memory_order_relaxed);

  {
    ConditionalScopedGuard guard(lock_required_, lock_);

    // Note that `root_` is _not_ locked while `this` is locked with `lock_`,
    // so there is no synchronization between `root_` and `this` (branch)
    // except for the single-branch use case.
    const size_t root_size_in_bytes =
        root_.size_in_bytes_.load(std::memory_order_relaxed);
    // Due to no synchronization, `branch_size_in_bytes_` may be larger than
    // `root_size_in_bytes`, hence the clamped subtraction below.
    const size_t size_in_bytes_held_by_others =
        branch_size_in_bytes_ < root_size_in_bytes
            ? root_size_in_bytes - branch_size_in_bytes_
            : 0;
    if (capacity_in_bytes < size_in_bytes_held_by_others + usable_size) {
      // Even if this branch dequarantines all entries held by it, this entry
      // cannot fit within the capacity. Free it right away and record a miss.
      root_.allocator_root_.FreeNoHooksImmediate(object, slot_span, slot_start);
      root_.quarantine_miss_count_.fetch_add(1u, std::memory_order_relaxed);
      return false;
    }

    // Dequarantine some entries as required so that, after adding
    // `usable_size` below, the total stays within `capacity_in_bytes`.
    PurgeInternal(capacity_in_bytes - usable_size);

    // Update stats (locked).
    branch_size_in_bytes_ += usable_size;
    root_.size_in_bytes_.fetch_add(usable_size, std::memory_order_relaxed);
    // `root_.capacity_in_bytes_` is _not_ a hard limit, plus there is no
    // synchronization between the root and branch, so `branch_size_in_bytes_`
    // may be larger than `root_.capacity_in_bytes_` at this point.

    slots_.emplace_back(slot_start, usable_size);

    // Swap randomly so that the quarantine list remain shuffled.
    // This is not uniformly random, but sufficiently random.
    const size_t random_index = random_.RandUint32() % slots_.size();
    std::swap(slots_[random_index], slots_.back());
  }

  // Update stats (not locked). Cumulative counters tolerate relaxed ordering
  // as they are monotonically increasing and only read for reporting.
  root_.count_.fetch_add(1, std::memory_order_relaxed);
  root_.cumulative_count_.fetch_add(1, std::memory_order_relaxed);
  root_.cumulative_size_in_bytes_.fetch_add(usable_size,
                                            std::memory_order_relaxed);
  return true;
}
91 
IsQuarantinedForTesting(void * object)92 bool LightweightQuarantineBranch::IsQuarantinedForTesting(void* object) {
93   ConditionalScopedGuard guard(lock_required_, lock_);
94   uintptr_t slot_start = root_.allocator_root_.ObjectToSlotStart(object);
95   for (const auto& slot : slots_) {
96     if (slot.slot_start == slot_start) {
97       return true;
98     }
99   }
100   return false;
101 }
102 
// Frees quarantined entries from this branch until the root's total size
// drops to `target_size_in_bytes` or this branch runs out of entries.
// Caller must hold `lock_` when `lock_required_` is set (both call sites in
// this file do so).
void LightweightQuarantineBranch::PurgeInternal(size_t target_size_in_bytes) {
  // Snapshot of the root-wide size; updated locally as we free so we do not
  // re-read the (racy) atomic inside the loop.
  size_t size_in_bytes = root_.size_in_bytes_.load(std::memory_order_relaxed);
  int64_t freed_count = 0;
  int64_t freed_size_in_bytes = 0;

  // Dequarantine some entries as required.
  while (!slots_.empty() && target_size_in_bytes < size_in_bytes) {
    // As quarantined entries are shuffled, picking last entry is equivalent
    // to picking random entry.
    const auto& to_free = slots_.back();
    size_t to_free_size = to_free.usable_size;

    auto* slot_span = SlotSpanMetadata::FromSlotStart(to_free.slot_start);
    void* object = root_.allocator_root_.SlotStartToObject(to_free.slot_start);
    PA_DCHECK(slot_span == SlotSpanMetadata::FromObject(object));

    PA_DCHECK(to_free.slot_start);
    // Actually return the slot to the allocator, bypassing hooks (the entry
    // was already "freed" from the caller's perspective when quarantined).
    root_.allocator_root_.FreeNoHooksImmediate(object, slot_span,
                                               to_free.slot_start);

    freed_count++;
    freed_size_in_bytes += to_free_size;
    size_in_bytes -= to_free_size;

    slots_.pop_back();
  }

  // Publish the freed totals; relaxed, consistent with the rest of the
  // root/branch accounting in this file.
  branch_size_in_bytes_ -= freed_size_in_bytes;
  root_.size_in_bytes_.fetch_sub(freed_size_in_bytes,
                                 std::memory_order_relaxed);
  root_.count_.fetch_sub(freed_count, std::memory_order_relaxed);
}
135 
136 }  // namespace partition_alloc::internal
137