// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/starscan/pcscan_scheduling.h"

#include <algorithm>
#include <atomic>

#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_hooks.h"
#include "partition_alloc/partition_lock.h"
#include "partition_alloc/starscan/logging.h"
#include "partition_alloc/starscan/pcscan.h"

namespace partition_alloc::internal {

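// Note: the out-of-line definitions of static constexpr members below satisfy
// ODR-use on toolchains predating C++17 (which made such members implicitly
// inline); with C++17 and later they are redundant but harmless.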
// static
constexpr size_t QuarantineData::kQuarantineSizeMinLimit;

void PCScanScheduler::SetNewSchedulingBackend(
    PCScanSchedulingBackend& backend) {
  backend_ = &backend;
}

void PCScanSchedulingBackend::DisableScheduling() {
  scheduling_enabled_.store(false, std::memory_order_relaxed);
}

void PCScanSchedulingBackend::EnableScheduling() {
  scheduling_enabled_.store(true, std::memory_order_relaxed);
  // Check if *Scan needs to be run immediately.
  if (NeedsToImmediatelyScan()) {
    PCScan::PerformScan(PCScan::InvocationMode::kNonBlocking);
  }
}

size_t PCScanSchedulingBackend::ScanStarted() {
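  // Begin a new scan cycle: bump the epoch counter and atomically take the
  // quarantine size accumulated since the previous scan, resetting it to zero
  // for the next cycle. The old size is returned to the caller.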
  auto& data = GetQuarantineData();
  data.epoch.fetch_add(1, std::memory_order_relaxed);
  return data.current_size.exchange(0, std::memory_order_relaxed);
}

base::TimeDelta PCScanSchedulingBackend::UpdateDelayedSchedule() {
  return base::TimeDelta();
}

// static
constexpr double LimitBackend::kQuarantineSizeFraction;

bool LimitBackend::LimitReached() {
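  // The simple limit-based backend always wants to scan once the quarantine
  // size limit is hit, unless scheduling has been explicitly disabled.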
  return is_scheduling_enabled();
}

void LimitBackend::UpdateScheduleAfterScan(size_t survived_bytes,
                                           base::TimeDelta,
                                           size_t heap_size) {
  scheduler_.AccountFreed(survived_bytes);
  // |heap_size| includes the current quarantine size; we intentionally leave
  // some slack until hitting the limit.
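  // For illustration only (the actual constant lives in the header): with
  // kQuarantineSizeFraction == 0.1 and a 100 MiB heap, the limit becomes
  // max(kQuarantineSizeMinLimit, 10 MiB).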
  auto& data = GetQuarantineData();
  data.size_limit.store(
      std::max(QuarantineData::kQuarantineSizeMinLimit,
               static_cast<size_t>(kQuarantineSizeFraction * heap_size)),
      std::memory_order_relaxed);
}

bool LimitBackend::NeedsToImmediatelyScan() {
  return false;
}

// static
constexpr double MUAwareTaskBasedBackend::kSoftLimitQuarantineSizePercent;
// static
constexpr double MUAwareTaskBasedBackend::kHardLimitQuarantineSizePercent;
// static
constexpr double MUAwareTaskBasedBackend::kTargetMutatorUtilizationPercent;
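// Despite the "Percent" suffix, these constants are used below as fractions
// in [0, 1]: the soft/hard limits are computed as fraction * heap_size, and
// the target mutator utilization enters as MU / (1 - MU).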

MUAwareTaskBasedBackend::MUAwareTaskBasedBackend(
    PCScanScheduler& scheduler,
    ScheduleDelayedScanFunc schedule_delayed_scan)
    : PCScanSchedulingBackend(scheduler),
      schedule_delayed_scan_(schedule_delayed_scan) {
  PA_DCHECK(schedule_delayed_scan_);
}

MUAwareTaskBasedBackend::~MUAwareTaskBasedBackend() = default;

bool MUAwareTaskBasedBackend::LimitReached() {
  bool should_reschedule = false;
  base::TimeDelta reschedule_delay;
  {
    ScopedGuard guard(scheduler_lock_);
    // At this point we reached a limit where the schedule generally wants to
    // trigger a scan.
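    // Between scans the quarantine size limit holds the soft limit (set in
    // UpdateScheduleAfterScan). Hitting it raises the limit to the hard limit
    // (step 1 below) and either scans right away or defers the scan until the
    // mutator-utilization window has passed. If the quarantine subsequently
    // grows past the hard limit as well, |hard_limit_| is already zero, the
    // block below is skipped, and we return true to force an immediate scan.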
    if (hard_limit_) {
      // The hard limit is not reset, indicating that the scheduler only hit
      // the soft limit. See inlined comments for the algorithm.
      auto& data = GetQuarantineData();
      PA_DCHECK(hard_limit_ >= QuarantineData::kQuarantineSizeMinLimit);
      // 1. Update the limit to the hard limit which will always immediately
      //    trigger a scan.
      data.size_limit.store(hard_limit_, std::memory_order_relaxed);
      hard_limit_ = 0;

      // 2. Unlikely case: If we are also above the hard limit, start a scan
      //    right away. This ignores explicit PCScan disabling.
      if (PA_UNLIKELY(data.current_size.load(std::memory_order_relaxed) >
                      data.size_limit.load(std::memory_order_relaxed))) {
        return true;
      }

      // 3. Check if PCScan was explicitly disabled.
      if (PA_UNLIKELY(!is_scheduling_enabled())) {
        return false;
      }

      // 4. Otherwise, the soft limit triggers a scan immediately if the
      //    mutator utilization requirement is satisfied.
      reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
      if (reschedule_delay <= base::TimeDelta()) {
        // May invoke scan immediately.
        return true;
      }

      PA_PCSCAN_VLOG(3) << "Rescheduling scan with delay: "
                        << reschedule_delay.InMillisecondsF() << " ms";
      // 5. If the MU requirement is not satisfied, schedule a delayed scan
      //    for the point in time when MU is satisfied.
      should_reschedule = true;
    }
  }
  // Don't reschedule under the lock as the callback can call free() and
  // recursively enter the lock.
  if (should_reschedule) {
    schedule_delayed_scan_(reschedule_delay.InMicroseconds());
    return false;
  }
  return true;
}

size_t MUAwareTaskBasedBackend::ScanStarted() {
  ScopedGuard guard(scheduler_lock_);

  return PCScanSchedulingBackend::ScanStarted();
}

void MUAwareTaskBasedBackend::UpdateScheduleAfterScan(
    size_t survived_bytes,
    base::TimeDelta time_spent_in_scan,
    size_t heap_size) {
  scheduler_.AccountFreed(survived_bytes);

  ScopedGuard guard(scheduler_lock_);

  // |heap_size| includes the current quarantine size; we intentionally leave
  // some slack until hitting the limit.
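  // Illustrative values only (the real constants live in the header): with
  // kSoftLimitQuarantineSizePercent == 0.1 and
  // kHardLimitQuarantineSizePercent == 0.5, a 100 MiB heap yields a 10 MiB
  // soft limit and a 50 MiB hard limit, both floored at
  // kQuarantineSizeMinLimit.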
  auto& data = GetQuarantineData();
  data.size_limit.store(
      std::max(
          QuarantineData::kQuarantineSizeMinLimit,
          static_cast<size_t>(kSoftLimitQuarantineSizePercent * heap_size)),
      std::memory_order_relaxed);
  hard_limit_ = std::max(
      QuarantineData::kQuarantineSizeMinLimit,
      static_cast<size_t>(kHardLimitQuarantineSizePercent * heap_size));

  // This computes the time window that the scheduler will reserve for the
  // mutator. Scanning, unless reaching the hard limit, will generally be
  // delayed until this time has passed.
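  // For illustration, assuming kTargetMutatorUtilizationPercent == 0.9: a
  // scan that took 10 ms reserves 10 ms * 0.9 / (1 - 0.9) = 90 ms for the
  // mutator, so the next scan is pushed out to Now() + 90 ms unless the hard
  // limit is hit first.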
  const auto time_required_on_mutator =
      time_spent_in_scan * kTargetMutatorUtilizationPercent /
      (1.0 - kTargetMutatorUtilizationPercent);
  earliest_next_scan_time_ = base::TimeTicks::Now() + time_required_on_mutator;
}

bool MUAwareTaskBasedBackend::NeedsToImmediatelyScan() {
  bool should_reschedule = false;
  base::TimeDelta reschedule_delay;
  {
    ScopedGuard guard(scheduler_lock_);
    // A zero |hard_limit_| means the soft limit was reached. Bail out if it
    // is still set, i.e. the soft limit was not reached.
    if (hard_limit_) {
      return false;
    }

    // Check if the mutator utilization requirement is satisfied.
    reschedule_delay = earliest_next_scan_time_ - base::TimeTicks::Now();
    if (reschedule_delay <= base::TimeDelta()) {
      // May invoke scan immediately.
      return true;
    }

    PA_PCSCAN_VLOG(3) << "Rescheduling scan with delay: "
                      << reschedule_delay.InMillisecondsF() << " ms";
    // Schedule a delayed scan for the point in time when MU is satisfied.
    should_reschedule = true;
  }
  // Don't reschedule under the lock as the callback can call free() and
  // recursively enter the lock.
  if (should_reschedule) {
    schedule_delayed_scan_(reschedule_delay.InMicroseconds());
  }
  return false;
}

base::TimeDelta MUAwareTaskBasedBackend::UpdateDelayedSchedule() {
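  // Returns how far ahead of |earliest_next_scan_time_| we currently are,
  // clamped to zero once the mutator-utilization window has elapsed.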
  ScopedGuard guard(scheduler_lock_);
  // TODO(1197479): Adjust schedule to current heap sizing.
  const auto delay = earliest_next_scan_time_ - base::TimeTicks::Now();
  PA_PCSCAN_VLOG(3) << "Schedule is off by " << delay.InMillisecondsF() << "ms";
  return delay >= base::TimeDelta() ? delay : base::TimeDelta();
}

}  // namespace partition_alloc::internal