// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/starscan/pcscan_internal.h"

#include <algorithm>
#include <array>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <numeric>
#include <set>
#include <thread>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "build/build_config.h"
#include "partition_alloc/address_pool_manager.h"
#include "partition_alloc/allocation_guard.h"
#include "partition_alloc/internal_allocator.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/cpu.h"
#include "partition_alloc/partition_alloc_base/debug/alias.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_base/memory/ref_counted.h"
#include "partition_alloc/partition_alloc_base/memory/scoped_refptr.h"
#include "partition_alloc/partition_alloc_base/no_destructor.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
#include "partition_alloc/partition_alloc_base/time/time.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_freelist_entry.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/starscan/pcscan_scheduling.h"
#include "partition_alloc/starscan/raceful_worklist.h"
#include "partition_alloc/starscan/scan_loop.h"
#include "partition_alloc/starscan/snapshot.h"
#include "partition_alloc/starscan/stats_collector.h"
#include "partition_alloc/starscan/stats_reporter.h"
#include "partition_alloc/tagging.h"
#include "partition_alloc/thread_cache.h"

#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#include "partition_alloc/address_pool_manager_bitmap.h"
#endif

#if PA_CONFIG(STARSCAN_NOINLINE_SCAN_FUNCTIONS)
#define PA_SCAN_INLINE PA_NOINLINE
#else
#define PA_SCAN_INLINE PA_ALWAYS_INLINE
#endif

namespace partition_alloc::internal {

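// Deliberate crash for a detected double-free attempt. PA_NO_CODE_FOLDING()
// prevents identical-code folding so the function stays distinguishable in
// crash reports.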
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void DoubleFreeAttempt() {
  PA_NO_CODE_FOLDING();
  PA_IMMEDIATE_CRASH();
}

namespace {

#if PA_CONFIG(HAS_ALLOCATION_GUARD)
// Currently, check reentrancy only on Linux. On Android TLS is emulated by the
// runtime lib, which can allocate and therefore cause reentrancy.
struct ReentrantScannerGuard final {
 public:
  ReentrantScannerGuard() {
    PA_CHECK(!guard_);
    guard_ = true;
  }
  ~ReentrantScannerGuard() { guard_ = false; }

 private:
  // Since this variable has hidden visibility (not referenced by other DSOs),
  // assume that thread_local works on all supported architectures.
  static thread_local size_t guard_;
};
thread_local size_t ReentrantScannerGuard::guard_ = 0;
#else
struct [[maybe_unused]] ReentrantScannerGuard final {};
#endif  // PA_CONFIG(HAS_ALLOCATION_GUARD)

// Scope that disables MTE checks. Only used inside scanning to avoid the race
// where a slot's tag is changed by the mutator while the scanner still sees
// the old value.
struct DisableMTEScope final {
  DisableMTEScope() {
    ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
        ::partition_alloc::TagViolationReportingMode::kDisabled);
  }
  ~DisableMTEScope() {
    ::partition_alloc::ChangeMemoryTaggingModeForCurrentThread(
        parent_tagging_mode);
  }

 private:
  ::partition_alloc::TagViolationReportingMode parent_tagging_mode =
      ::partition_alloc::internal::GetMemoryTaggingModeForCurrentThread();
};

#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
// Bytemap that represents regions (cards) that contain quarantined slots.
// A single PCScan cycle consists of the following steps:
// 1) clearing (memset quarantine + marking cards that contain quarantine);
// 2) scanning;
// 3) sweeping (freeing + unmarking cards that contain freed slots).
// Marking cards on step 1) ensures that the card table stays in a consistent
// state while scanning. Unmarking on step 3) ensures that unmarking
// actually happens (and we don't hit too many false positives).
//
// The code here relies on the fact that |address| is in the regular pool and
// that the card table (this object) is allocated at the very beginning of that
// pool.
class QuarantineCardTable final {
 public:
  // Derives the table address by masking |address|, which avoids loading the
  // base of the regular pool.
  PA_ALWAYS_INLINE static QuarantineCardTable& GetFrom(uintptr_t address) {
    PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(address));
    return *reinterpret_cast<QuarantineCardTable*>(
        address & PartitionAddressSpace::RegularPoolBaseMask());
  }

  PA_ALWAYS_INLINE void Quarantine(uintptr_t begin, size_t size) {
    return SetImpl(begin, size, true);
  }

  PA_ALWAYS_INLINE void Unquarantine(uintptr_t begin, size_t size) {
    return SetImpl(begin, size, false);
  }

  // Returns whether the card to which |address| points contains quarantined
  // slots. May return false positives, but must never return false negatives,
  // as otherwise this breaks security.
  PA_ALWAYS_INLINE bool IsQuarantined(uintptr_t address) const {
    const size_t byte = Byte(address);
    PA_SCAN_DCHECK(byte < bytes_.size());
    return bytes_[byte];
  }

 private:
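  // One byte per card: each card covers kCardSize bytes of the regular pool,
  // and kBytes cards are enough to cover a pool of kPoolMaxSize bytes.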
  static constexpr size_t kCardSize = kPoolMaxSize / kSuperPageSize;
  static constexpr size_t kBytes = kPoolMaxSize / kCardSize;

  QuarantineCardTable() = default;

  PA_ALWAYS_INLINE static size_t Byte(uintptr_t address) {
    return (address & ~PartitionAddressSpace::RegularPoolBaseMask()) /
           kCardSize;
  }

  PA_ALWAYS_INLINE void SetImpl(uintptr_t begin, size_t size, bool value) {
    const size_t byte = Byte(begin);
    const size_t need_bytes = (size + (kCardSize - 1)) / kCardSize;
    PA_SCAN_DCHECK(bytes_.size() >= byte + need_bytes);
    PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(begin));
    for (size_t i = byte; i < byte + need_bytes; ++i) {
      bytes_[i] = value;
    }
  }

  std::array<bool, kBytes> bytes_;
};
static_assert(kSuperPageSize >= sizeof(QuarantineCardTable),
              "Card table size must be less than kSuperPageSize, since this is "
              "what is committed");
#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)

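// Container aliases whose memory is served by InternalAllocator, i.e. from
// PartitionAlloc's internal partition, so PCScan bookkeeping does not allocate
// from the client partitions being scanned.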
template <typename T>
using MetadataVector = std::vector<T, InternalAllocator<T>>;
template <typename T>
using MetadataSet = std::set<T, std::less<>, InternalAllocator<T>>;
template <typename K, typename V>
using MetadataHashMap =
    std::unordered_map<K,
                       V,
                       std::hash<K>,
                       std::equal_to<>,
                       InternalAllocator<std::pair<const K, V>>>;

struct GetSlotStartResult final {
  PA_ALWAYS_INLINE bool is_found() const {
    PA_SCAN_DCHECK(!slot_start || slot_size);
    return slot_start;
  }

  uintptr_t slot_start = 0;
  size_t slot_size = 0;
};

// Returns the start of a slot, or 0 if |maybe_inner_address| is not inside an
// existing slot span. The function may return a non-0 address even inside a
// decommitted or free slot span; it's the caller's responsibility to check
// whether the memory is actually allocated.
//
// |maybe_inner_address| must be within a normal-bucket super page and can also
// point to guard pages or slot-span metadata.
PA_SCAN_INLINE GetSlotStartResult
GetSlotStartInSuperPage(uintptr_t maybe_inner_address) {
  PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_inner_address));
  // Don't use SlotSpanMetadata/PartitionPage::FromAddr() and family, because
  // they expect an address within a super page payload area, which we don't
  // yet know |maybe_inner_address| to be.
  const uintptr_t super_page = maybe_inner_address & kSuperPageBaseMask;

  const uintptr_t partition_page_index =
      (maybe_inner_address & kSuperPageOffsetMask) >> PartitionPageShift();
  auto* page =
      PartitionSuperPageToMetadataArea(super_page) + partition_page_index;
  // Check if the page is valid. The check also works for guard pages and the
  // metadata page.
  if (!page->is_valid) {
    return {};
  }

  page -= page->slot_span_metadata_offset;
  PA_SCAN_DCHECK(page->is_valid);
  PA_SCAN_DCHECK(!page->slot_span_metadata_offset);
  auto* slot_span = &page->slot_span_metadata;
  // Check if the slot span is actually used and valid.
  if (!slot_span->bucket) {
    return {};
  }
#if PA_SCAN_DCHECK_IS_ON()
  DCheckIsValidSlotSpan(slot_span);
#endif
  const uintptr_t slot_span_start =
      SlotSpanMetadata::ToSlotSpanStart(slot_span);
  const ptrdiff_t ptr_offset = maybe_inner_address - slot_span_start;
  PA_SCAN_DCHECK(0 <= ptr_offset &&
                 ptr_offset < static_cast<ptrdiff_t>(
                                  slot_span->bucket->get_pages_per_slot_span() *
                                  PartitionPageSize()));
  // The slot span size in bytes is not necessarily a multiple of the partition
  // page size. Don't check whether the pointer points outside of the usable
  // area, since checking the quarantine bit will return false in this case
  // anyway.
  const size_t slot_size = slot_span->bucket->slot_size;
  const size_t slot_number = slot_span->bucket->GetSlotNumber(ptr_offset);
  const uintptr_t slot_start = slot_span_start + (slot_number * slot_size);
  PA_SCAN_DCHECK(slot_start <= maybe_inner_address &&
                 maybe_inner_address < slot_start + slot_size);
  return {.slot_start = slot_start, .slot_size = slot_size};
}

#if PA_SCAN_DCHECK_IS_ON()
bool IsQuarantineEmptyOnSuperPage(uintptr_t super_page) {
  auto* bitmap = SuperPageStateBitmap(super_page);
  size_t visited = 0;
  bitmap->IterateQuarantined([&visited](auto) { ++visited; });
  return !visited;
}
#endif

SimdSupport DetectSimdSupport() {
#if PA_CONFIG(STARSCAN_NEON_SUPPORTED)
  return SimdSupport::kNEON;
#else
  const base::CPU& cpu = base::CPU::GetInstanceNoAllocation();
  if (cpu.has_avx2()) {
    return SimdSupport::kAVX2;
  }
  if (cpu.has_sse41()) {
    return SimdSupport::kSSE41;
  }
  return SimdSupport::kUnvectorized;
#endif  // PA_CONFIG(STARSCAN_NEON_SUPPORTED)
}

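// The card table lives at the very start of the regular pool (see
// QuarantineCardTable::GetFrom()), so enabling it simply means recommitting
// that range as read/write.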
void CommitCardTable() {
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  RecommitSystemPages(PartitionAddressSpace::RegularPoolBase(),
                      sizeof(QuarantineCardTable),
                      PageAccessibilityConfiguration(
                          PageAccessibilityConfiguration::kReadWrite),
                      PageAccessibilityDisposition::kRequireUpdate);
#endif
}

template <class Function>
void IterateNonEmptySlotSpans(uintptr_t super_page,
                              size_t nonempty_slot_spans,
                              Function function) {
  PA_SCAN_DCHECK(!(super_page % kSuperPageAlignment));
  PA_SCAN_DCHECK(nonempty_slot_spans);

  size_t slot_spans_to_visit = nonempty_slot_spans;
#if PA_SCAN_DCHECK_IS_ON()
  size_t visited = 0;
#endif

  IterateSlotSpans(super_page, true /*with_quarantine*/,
                   [&function, &slot_spans_to_visit
#if PA_SCAN_DCHECK_IS_ON()
                    ,
                    &visited
#endif
  ](SlotSpanMetadata* slot_span) {
                     if (slot_span->is_empty() || slot_span->is_decommitted()) {
                       // Skip empty/decommitted slot spans.
                       return false;
                     }
                     function(slot_span);
                     --slot_spans_to_visit;
#if PA_SCAN_DCHECK_IS_ON()
                     // In debug builds, scan all the slot spans to check that
                     // number of visited slot spans is equal to the number of
                     // nonempty_slot_spans.
                     ++visited;
                     return false;
#else
        return slot_spans_to_visit == 0;
#endif
                   });
#if PA_SCAN_DCHECK_IS_ON()
  // Check that exactly all non-empty slot spans have been visited.
  PA_DCHECK(nonempty_slot_spans == visited);
#endif
}

// SuperPageSnapshot is used to record all slot spans that contain live slots.
// The class avoids dynamic allocations and is designed to be instantiated on
// the stack. To avoid stack overflow, internal data structures are kept packed.
class SuperPageSnapshot final {
  // The following constants are used to define a conservative estimate for the
  // maximum number of slot spans in a super page.
  //
  // For systems with runtime-defined page size, assume partition page size is
  // at least 16kiB.
  static constexpr size_t kMinPartitionPageSize =
      __builtin_constant_p(PartitionPageSize()) ? PartitionPageSize() : 1 << 14;
  static constexpr size_t kStateBitmapMinReservedSize =
      __builtin_constant_p(ReservedStateBitmapSize())
          ? ReservedStateBitmapSize()
          : partition_alloc::internal::base::bits::AlignUp(
                sizeof(AllocationStateMap),
                kMinPartitionPageSize);
  // Take into account guard partition page at the end of super-page.
  static constexpr size_t kGuardPagesSize = 2 * kMinPartitionPageSize;

  static constexpr size_t kPayloadMaxSize =
      kSuperPageSize - kStateBitmapMinReservedSize - kGuardPagesSize;
  static_assert(kPayloadMaxSize % kMinPartitionPageSize == 0,
                "kPayloadMaxSize must be multiple of kMinPartitionPageSize");

  static constexpr size_t kMaxSlotSpansInSuperPage =
      kPayloadMaxSize / kMinPartitionPageSize;

 public:
  struct ScanArea {
    // Use packed integer types to save stack space. In theory, kAlignment
    // could be used instead of words, but it doesn't seem to bring savings.
    uint32_t offset_within_page_in_words;
    uint32_t size_in_words;
    uint32_t slot_size_in_words;
  };

  class ScanAreas : private std::array<ScanArea, kMaxSlotSpansInSuperPage> {
    using Base = std::array<ScanArea, kMaxSlotSpansInSuperPage>;

   public:
    using iterator = Base::iterator;
    using const_iterator = Base::const_iterator;
    using Base::operator[];

    iterator begin() { return Base::begin(); }
    const_iterator begin() const { return Base::begin(); }

    iterator end() { return std::next(begin(), size_); }
    const_iterator end() const { return std::next(begin(), size_); }

    void set_size(size_t new_size) { size_ = new_size; }

   private:
    size_t size_;
  };

  static_assert(std::is_trivially_default_constructible_v<ScanAreas>,
                "ScanAreas must be trivially default constructible to ensure "
                "that no memsets are generated by the compiler as a "
                "result of value-initialization (or zero-initialization)");

  void* operator new(size_t) = delete;
  void operator delete(void*) = delete;

  // Creates a snapshot for a single super page. In theory, we could simply
  // iterate over slot spans without taking a snapshot. However, we do this to
  // minimize the mutex locking time. The mutex must be acquired to make sure
  // that no mutator is concurrently changing any of the slot spans.
  explicit SuperPageSnapshot(uintptr_t super_page_base);

  const ScanAreas& scan_areas() const { return scan_areas_; }

 private:
  ScanAreas scan_areas_;
};

static_assert(
    sizeof(SuperPageSnapshot) <= 2048,
    "SuperPageSnapshot must stay relatively small to be allocated on stack");

SuperPageSnapshot::SuperPageSnapshot(uintptr_t super_page) {
  auto* extent_entry = PartitionSuperPageToExtent(super_page);

  ::partition_alloc::internal::ScopedGuard lock(
      ::partition_alloc::internal::PartitionRootLock(extent_entry->root));

  const size_t nonempty_slot_spans =
      extent_entry->number_of_nonempty_slot_spans;
  if (!nonempty_slot_spans) {
#if PA_SCAN_DCHECK_IS_ON()
    // Check that the quarantine bitmap is empty for super-pages that contain
    // only empty/decommitted slot-spans.
    PA_CHECK(IsQuarantineEmptyOnSuperPage(super_page));
#endif
    scan_areas_.set_size(0);
    return;
  }

  size_t current = 0;

  IterateNonEmptySlotSpans(
      super_page, nonempty_slot_spans,
      [this, &current](SlotSpanMetadata* slot_span) {
        const uintptr_t payload_begin =
            SlotSpanMetadata::ToSlotSpanStart(slot_span);
        // For single-slot slot-spans, scan only the utilized part of the slot.
        const size_t provisioned_size =
            PA_UNLIKELY(slot_span->CanStoreRawSize())
                ? slot_span->GetRawSize()
                : slot_span->GetProvisionedSize();
        // Free & decommitted slot spans are skipped.
        PA_SCAN_DCHECK(provisioned_size > 0);
        const uintptr_t payload_end = payload_begin + provisioned_size;
        auto& area = scan_areas_[current];

        const size_t offset_in_words =
            (payload_begin & kSuperPageOffsetMask) / sizeof(uintptr_t);
        const size_t size_in_words =
            (payload_end - payload_begin) / sizeof(uintptr_t);
        const size_t slot_size_in_words =
            slot_span->bucket->slot_size / sizeof(uintptr_t);

#if PA_SCAN_DCHECK_IS_ON()
        PA_DCHECK(offset_in_words <=
                  std::numeric_limits<
                      decltype(area.offset_within_page_in_words)>::max());
        PA_DCHECK(size_in_words <=
                  std::numeric_limits<decltype(area.size_in_words)>::max());
        PA_DCHECK(
            slot_size_in_words <=
            std::numeric_limits<decltype(area.slot_size_in_words)>::max());
#endif

        area.offset_within_page_in_words = offset_in_words;
        area.size_in_words = size_in_words;
        area.slot_size_in_words = slot_size_in_words;

        ++current;
      });

  PA_SCAN_DCHECK(kMaxSlotSpansInSuperPage >= current);
  scan_areas_.set_size(current);
}

}  // namespace

class PCScanScanLoop;

// This class is responsible for performing the entire PCScan task.
// TODO(bikineev): Move PCScan algorithm out of PCScanTask.
class PCScanTask final : public base::RefCountedThreadSafe<PCScanTask>,
                         public InternalPartitionAllocated {
 public:
  // Creates and initializes a PCScan state.
  PCScanTask(PCScan& pcscan, size_t quarantine_last_size);

  PCScanTask(PCScanTask&&) noexcept = delete;
  PCScanTask& operator=(PCScanTask&&) noexcept = delete;

  // Execute PCScan from mutator inside safepoint.
  void RunFromMutator();

  // Execute PCScan from the scanner thread. Must be called only once from the
  // scanner thread.
  void RunFromScanner();

  PCScanScheduler& scheduler() const { return pcscan_.scheduler(); }

 private:
  class StackVisitor;
  friend class PCScanScanLoop;

  using Root = PCScan::Root;
  using SlotSpan = SlotSpanMetadata;

  // This is used:
  // - to synchronize all scanning threads (mutators and the scanner);
  // - for the scanner, to transition through the state machine
  //   (kScheduled -> kScanning (ctor) -> kSweepingAndFinishing (dtor)).
  template <Context context>
  class SyncScope final {
   public:
    explicit SyncScope(PCScanTask& task) : task_(task) {
      task_.number_of_scanning_threads_.fetch_add(1, std::memory_order_relaxed);
      if (context == Context::kScanner) {
        task_.pcscan_.state_.store(PCScan::State::kScanning,
                                   std::memory_order_relaxed);
        task_.pcscan_.SetJoinableIfSafepointEnabled(true);
      }
    }
    ~SyncScope() {
      // First, notify the scanning thread that this thread is done.
      NotifyThreads();
      if (context == Context::kScanner) {
        // The scanner thread must wait here until all safepoints leave.
        // Otherwise, sweeping may free a page that can later be accessed by a
        // descheduled mutator.
        WaitForOtherThreads();
        task_.pcscan_.state_.store(PCScan::State::kSweepingAndFinishing,
                                   std::memory_order_relaxed);
      }
    }

   private:
    void NotifyThreads() {
      {
        // The lock is required as otherwise there is a race between
        // fetch_sub/notify in the mutator and checking
        // number_of_scanning_threads_/waiting in the scanner.
        std::lock_guard<std::mutex> lock(task_.mutex_);
        task_.number_of_scanning_threads_.fetch_sub(1,
                                                    std::memory_order_relaxed);
        {
          // Notify that the scan is done and there is no need to enter
          // the safepoint. This also helps a mutator to avoid repeatedly
          // entering. Since the scanner thread waits for all threads to finish,
          // there is no ABA problem here.
          task_.pcscan_.SetJoinableIfSafepointEnabled(false);
        }
      }
      task_.condvar_.notify_all();
    }

    void WaitForOtherThreads() {
      std::unique_lock<std::mutex> lock(task_.mutex_);
      task_.condvar_.wait(lock, [this] {
        return !task_.number_of_scanning_threads_.load(
            std::memory_order_relaxed);
      });
    }

    PCScanTask& task_;
  };

  friend class base::RefCountedThreadSafe<PCScanTask>;
  ~PCScanTask() = default;

  PA_SCAN_INLINE AllocationStateMap* TryFindScannerBitmapForPointer(
      uintptr_t maybe_ptr) const;

  // Lookup and marking functions. Return the size of the slot if marked, or
  // zero otherwise.
  PA_SCAN_INLINE size_t TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const;

  // Scans the stack; only called from safepoints.
  void ScanStack();

  // Scan individual areas.
  void ScanNormalArea(PCScanInternal& pcscan,
                      PCScanScanLoop& scan_loop,
                      uintptr_t begin,
                      uintptr_t end);
  void ScanLargeArea(PCScanInternal& pcscan,
                     PCScanScanLoop& scan_loop,
                     uintptr_t begin,
                     uintptr_t end,
                     size_t slot_size);

  // Scans all registered partitions and marks reachable quarantined slots.
  void ScanPartitions();

  // Clears quarantined slots and prepares the card table for fast lookup.
  void ClearQuarantinedSlotsAndPrepareCardTable();

  // Unprotects all slot spans from all partitions.
  void UnprotectPartitions();

  // Sweeps (frees) unreachable quarantined entries.
  void SweepQuarantine();

  // Finishes the scanner (updates limits, UMA, etc).
  void FinishScanner();

  // Cache the pcscan epoch to avoid the compiler loading the atomic
  // QuarantineData::epoch_ on each access.
  const size_t pcscan_epoch_;
  std::unique_ptr<StarScanSnapshot> snapshot_;
  StatsCollector stats_;
  // Mutex and condvar that are used to synchronize scanning threads.
  std::mutex mutex_;
  std::condition_variable condvar_;
  std::atomic<size_t> number_of_scanning_threads_{0u};
  // We can unprotect only once to reduce context switches.
  std::once_flag unprotect_once_flag_;
  bool immediatelly_free_slots_{false};
  PCScan& pcscan_;
};

PA_SCAN_INLINE AllocationStateMap* PCScanTask::TryFindScannerBitmapForPointer(
    uintptr_t maybe_ptr) const {
  PA_SCAN_DCHECK(IsManagedByPartitionAllocRegularPool(maybe_ptr));
  // First, check if |maybe_ptr| points to a valid super page or a quarantined
  // card.
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  // Check if |maybe_ptr| points to a quarantined card.
  if (PA_LIKELY(
          !QuarantineCardTable::GetFrom(maybe_ptr).IsQuarantined(maybe_ptr))) {
    return nullptr;
  }
#else   // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  // Without the card table, use the reservation offset table to check if
  // |maybe_ptr| points to a valid super-page. It's not as precise (meaning that
  // we may hit the slow path more frequently), but it reduces the memory
  // overhead. Since we are certain here that |maybe_ptr| refers to the
  // regular pool, it's okay to use the non-checking version of
  // ReservationOffsetPointer().
  const uintptr_t offset =
      maybe_ptr & ~PartitionAddressSpace::RegularPoolBaseMask();
  if (PA_LIKELY(*ReservationOffsetPointer(kRegularPoolHandle, offset) !=
                kOffsetTagNormalBuckets)) {
    return nullptr;
  }
#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
#else   // BUILDFLAG(HAS_64_BIT_POINTERS)
  if (PA_LIKELY(!IsManagedByPartitionAllocRegularPool(maybe_ptr))) {
    return nullptr;
  }
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

  // We are certain here that |maybe_ptr| points to an allocated super-page.
  return StateBitmapFromAddr(maybe_ptr);
}

// Looks up and marks a potential dangling pointer. Returns the size of the
// slot (which is then accounted as quarantined), or zero if no slot is found.
// For normal-bucket super pages, PCScan uses two quarantine bitmaps, the
// mutator and the scanner one. The former is used by mutators when slots are
// freed, while the latter is used concurrently by the PCScan thread. The
// bitmaps are swapped as soon as PCScan is triggered. Once a dangling pointer
// (which points to a slot in the scanner bitmap) is found,
// TryMarkSlotInNormalBuckets() marks it again in the mutator bitmap and clears
// it from the scanner bitmap. This way, when scanning is done, all uncleared
// entries in the scanner bitmap correspond to unreachable slots.
PA_SCAN_INLINE size_t
PCScanTask::TryMarkSlotInNormalBuckets(uintptr_t maybe_ptr) const {
  // Check if |maybe_ptr| points somewhere into the heap.
  // The caller has to make sure that |maybe_ptr| isn't MTE-tagged.
  auto* state_map = TryFindScannerBitmapForPointer(maybe_ptr);
  if (!state_map) {
    return 0;
  }

  // Beyond this point, we know that |maybe_ptr| is a pointer within a
  // normal-bucket super page.
  PA_SCAN_DCHECK(IsManagedByNormalBuckets(maybe_ptr));

#if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  // A pointer from a normal bucket is always in the first superpage.
  auto* root = Root::FromAddrInFirstSuperpage(maybe_ptr);
  // Without the card table, we must make sure that |maybe_ptr| doesn't point to
  // the metadata partition.
  // TODO(bikineev): To speed things up, consider removing the check and
  // committing quarantine bitmaps for the metadata partition.
  // TODO(bikineev): Marking an entry in the reservation-table is not a
  // publishing operation, meaning that the |root| pointer may not be assigned
  // yet. This can happen as arbitrary pointers may point into a super-page
  // during its set up. Make sure to check |root| is not null before
  // dereferencing it.
  if (PA_UNLIKELY(!root || !root->IsQuarantineEnabled())) {
    return 0;
  }
#endif  // !PA_CONFIG(STARSCAN_USE_CARD_TABLE)

  // Check if the pointer was in the quarantine bitmap.
  const GetSlotStartResult slot_start_result =
      GetSlotStartInSuperPage(maybe_ptr);
  if (!slot_start_result.is_found()) {
    return 0;
  }

  const uintptr_t slot_start = slot_start_result.slot_start;
  if (PA_LIKELY(!state_map->IsQuarantined(slot_start))) {
    return 0;
  }

  PA_SCAN_DCHECK((maybe_ptr & kSuperPageBaseMask) ==
                 (slot_start & kSuperPageBaseMask));

  if (PA_UNLIKELY(immediatelly_free_slots_)) {
    return 0;
  }

  // Now we are certain that |maybe_ptr| is a dangling pointer. Mark it again in
  // the mutator bitmap and clear it from the scanner bitmap. Note that since
  // PCScan has exclusive access to the scanner bitmap, we can avoid an atomic
  // RMW operation for it.
  if (PA_LIKELY(
          state_map->MarkQuarantinedAsReachable(slot_start, pcscan_epoch_))) {
    return slot_start_result.slot_size;
  }

  return 0;
}

void PCScanTask::ClearQuarantinedSlotsAndPrepareCardTable() {
  const PCScan::ClearType clear_type = pcscan_.clear_type_;

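  // For eager clearing the loop below would only set cards; without a card
  // table there is nothing left to do, so bail out early.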
#if !PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  if (clear_type == PCScan::ClearType::kEager) {
    return;
  }
#endif

  StarScanSnapshot::ClearingView view(*snapshot_);
  view.VisitConcurrently([clear_type](uintptr_t super_page) {
    auto* bitmap = StateBitmapFromAddr(super_page);
    auto* root = Root::FromFirstSuperPage(super_page);
    bitmap->IterateQuarantined([root, clear_type](uintptr_t slot_start) {
      auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
      // Use zero as a zapping value to speed up the fast bailout check in
      // ScanPartitions.
      const size_t size = root->GetSlotUsableSize(slot_span);
      if (clear_type == PCScan::ClearType::kLazy) {
        void* object = root->SlotStartToObject(slot_start);
        memset(object, 0, size);
      }
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
      // Set card(s) for this quarantined slot.
      QuarantineCardTable::GetFrom(slot_start).Quarantine(slot_start, size);
#endif
    });
  });
}

void PCScanTask::UnprotectPartitions() {
  auto& pcscan = PCScanInternal::Instance();
  if (!pcscan.WriteProtectionEnabled()) {
    return;
  }

  StarScanSnapshot::UnprotectingView unprotect_view(*snapshot_);
  unprotect_view.VisitConcurrently([&pcscan](uintptr_t super_page) {
    SuperPageSnapshot super_page_snapshot(super_page);

    for (const auto& scan_area : super_page_snapshot.scan_areas()) {
      const uintptr_t begin =
          super_page |
          (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
      const uintptr_t end =
          begin + (scan_area.size_in_words * sizeof(uintptr_t));

      pcscan.UnprotectPages(begin, end - begin);
    }
  });
}

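// Word-by-word scan loop used both for heap areas and for stacks. The ScanLoop
// CRTP base drives iteration, dispatched on the SIMD support detected at
// startup, and calls CheckPointer() for words that may point into the scanned
// pool.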
class PCScanScanLoop final : public ScanLoop<PCScanScanLoop> {
  friend class ScanLoop<PCScanScanLoop>;

 public:
  explicit PCScanScanLoop(const PCScanTask& task)
      : ScanLoop(PCScanInternal::Instance().simd_support()), task_(task) {}

  size_t quarantine_size() const { return quarantine_size_; }

 private:
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
    return PartitionAddressSpace::RegularPoolBase();
  }
  PA_ALWAYS_INLINE static uintptr_t RegularPoolMask() {
    return PartitionAddressSpace::RegularPoolBaseMask();
  }
#endif  // BUILDFLAG(HAS_64_BIT_POINTERS)

  PA_SCAN_INLINE void CheckPointer(uintptr_t maybe_ptr_maybe_tagged) {
    // |maybe_ptr| may have an MTE tag, so remove it first.
    quarantine_size_ +=
        task_.TryMarkSlotInNormalBuckets(UntagAddr(maybe_ptr_maybe_tagged));
  }

  const PCScanTask& task_;
  DisableMTEScope disable_mte_;
  size_t quarantine_size_ = 0;
};

class PCScanTask::StackVisitor final : public internal::StackVisitor {
 public:
  explicit StackVisitor(const PCScanTask& task) : task_(task) {}

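  // Rounds the stack range outward to 32-byte boundaries before running the
  // scan loop: the rounding can only widen the range, and the aligned bounds
  // suit the vectorized scan paths.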
  void VisitStack(uintptr_t* stack_ptr, uintptr_t* stack_top) override {
    static constexpr size_t kMinimalAlignment = 32;
    uintptr_t begin =
        reinterpret_cast<uintptr_t>(stack_ptr) & ~(kMinimalAlignment - 1);
    uintptr_t end =
        (reinterpret_cast<uintptr_t>(stack_top) + kMinimalAlignment - 1) &
        ~(kMinimalAlignment - 1);
    PA_CHECK(begin < end);
    PCScanScanLoop loop(task_);
    loop.Run(begin, end);
    quarantine_size_ += loop.quarantine_size();
  }

  // Returns the size of quarantined slots that are reachable from the current
  // stack.
  size_t quarantine_size() const { return quarantine_size_; }

 private:
  const PCScanTask& task_;
  size_t quarantine_size_ = 0;
};

PCScanTask::PCScanTask(PCScan& pcscan, size_t quarantine_last_size)
    : pcscan_epoch_(pcscan.epoch() - 1),
      snapshot_(StarScanSnapshot::Create(PCScanInternal::Instance())),
      stats_(PCScanInternal::Instance().process_name(), quarantine_last_size),
      immediatelly_free_slots_(
          PCScanInternal::Instance().IsImmediateFreeingEnabled()),
      pcscan_(pcscan) {}

void PCScanTask::ScanStack() {
  const auto& pcscan = PCScanInternal::Instance();
  if (!pcscan.IsStackScanningEnabled()) {
    return;
  }
  // Check if the stack top was registered. It may not have been, if the
  // current allocation happens from inside pthread trampolines.
  void* stack_top = StackTopRegistry::Get().GetCurrentThreadStackTop();
  if (PA_UNLIKELY(!stack_top)) {
    return;
  }

  Stack stack_scanner(stack_top);
  StackVisitor visitor(*this);
  stack_scanner.IteratePointers(&visitor);
  stats_.IncreaseSurvivedQuarantineSize(visitor.quarantine_size());
}

void PCScanTask::ScanNormalArea(PCScanInternal& pcscan,
                                PCScanScanLoop& scan_loop,
                                uintptr_t begin,
                                uintptr_t end) {
  // Protect the slot span before scanning it.
  pcscan.ProtectPages(begin, end - begin);
  scan_loop.Run(begin, end);
}

void PCScanTask::ScanLargeArea(PCScanInternal& pcscan,
                               PCScanScanLoop& scan_loop,
                               uintptr_t begin,
                               uintptr_t end,
                               size_t slot_size) {
  // For scanning large areas, it's worthwhile checking whether the range that
  // is scanned contains allocated slots. It also helps to skip discarded
  // freed slots.
  // Protect the slot span before scanning it.
  pcscan.ProtectPages(begin, end - begin);

  auto* bitmap = StateBitmapFromAddr(begin);

  for (uintptr_t current_slot = begin; current_slot < end;
       current_slot += slot_size) {
    // It is okay to skip slots as the object they hold has been zapped at this
    // point, which means that the pointers no longer retain other slots.
    if (!bitmap->IsAllocated(current_slot)) {
      continue;
    }
    uintptr_t current_slot_end = current_slot + slot_size;
    // |slot_size| may be larger than |raw_size| for single-slot slot spans.
    scan_loop.Run(current_slot, std::min(current_slot_end, end));
  }
}

void PCScanTask::ScanPartitions() {
  // Threshold of slot size above which it is worthwhile to check whether a
  // slot is allocated before scanning it. PartitionPurgeSlotSpan() purges only
  // slots >= page-size, so this helps us to avoid faulting in discarded pages.
  // We actually lower it further to 1024 to take advantage of skipping
  // unallocated slots, but don't want to go any lower, as this comes at the
  // cost of expensive bitmap checking.
  static constexpr size_t kLargeScanAreaThresholdInWords =
      1024 / sizeof(uintptr_t);

  PCScanScanLoop scan_loop(*this);
  auto& pcscan = PCScanInternal::Instance();

  StarScanSnapshot::ScanningView snapshot_view(*snapshot_);
  snapshot_view.VisitConcurrently([this, &pcscan,
                                   &scan_loop](uintptr_t super_page) {
    SuperPageSnapshot super_page_snapshot(super_page);

    for (const auto& scan_area : super_page_snapshot.scan_areas()) {
      const uintptr_t begin =
          super_page |
          (scan_area.offset_within_page_in_words * sizeof(uintptr_t));
      PA_SCAN_DCHECK(begin ==
                     super_page + (scan_area.offset_within_page_in_words *
                                   sizeof(uintptr_t)));
      const uintptr_t end = begin + scan_area.size_in_words * sizeof(uintptr_t);

      if (PA_UNLIKELY(scan_area.slot_size_in_words >=
                      kLargeScanAreaThresholdInWords)) {
        ScanLargeArea(pcscan, scan_loop, begin, end,
                      scan_area.slot_size_in_words * sizeof(uintptr_t));
      } else {
        ScanNormalArea(pcscan, scan_loop, begin, end);
      }
    }
  });

  stats_.IncreaseSurvivedQuarantineSize(scan_loop.quarantine_size());
}

namespace {

struct SweepStat {
  // Bytes that were really swept (by calling free()).
  size_t swept_bytes = 0;
  // Bytes of marked quarantine memory that were discarded (by calling
  // madvise(MADV_DONTNEED)).
  size_t discarded_bytes = 0;
};

void UnmarkInCardTable(uintptr_t slot_start, SlotSpanMetadata* slot_span) {
#if PA_CONFIG(STARSCAN_USE_CARD_TABLE)
  // Reset card(s) for this quarantined slot. Please note that the cards may
  // still contain quarantined slots (which were promoted in this scan cycle),
  // but ClearQuarantinedSlotsAndPrepareCardTable() will set them again in the
  // next PCScan cycle.
  QuarantineCardTable::GetFrom(slot_start)
      .Unquarantine(slot_start, slot_span->GetUtilizedSlotSize());
#endif  // PA_CONFIG(STARSCAN_USE_CARD_TABLE)
}

[[maybe_unused]] size_t FreeAndUnmarkInCardTable(PartitionRoot* root,
                                                 SlotSpanMetadata* slot_span,
                                                 uintptr_t slot_start) {
  void* object = root->SlotStartToObject(slot_start);
  root->FreeNoHooksImmediate(object, slot_span, slot_start);
  UnmarkInCardTable(slot_start, slot_span);
  return slot_span->bucket->slot_size;
}

[[maybe_unused]] void SweepSuperPage(PartitionRoot* root,
                                     uintptr_t super_page,
                                     size_t epoch,
                                     SweepStat& stat) {
  auto* bitmap = StateBitmapFromAddr(super_page);
  PartitionRoot::FromFirstSuperPage(super_page);
  bitmap->IterateUnmarkedQuarantined(epoch, [root,
                                             &stat](uintptr_t slot_start) {
    auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
    stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
  });
}

[[maybe_unused]] void SweepSuperPageAndDiscardMarkedQuarantine(
    PartitionRoot* root,
    uintptr_t super_page,
    size_t epoch,
    SweepStat& stat) {
  auto* bitmap = StateBitmapFromAddr(super_page);
  bitmap->IterateQuarantined(epoch, [root, &stat](uintptr_t slot_start,
                                                  bool is_marked) {
    auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
    if (PA_LIKELY(!is_marked)) {
      stat.swept_bytes += FreeAndUnmarkInCardTable(root, slot_span, slot_start);
      return;
    }
    // Otherwise, try to discard pages for the marked quarantine. Since no data
    // is stored in quarantined slots (e.g. the |next| pointer), this can be
    // done freely.
    const size_t slot_size = slot_span->bucket->slot_size;
    if (slot_size >= SystemPageSize()) {
      const uintptr_t discard_end =
          base::bits::AlignDown(slot_start + slot_size, SystemPageSize());
      const uintptr_t discard_begin =
          base::bits::AlignUp(slot_start, SystemPageSize());
      const intptr_t discard_size = discard_end - discard_begin;
      if (discard_size > 0) {
        DiscardSystemPages(discard_begin, discard_size);
        stat.discarded_bytes += discard_size;
      }
    }
  });
}

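// Sweeps a super page while batching frees: unmarked quarantined slots are
// chained into a local freelist per slot span, and each chain is released with
// a single RawFreeBatch() call, so the lock is acquired once per slot span
// rather than once per slot.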
[[maybe_unused]] void SweepSuperPageWithBatchedFree(PartitionRoot* root,
                                                    uintptr_t super_page,
                                                    size_t epoch,
                                                    SweepStat& stat) {
  auto* bitmap = StateBitmapFromAddr(super_page);
  SlotSpanMetadata* previous_slot_span = nullptr;
  internal::PartitionFreelistEntry* freelist_tail = nullptr;
  internal::PartitionFreelistEntry* freelist_head = nullptr;
  size_t freelist_entries = 0;

  const auto bitmap_iterator = [&](uintptr_t slot_start) {
    SlotSpanMetadata* current_slot_span =
        SlotSpanMetadata::FromSlotStart(slot_start);
    const internal::PartitionFreelistDispatcher* freelist_dispatcher =
        root->get_freelist_dispatcher();
    auto* entry = freelist_dispatcher->EmplaceAndInitNull(slot_start);

    if (current_slot_span != previous_slot_span) {
      // We started scanning a new slot span. Flush the accumulated freelist to
      // the slot-span's freelist. This is a single lock acquired per slot span.
      if (previous_slot_span && freelist_entries) {
        root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
                           previous_slot_span);
      }
      freelist_head = entry;
      freelist_tail = nullptr;
      freelist_entries = 0;
      previous_slot_span = current_slot_span;
    }

    if (freelist_tail) {
      freelist_dispatcher->SetNext(freelist_tail, entry);
    }
    freelist_tail = entry;
    ++freelist_entries;

    UnmarkInCardTable(slot_start, current_slot_span);

    stat.swept_bytes += current_slot_span->bucket->slot_size;
  };

  bitmap->IterateUnmarkedQuarantinedAndFree(epoch, bitmap_iterator);

  if (previous_slot_span && freelist_entries) {
    root->RawFreeBatch(freelist_head, freelist_tail, freelist_entries,
                       previous_slot_span);
  }
}

}  // namespace

void PCScanTask::SweepQuarantine() {
  // Check that the scan is unjoinable by this time.
  PA_DCHECK(!pcscan_.IsJoinable());
  // Discard marked quarantine memory on every Nth scan.
  // TODO(bikineev): Find a better signal (e.g. memory pressure, high
  // survival rate, etc).
  static constexpr size_t kDiscardMarkedQuarantineFrequency = 16;
  const bool should_discard =
      (pcscan_epoch_ % kDiscardMarkedQuarantineFrequency == 0) &&
      (pcscan_.clear_type_ == PCScan::ClearType::kEager);

  SweepStat stat;
  StarScanSnapshot::SweepingView sweeping_view(*snapshot_);
  sweeping_view.VisitNonConcurrently(
      [this, &stat, should_discard](uintptr_t super_page) {
        auto* root = PartitionRoot::FromFirstSuperPage(super_page);

#if PA_CONFIG(STARSCAN_BATCHED_FREE)
        SweepSuperPageWithBatchedFree(root, super_page, pcscan_epoch_, stat);
        (void)should_discard;
#else
        if (PA_UNLIKELY(should_discard && !root->settings.use_cookie)) {
          SweepSuperPageAndDiscardMarkedQuarantine(root, super_page,
                                                   pcscan_epoch_, stat);
        } else {
          SweepSuperPage(root, super_page, pcscan_epoch_, stat);
        }
#endif  // PA_CONFIG(STARSCAN_BATCHED_FREE)
      });

  stats_.IncreaseSweptSize(stat.swept_bytes);
  stats_.IncreaseDiscardedQuarantineSize(stat.discarded_bytes);

#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
  // Sweeping potentially frees into the current thread's thread cache. Purge
  // releases the cache back to the global allocator.
  auto* current_thread_tcache = ThreadCache::Get();
  if (ThreadCache::IsValid(current_thread_tcache)) {
    current_thread_tcache->Purge();
  }
#endif  // PA_CONFIG(THREAD_CACHE_SUPPORTED)
}

void PCScanTask::FinishScanner() {
  stats_.ReportTracesAndHists(PCScanInternal::Instance().GetReporter());

  pcscan_.scheduler_.scheduling_backend().UpdateScheduleAfterScan(
      stats_.survived_quarantine_size(), stats_.GetOverallTime(),
      PCScanInternal::Instance().CalculateTotalHeapSize());

  PCScanInternal::Instance().ResetCurrentPCScanTask();
  // Change the state and check that a concurrent task can't be scheduled
  // twice.
  PA_CHECK(pcscan_.state_.exchange(PCScan::State::kNotRunning,
                                   std::memory_order_acq_rel) ==
           PCScan::State::kSweepingAndFinishing);
}

void PCScanTask::RunFromMutator() {
  ReentrantScannerGuard reentrancy_guard;
  StatsCollector::MutatorScope overall_scope(
      stats_, StatsCollector::MutatorId::kOverall);
  {
    SyncScope<Context::kMutator> sync_scope(*this);
    // The mutator might start entering the safepoint while scanning has
    // already finished.
    if (!pcscan_.IsJoinable()) {
      return;
    }
    {
      // Clear all quarantined slots and prepare the card table.
      StatsCollector::MutatorScope clear_scope(
          stats_, StatsCollector::MutatorId::kClear);
      ClearQuarantinedSlotsAndPrepareCardTable();
    }
    {
      // Scan the thread's stack to find dangling references.
      StatsCollector::MutatorScope scan_scope(
          stats_, StatsCollector::MutatorId::kScanStack);
      ScanStack();
    }
    {
      // Unprotect all scanned pages, if needed.
      UnprotectPartitions();
    }
    {
      // Scan the heap for dangling references.
      StatsCollector::MutatorScope scan_scope(stats_,
                                              StatsCollector::MutatorId::kScan);
      ScanPartitions();
    }
  }
}

void PCScanTask::RunFromScanner() {
  ReentrantScannerGuard reentrancy_guard;
  {
    StatsCollector::ScannerScope overall_scope(
        stats_, StatsCollector::ScannerId::kOverall);
    {
      SyncScope<Context::kScanner> sync_scope(*this);
      {
        // Clear all quarantined slots and prepare the card table.
        StatsCollector::ScannerScope clear_scope(
            stats_, StatsCollector::ScannerId::kClear);
        ClearQuarantinedSlotsAndPrepareCardTable();
      }
      {
        // Scan the heap for dangling references.
        StatsCollector::ScannerScope scan_scope(
            stats_, StatsCollector::ScannerId::kScan);
        ScanPartitions();
      }
      {
        // Unprotect all scanned pages, if needed.
        UnprotectPartitions();
      }
    }
    {
      // Sweep unreachable quarantined slots.
      StatsCollector::ScannerScope sweep_scope(
          stats_, StatsCollector::ScannerId::kSweep);
      SweepQuarantine();
    }
  }
  FinishScanner();
}

class PCScan::PCScanThread final {
 public:
  using TaskHandle = PCScanInternal::TaskHandle;

  static PCScanThread& Instance() {
    // Lazily instantiate the scanning thread.
    static internal::base::NoDestructor<PCScanThread> instance;
    return *instance;
  }

  void PostTask(TaskHandle task) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      PA_DCHECK(!posted_task_.get());
      posted_task_ = std::move(task);
      wanted_delay_ = base::TimeDelta();
    }
    condvar_.notify_one();
  }

  void PostDelayedTask(base::TimeDelta delay) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      if (posted_task_.get()) {
        return;
      }
      wanted_delay_ = delay;
    }
    condvar_.notify_one();
  }

 private:
  friend class internal::base::NoDestructor<PCScanThread>;

  PCScanThread() {
    ScopedAllowAllocations allow_allocations_within_std_thread;
    std::thread{[](PCScanThread* instance) {
                  static constexpr const char* kThreadName = "PCScan";
                  // Ideally we should avoid mixing base:: and std:: API for
                  // threading, but this is useful for visualizing the pcscan
                  // thread in chrome://tracing.
                  internal::base::PlatformThread::SetName(kThreadName);
                  instance->TaskLoop();
                },
                this}
        .detach();
  }

  // Waits and returns whether the delay should be recomputed.
  bool Wait(std::unique_lock<std::mutex>& lock) {
    PA_DCHECK(lock.owns_lock());
    if (wanted_delay_.is_zero()) {
      condvar_.wait(lock, [this] {
        // Re-evaluate if either delay changed, or a task was
        // enqueued.
        return !wanted_delay_.is_zero() || posted_task_.get();
      });
      // The delay has already been set up and should not be queried again.
      return false;
    }
    condvar_.wait_for(
        lock, std::chrono::microseconds(wanted_delay_.InMicroseconds()));
    // If no task has been posted, the delay should be recomputed at this point.
    return !posted_task_.get();
  }

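  // Main loop of the detached scanner thread: waits until either a task is
  // posted or the delayed-scan timer elapses, then runs the posted task or
  // kicks off a non-blocking scan.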
  void TaskLoop() {
    while (true) {
      TaskHandle current_task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        // Scheduling.
        while (!posted_task_.get()) {
          if (Wait(lock)) {
            wanted_delay_ =
                scheduler().scheduling_backend().UpdateDelayedSchedule();
            if (wanted_delay_.is_zero()) {
              break;
            }
          }
        }
        // Differentiate between a posted task and a delayed task schedule.
        if (posted_task_.get()) {
          std::swap(current_task, posted_task_);
          wanted_delay_ = base::TimeDelta();
        } else {
          PA_DCHECK(wanted_delay_.is_zero());
        }
      }
      // Differentiate between a posted task and a delayed task schedule.
      if (current_task.get()) {
        current_task->RunFromScanner();
      } else {
        PCScan::Instance().PerformScan(PCScan::InvocationMode::kNonBlocking);
      }
    }
  }

  PCScanScheduler& scheduler() const { return PCScan::Instance().scheduler(); }

  std::mutex mutex_;
  std::condition_variable condvar_;
  TaskHandle posted_task_;
  base::TimeDelta wanted_delay_;
};

PCScanInternal::PCScanInternal() : simd_support_(DetectSimdSupport()) {}

PCScanInternal::~PCScanInternal() = default;

void PCScanInternal::Initialize(PCScan::InitConfig config) {
  PA_DCHECK(!is_initialized_);
#if BUILDFLAG(HAS_64_BIT_POINTERS)
  // Make sure that pools are initialized.
  PartitionAddressSpace::Init();
#endif
  CommitCardTable();
#if PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
  if (config.write_protection ==
      PCScan::InitConfig::WantedWriteProtectionMode::kEnabled) {
    write_protector_ = std::make_unique<UserFaultFDWriteProtector>();
  } else {
    write_protector_ = std::make_unique<NoWriteProtector>();
  }
#else
  write_protector_ = std::make_unique<NoWriteProtector>();
#endif  // PA_CONFIG(STARSCAN_UFFD_WRITE_PROTECTOR_SUPPORTED)
  PCScan::SetClearType(write_protector_->SupportedClearType());

  if (config.safepoint == PCScan::InitConfig::SafepointMode::kEnabled) {
    PCScan::Instance().EnableSafepoints();
  }
  scannable_roots_ = RootsMap();
  nonscannable_roots_ = RootsMap();

  static partition_alloc::StatsReporter s_no_op_reporter;
  PCScan::Instance().RegisterStatsReporter(&s_no_op_reporter);

  // Don't initialize PCScanThread::Instance(), as otherwise the sandbox
  // complains about multiple threads running during sandbox initialization.
  is_initialized_ = true;
}
1329 
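// Starts a scan: atomically moves the state machine from kNotRunning to
// kScheduled (bailing out if a scan is already in progress), creates a new
// PCScanTask and either posts it to the scanner thread (kNonBlocking) or runs
// it synchronously on the current thread (kBlocking/kForcedBlocking).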
void PCScanInternal::PerformScan(PCScan::InvocationMode invocation_mode) {
#if PA_SCAN_DCHECK_IS_ON()
  PA_DCHECK(is_initialized());
  PA_DCHECK(scannable_roots().size() > 0);
  PA_DCHECK(std::all_of(
      scannable_roots().begin(), scannable_roots().end(),
      [](const auto& pair) { return pair.first->IsScanEnabled(); }));
  PA_DCHECK(std::all_of(
      nonscannable_roots().begin(), nonscannable_roots().end(),
      [](const auto& pair) { return pair.first->IsQuarantineEnabled(); }));
#endif

  PCScan& frontend = PCScan::Instance();
  {
    // If scanning is already in progress, bail out.
    PCScan::State expected = PCScan::State::kNotRunning;
    if (!frontend.state_.compare_exchange_strong(
            expected, PCScan::State::kScheduled, std::memory_order_acq_rel,
            std::memory_order_relaxed)) {
      return;
    }
  }

  const size_t last_quarantine_size =
      frontend.scheduler_.scheduling_backend().ScanStarted();

  // Create PCScan task and set it as current.
  auto task = base::MakeRefCounted<PCScanTask>(frontend, last_quarantine_size);
  PCScanInternal::Instance().SetCurrentPCScanTask(task);

  if (PA_UNLIKELY(invocation_mode ==
                  PCScan::InvocationMode::kScheduleOnlyForTesting)) {
    // Immediately change the state to enable safepoint testing.
    frontend.state_.store(PCScan::State::kScanning, std::memory_order_release);
    frontend.SetJoinableIfSafepointEnabled(true);
    return;
  }

  // Post PCScan task.
  if (PA_LIKELY(invocation_mode == PCScan::InvocationMode::kNonBlocking)) {
    PCScan::PCScanThread::Instance().PostTask(std::move(task));
  } else {
    PA_SCAN_DCHECK(PCScan::InvocationMode::kBlocking == invocation_mode ||
                   PCScan::InvocationMode::kForcedBlocking == invocation_mode);
    std::move(*task).RunFromScanner();
  }
}

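// Triggers a scan only if at least one scannable root is registered and
// either the caller forces it (kForcedBlocking) or the quarantine has grown
// past the minimum scanning threshold.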
void PCScanInternal::PerformScanIfNeeded(
    PCScan::InvocationMode invocation_mode) {
  if (!scannable_roots().size()) {
    return;
  }
  PCScan& frontend = PCScan::Instance();
  if (invocation_mode == PCScan::InvocationMode::kForcedBlocking ||
      frontend.scheduler_.scheduling_backend()
          .GetQuarantineData()
          .MinimumScanningThresholdReached()) {
    PerformScan(invocation_mode);
  }
}

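// Asks the scanner thread to schedule a scan after |delay|.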
void PCScanInternal::PerformDelayedScan(base::TimeDelta delay) {
  PCScan::PCScanThread::Instance().PostDelayedTask(delay);
}

void PCScanInternal::JoinScan() {
  // Current task can be destroyed by the scanner. Check that it's valid.
  if (auto current_task = CurrentPCScanTask()) {
    current_task->RunFromMutator();
  }
}

PCScanInternal::TaskHandle PCScanInternal::CurrentPCScanTask() const {
  std::lock_guard<std::mutex> lock(current_task_mutex_);
  return current_task_;
}

void PCScanInternal::SetCurrentPCScanTask(TaskHandle task) {
  std::lock_guard<std::mutex> lock(current_task_mutex_);
  current_task_ = std::move(task);
}

void PCScanInternal::ResetCurrentPCScanTask() {
  std::lock_guard<std::mutex> lock(current_task_mutex_);
  current_task_.reset();
}

namespace {
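// Walks all super-page extents of |root|, recommits the state bitmap of every
// super page and returns the collected super-page addresses.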
PCScanInternal::SuperPages GetSuperPagesAndCommitStateBitmaps(
    PCScan::Root& root) {
  const size_t state_bitmap_size_to_commit = CommittedStateBitmapSize();
  PCScanInternal::SuperPages super_pages;
  for (auto* super_page_extent = root.first_extent; super_page_extent;
       super_page_extent = super_page_extent->next) {
    for (uintptr_t super_page = SuperPagesBeginFromExtent(super_page_extent),
                   super_page_end = SuperPagesEndFromExtent(super_page_extent);
         super_page != super_page_end; super_page += kSuperPageSize) {
      // Make sure the metadata is committed.
      // TODO(bikineev): Remove once this is known to work.
      const volatile char* metadata =
          reinterpret_cast<char*>(PartitionSuperPageToMetadataArea(super_page));
      *metadata;
      RecommitSystemPages(SuperPageStateBitmapAddr(super_page),
                          state_bitmap_size_to_commit,
                          PageAccessibilityConfiguration(
                              PageAccessibilityConfiguration::kReadWrite),
                          PageAccessibilityDisposition::kRequireUpdate);
      super_pages.push_back(super_page);
    }
  }
  return super_pages;
}
}  // namespace

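// Enables scanning and quarantining for |root| and records its super pages in
// |scannable_roots_|. No-op if scanning is already enabled for the root.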
void PCScanInternal::RegisterScannableRoot(Root* root) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(root);
  // Avoid nesting locks and store super_pages in a temporary vector.
  SuperPages super_pages;
  {
    ::partition_alloc::internal::ScopedGuard guard(
        ::partition_alloc::internal::PartitionRootLock(root));
    PA_CHECK(root->IsQuarantineAllowed());
    if (root->IsScanEnabled()) {
      return;
    }
    PA_CHECK(!root->IsQuarantineEnabled());
    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
    root->settings.scan_mode = Root::ScanMode::kEnabled;
    root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
  }
  std::lock_guard<std::mutex> lock(roots_mutex_);
  PA_DCHECK(!scannable_roots_.count(root));
  auto& root_super_pages = scannable_roots_[root];
  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
                          super_pages.end());
}

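// Enables quarantining (but not scanning) for |root| and records its super
// pages in |nonscannable_roots_|. No-op if quarantine is already enabled.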
void PCScanInternal::RegisterNonScannableRoot(Root* root) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(root);
  // Avoid nesting locks and store super_pages in a temporary vector.
  SuperPages super_pages;
  {
    ::partition_alloc::internal::ScopedGuard guard(
        ::partition_alloc::internal::PartitionRootLock(root));
    PA_CHECK(root->IsQuarantineAllowed());
    PA_CHECK(!root->IsScanEnabled());
    if (root->IsQuarantineEnabled()) {
      return;
    }
    super_pages = GetSuperPagesAndCommitStateBitmaps(*root);
    root->settings.quarantine_mode = Root::QuarantineMode::kEnabled;
  }
  std::lock_guard<std::mutex> lock(roots_mutex_);
  PA_DCHECK(!nonscannable_roots_.count(root));
  auto& root_super_pages = nonscannable_roots_[root];
  root_super_pages.insert(root_super_pages.end(), super_pages.begin(),
                          super_pages.end());
}

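// Records a freshly allocated super page in the map that matches the root's
// mode (scannable vs. non-scannable).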
void PCScanInternal::RegisterNewSuperPage(Root* root,
                                          uintptr_t super_page_base) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(root);
  PA_CHECK(root->IsQuarantineAllowed());
  PA_DCHECK(!(super_page_base % kSuperPageAlignment));
  // Make sure the metadata is committed.
  // TODO(bikineev): Remove once this is known to work.
  const volatile char* metadata = reinterpret_cast<char*>(
      PartitionSuperPageToMetadataArea(super_page_base));
  *metadata;

  std::lock_guard<std::mutex> lock(roots_mutex_);

  // Dispatch based on whether root is scannable or not.
  if (root->IsScanEnabled()) {
    PA_DCHECK(scannable_roots_.count(root));
    auto& super_pages = scannable_roots_[root];
    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
                        super_page_base) == super_pages.end());
    super_pages.push_back(super_page_base);
  } else {
    PA_DCHECK(root->IsQuarantineEnabled());
    PA_DCHECK(nonscannable_roots_.count(root));
    auto& super_pages = nonscannable_roots_[root];
    PA_DCHECK(std::find(super_pages.begin(), super_pages.end(),
                        super_page_base) == super_pages.end());
    super_pages.push_back(super_page_base);
  }
}

void PCScanInternal::SetProcessName(const char* process_name) {
  PA_DCHECK(is_initialized());
  PA_DCHECK(process_name);
  PA_DCHECK(!process_name_);
  process_name_ = process_name;
}

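// Sums the committed page size of all registered roots, scannable and
// non-scannable alike.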
size_t PCScanInternal::CalculateTotalHeapSize() const {
  PA_DCHECK(is_initialized());
  std::lock_guard<std::mutex> lock(roots_mutex_);
  const auto acc = [](size_t size, const auto& pair) {
    return size + pair.first->get_total_size_of_committed_pages();
  };
  return std::accumulate(scannable_roots_.begin(), scannable_roots_.end(), 0u,
                         acc) +
         std::accumulate(nonscannable_roots_.begin(), nonscannable_roots_.end(),
                         0u, acc);
}

void PCScanInternal::EnableStackScanning() {
  PA_DCHECK(!stack_scanning_enabled_);
  stack_scanning_enabled_ = true;
}
void PCScanInternal::DisableStackScanning() {
  PA_DCHECK(stack_scanning_enabled_);
  stack_scanning_enabled_ = false;
}
bool PCScanInternal::IsStackScanningEnabled() const {
  return stack_scanning_enabled_;
}

bool PCScanInternal::WriteProtectionEnabled() const {
  return write_protector_->IsEnabled();
}

void PCScanInternal::ProtectPages(uintptr_t begin, size_t size) {
  // Slot-span sizes are a multiple of the system page size. However, the
  // ranges that are recorded are not, since the snapshot only records the used
  // payload. Therefore, align the incoming range up to the system page size.
  // The unused part of slot-spans doesn't need to be protected (the allocator
  // will enter the safepoint before trying to allocate from it).
  PA_SCAN_DCHECK(write_protector_.get());
  write_protector_->ProtectPages(
      begin,
      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
}

void PCScanInternal::UnprotectPages(uintptr_t begin, size_t size) {
  PA_SCAN_DCHECK(write_protector_.get());
  write_protector_->UnprotectPages(
      begin,
      partition_alloc::internal::base::bits::AlignUp(size, SystemPageSize()));
}

void PCScanInternal::ClearRootsForTesting() {
  std::lock_guard<std::mutex> lock(roots_mutex_);
  // Set all roots as non-scannable and non-quarantinable.
  for (auto& pair : scannable_roots_) {
    Root* root = pair.first;
    root->settings.scan_mode = Root::ScanMode::kDisabled;
    root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
  }
  for (auto& pair : nonscannable_roots_) {
    Root* root = pair.first;
    root->settings.quarantine_mode = Root::QuarantineMode::kDisabledByDefault;
  }
  // Make sure to destroy the maps so that on the following ReinitForTesting()
  // call the maps don't attempt to destroy the backing.
  scannable_roots_.clear();
  scannable_roots_.~RootsMap();
  nonscannable_roots_.clear();
  nonscannable_roots_.~RootsMap();
  // Destroy the write protector object, so that there is no double free on
  // the next call to ReinitForTesting().
  write_protector_.reset();
}

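// Re-constructs |*this| in place via placement new and runs Initialize()
// again; pairs with ClearRootsForTesting(), which tears down the previous
// state.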
void PCScanInternal::ReinitForTesting(PCScan::InitConfig config) {
  is_initialized_ = false;
  auto* new_this = new (this) PCScanInternal;
  new_this->Initialize(config);
}

void PCScanInternal::FinishScanForTesting() {
  auto current_task = CurrentPCScanTask();
  PA_CHECK(current_task.get());
  current_task->RunFromScanner();
}

void PCScanInternal::RegisterStatsReporter(
    partition_alloc::StatsReporter* reporter) {
  PA_DCHECK(reporter);
  stats_reporter_ = reporter;
}

partition_alloc::StatsReporter& PCScanInternal::GetReporter() {
  PA_DCHECK(stats_reporter_);
  return *stats_reporter_;
}

}  // namespace partition_alloc::internal