// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <cstdint>

#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

#include "partition_alloc/starscan/pcscan.h"

#include "build/build_config.h"
#include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/cpu.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_for_testing.h"
#include "partition_alloc/partition_freelist_entry.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/stack/stack.h"
#include "partition_alloc/tagging.h"
#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(USE_STARSCAN)

namespace partition_alloc::internal {

namespace {

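// Scoped helper that turns PCScan stack scanning off if it is currently
// enabled and restores the previous setting on destruction.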
struct DisableStackScanningScope final {
  DisableStackScanningScope() {
    if (PCScan::IsStackScanningEnabled()) {
      PCScan::DisableStackScanning();
      changed_ = true;
    }
  }
  ~DisableStackScanningScope() {
    if (changed_) {
      PCScan::EnableStackScanning();
    }
  }

 private:
  bool changed_ = false;
};

}  // namespace

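// Base fixture: creates a PartitionAlloc root with *Scan quarantine allowed,
// registers it as a scannable root with PCScan and exposes helpers to run,
// schedule, join and finish scans from tests.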
class PartitionAllocPCScanTestBase : public testing::Test {
 public:
  PartitionAllocPCScanTestBase()
      : allocator_([]() {
          PartitionOptions opts;
          opts.star_scan_quarantine = PartitionOptions::kAllowed;
          opts.memory_tagging = {
              .enabled = base::CPU::GetInstanceNoAllocation().has_mte()
                             ? partition_alloc::PartitionOptions::kEnabled
                             : partition_alloc::PartitionOptions::kDisabled};
          return opts;
        }()) {
    PartitionAllocGlobalInit([](size_t) { PA_LOG(FATAL) << "Out of memory"; });
    // Previous test runs within the same process decommit pools, therefore
    // we need to make sure that the card table is recommitted for each run.
    PCScan::ReinitForTesting(
        {PCScan::InitConfig::WantedWriteProtectionMode::kDisabled,
         PCScan::InitConfig::SafepointMode::kEnabled});
    allocator_.root()->UncapEmptySlotSpanMemoryForTesting();
    allocator_.root()->SwitchToDenserBucketDistribution();

    PCScan::RegisterScannableRoot(allocator_.root());
  }

  ~PartitionAllocPCScanTestBase() override {
    allocator_.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
                                   PurgeFlags::kDiscardUnusedSystemPages);
    PartitionAllocGlobalUninitForTesting();
  }

  void RunPCScan() {
    PCScan::Instance().PerformScan(PCScan::InvocationMode::kBlocking);
  }

  void SchedulePCScan() {
    PCScan::Instance().PerformScan(
        PCScan::InvocationMode::kScheduleOnlyForTesting);
  }

  void JoinPCScanAsMutator() {
    auto& instance = PCScan::Instance();
    PA_CHECK(instance.IsJoinable());
    instance.JoinScan();
  }

  void FinishPCScanAsScanner() { PCScan::FinishScanForTesting(); }

  bool IsInQuarantine(void* object) const {
    uintptr_t slot_start = root().ObjectToSlotStart(object);
    return StateBitmapFromAddr(slot_start)->IsQuarantined(slot_start);
  }

  PartitionRoot& root() { return *allocator_.root(); }
  const PartitionRoot& root() const { return *allocator_.root(); }

 private:
  // Leverage the already-templated version outside `internal::`.
  partition_alloc::PartitionAllocatorAllowLeaksForTesting allocator_;
};

namespace {

// The test that expects a freed object to be quarantined only when tag
// overflow occurs.
using PartitionAllocPCScanWithMTETest = PartitionAllocPCScanTestBase;

// The test that expects every freed object to be quarantined.
class PartitionAllocPCScanTest : public PartitionAllocPCScanTestBase {
 public:
  PartitionAllocPCScanTest() { root().SetQuarantineAlwaysForTesting(true); }
  ~PartitionAllocPCScanTest() override {
    root().SetQuarantineAlwaysForTesting(false);
  }
};

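// Result of filling up a slot span: its metadata plus the first and the last
// objects allocated in it.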
struct FullSlotSpanAllocation {
  SlotSpanMetadata* slot_span;
  void* first;
  void* last;
};

// Assumes heap is purged.
FullSlotSpanAllocation GetFullSlotSpan(PartitionRoot& root,
                                       size_t object_size) {
  PA_CHECK(0u == root.get_total_size_of_committed_pages());

  const size_t raw_size = root.AdjustSizeForExtrasAdd(object_size);
  const size_t bucket_index =
      root.SizeToBucketIndex(raw_size, root.GetBucketDistribution());
  PartitionRoot::Bucket& bucket = root.buckets[bucket_index];
  const size_t num_slots = (bucket.get_bytes_per_span()) / bucket.slot_size;

  uintptr_t first = 0;
  uintptr_t last = 0;
  for (size_t i = 0; i < num_slots; ++i) {
    void* ptr = root.Alloc<partition_alloc::AllocFlags::kNoHooks>(object_size);
    EXPECT_TRUE(ptr);
    if (i == 0) {
      first = root.ObjectToSlotStart(ptr);
    } else if (i == num_slots - 1) {
      last = root.ObjectToSlotStart(ptr);
    }
  }

  EXPECT_EQ(SlotSpanMetadata::FromSlotStart(first),
            SlotSpanMetadata::FromSlotStart(last));
  if (bucket.num_system_pages_per_slot_span ==
      NumSystemPagesPerPartitionPage()) {
    // Pointers are expected to be in the same partition page, but have a
    // different MTE-tag.
    EXPECT_EQ(UntagAddr(first & PartitionPageBaseMask()),
              UntagAddr(last & PartitionPageBaseMask()));
  }
  EXPECT_EQ(num_slots, bucket.active_slot_spans_head->num_allocated_slots);
  EXPECT_EQ(nullptr, bucket.active_slot_spans_head->get_freelist_head());
  EXPECT_TRUE(bucket.is_valid());
  EXPECT_TRUE(bucket.active_slot_spans_head !=
              SlotSpanMetadata::get_sentinel_slot_span());

  return {bucket.active_slot_spans_head, root.SlotStartToObject(first),
          root.SlotStartToObject(last)};
}

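// Returns true if the slot starting at |slot_start| is currently linked into
// its slot span's freelist.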
bool IsInFreeList(uintptr_t slot_start) {
  // slot_start isn't MTE-tagged, whereas pointers in the freelist are.
  void* slot_start_tagged = SlotStartAddr2Ptr(slot_start);
  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
  const PartitionFreelistDispatcher* freelist_dispatcher =
      PartitionRoot::FromSlotSpanMetadata(slot_span)->get_freelist_dispatcher();
  for (auto* entry = slot_span->get_freelist_head(); entry;
       entry =
           freelist_dispatcher->GetNext(entry, slot_span->bucket->slot_size)) {
    if (entry == slot_start_tagged) {
      return true;
    }
  }
  return false;
}

struct ListBase {
  // Volatile to prevent the compiler from doing dead store elimination.
  ListBase* volatile next = nullptr;
};

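// Singly-linked list node with a |Size|-byte payload, optionally allocated
// with the requested |Alignment|. Used to create references between heap
// objects that PCScan is expected to discover.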
template <size_t Size, size_t Alignment = 0>
struct List final : ListBase {
  char buffer[Size];

  static List* Create(PartitionRoot& root, ListBase* next = nullptr) {
    List* list;
    if (Alignment) {
      list = static_cast<List*>(root.AlignedAlloc(Alignment, sizeof(List)));
    } else {
      list = static_cast<List*>(root.Alloc(sizeof(List), nullptr));
    }
    list->next = next;
    return list;
  }

  static void Destroy(PartitionRoot& root, List* list) { root.Free(list); }
};

constexpr auto kPartitionOptionWithStarScan = []() {
  PartitionOptions opts;
  opts.star_scan_quarantine = PartitionOptions::kAllowed;
  return opts;
}();

TEST_F(PartitionAllocPCScanTest, ArbitraryObjectInQuarantine) {
  using ListType = List<8>;

  auto* obj1 = ListType::Create(root());
  auto* obj2 = ListType::Create(root());
  EXPECT_FALSE(IsInQuarantine(obj1));
  EXPECT_FALSE(IsInQuarantine(obj2));

  ListType::Destroy(root(), obj2);
  EXPECT_FALSE(IsInQuarantine(obj1));
  EXPECT_TRUE(IsInQuarantine(obj2));
}

TEST_F(PartitionAllocPCScanTest, FirstObjectInQuarantine) {
  static constexpr size_t kAllocationSize = 16;

  FullSlotSpanAllocation full_slot_span =
      GetFullSlotSpan(root(), kAllocationSize);
  EXPECT_FALSE(IsInQuarantine(full_slot_span.first));

  root().Free<FreeFlags::kNoHooks>(full_slot_span.first);
  EXPECT_TRUE(IsInQuarantine(full_slot_span.first));
}

TEST_F(PartitionAllocPCScanTest, LastObjectInQuarantine) {
  static constexpr size_t kAllocationSize = 16;

  FullSlotSpanAllocation full_slot_span =
      GetFullSlotSpan(root(), kAllocationSize);
  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));

  root().Free<FreeFlags::kNoHooks>(full_slot_span.last);
  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
}

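// Frees |value| while |source| still references it and verifies that PCScan
// keeps |value| quarantined; then clears the reference and verifies that the
// next scan releases |value| back to the freelist.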
template <typename SourceList, typename ValueList>
void TestDanglingReference(PartitionAllocPCScanTest& test,
                           SourceList* source,
                           ValueList* value,
                           PartitionRoot& value_root) {
  {
    // Free |value| and leave the dangling reference in |source|.
    ValueList::Destroy(value_root, value);
    // Check that |value| is in the quarantine now.
    EXPECT_TRUE(test.IsInQuarantine(value));
    // Run PCScan.
    test.RunPCScan();
    // Check that the object is still quarantined since it's referenced by
    // |source|.
    EXPECT_TRUE(test.IsInQuarantine(value));
  }
  {
    // Get rid of the dangling reference.
    source->next = nullptr;
    // Run PCScan again.
    test.RunPCScan();
    // Check that the object is no longer in the quarantine.
    EXPECT_FALSE(test.IsInQuarantine(value));
    // Check that the object is in the freelist now.
    EXPECT_TRUE(IsInFreeList(value_root.ObjectToSlotStart(value)));
  }
}

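// Frees |value| and verifies that a single PCScan cycle releases it to the
// freelist, because the reference to it is never visited by the scanner.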
void TestDanglingReferenceNotVisited(PartitionAllocPCScanTest& test,
                                     void* value,
                                     PartitionRoot& value_root) {
  value_root.Free(value);
  // Check that |value| is in the quarantine now.
  EXPECT_TRUE(test.IsInQuarantine(value));
  // Run PCScan.
  test.RunPCScan();
  // Check that the object is no longer in the quarantine since the pointer to
  // it was not scanned from the non-scannable partition.
  EXPECT_FALSE(test.IsInQuarantine(value));
  // Check that the object is in the freelist now.
  EXPECT_TRUE(IsInFreeList(value_root.ObjectToSlotStart(value)));
}

TEST_F(PartitionAllocPCScanTest, DanglingReferenceSameBucket) {
  using SourceList = List<8>;
  using ValueList = SourceList;

  // Create two objects, where |source| references |value|.
  auto* value = ValueList::Create(root(), nullptr);
  auto* source = SourceList::Create(root(), value);

  TestDanglingReference(*this, source, value, root());
}

TEST_F(PartitionAllocPCScanTest, DanglingReferenceDifferentBuckets) {
  using SourceList = List<8>;
  using ValueList = List<128>;

  // Create two objects, where |source| references |value|.
  auto* value = ValueList::Create(root(), nullptr);
  auto* source = SourceList::Create(root(), value);

  TestDanglingReference(*this, source, value, root());
}

TEST_F(PartitionAllocPCScanTest, DanglingReferenceDifferentBucketsAligned) {
  // Choose a high alignment that almost certainly will cause a gap between slot
  // spans. But make it less than kMaxSupportedAlignment, or else two
  // allocations will end up on different super pages.
  constexpr size_t alignment = kMaxSupportedAlignment / 2;
  using SourceList = List<8, alignment>;
  using ValueList = List<128, alignment>;

  // Create two objects, where |source| references |value|.
  auto* value = ValueList::Create(root(), nullptr);
  auto* source = SourceList::Create(root(), value);

  // Double check the setup -- make sure that exactly two slot spans were
  // allocated, within the same super page, with a gap in between.
  {
    ::partition_alloc::internal::ScopedGuard guard{root().lock_};

    uintptr_t value_slot_start = root().ObjectToSlotStart(value);
    uintptr_t source_slot_start = root().ObjectToSlotStart(source);
    auto super_page = value_slot_start & kSuperPageBaseMask;
    ASSERT_EQ(super_page, source_slot_start & kSuperPageBaseMask);
    size_t i = 0;
    uintptr_t first_slot_span_end = 0;
    uintptr_t second_slot_span_start = 0;
    IterateSlotSpans(
        super_page, true, [&](SlotSpanMetadata* slot_span) -> bool {
          if (i == 0) {
            first_slot_span_end = SlotSpanMetadata::ToSlotSpanStart(slot_span) +
                                  slot_span->bucket->get_pages_per_slot_span() *
                                      PartitionPageSize();
          } else {
            second_slot_span_start =
                SlotSpanMetadata::ToSlotSpanStart(slot_span);
          }
          ++i;
          return false;
        });
    ASSERT_EQ(i, 2u);
    ASSERT_GT(second_slot_span_start, first_slot_span_end);
  }

  TestDanglingReference(*this, source, value, root());
}

TEST_F(PartitionAllocPCScanTest,
       DanglingReferenceSameSlotSpanButDifferentPages) {
  using SourceList = List<8>;
  using ValueList = SourceList;

  static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
      static_cast<size_t>(PartitionPageSize() * 0.75);

  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
      root(), root().AdjustSizeForExtrasSubtract(
                  kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));

  // Assert that the first and the last objects are in the same slot span but on
  // different partition pages.
  // Converting to slot start also takes care of the MTE-tag difference.
  ASSERT_EQ(SlotSpanMetadata::FromObject(full_slot_span.first),
            SlotSpanMetadata::FromObject(full_slot_span.last));
  uintptr_t first_slot_start = root().ObjectToSlotStart(full_slot_span.first);
  uintptr_t last_slot_start = root().ObjectToSlotStart(full_slot_span.last);
  ASSERT_NE(first_slot_start & PartitionPageBaseMask(),
            last_slot_start & PartitionPageBaseMask());

  // Create two objects, on different partition pages.
  auto* value = new (full_slot_span.first) ValueList;
  auto* source = new (full_slot_span.last) SourceList;
  source->next = value;

  TestDanglingReference(*this, source, value, root());
}

TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromFullPage) {
  using SourceList = List<64>;
  using ValueList = SourceList;

  FullSlotSpanAllocation full_slot_span =
      GetFullSlotSpan(root(), sizeof(SourceList));
  void* source_buffer = full_slot_span.first;
  // This allocation must go through the slow path and call SetNewActivePage(),
  // which will flush the full page from the active page list.
  void* value_buffer =
      root().Alloc<partition_alloc::AllocFlags::kNoHooks>(sizeof(ValueList));

  // Assert that the first and the last objects are in different slot spans but
  // in the same bucket.
  SlotSpanMetadata* source_slot_span =
      PartitionRoot::SlotSpanMetadata::FromObject(source_buffer);
  SlotSpanMetadata* value_slot_span =
      PartitionRoot::SlotSpanMetadata::FromObject(value_buffer);
  ASSERT_NE(source_slot_span, value_slot_span);
  ASSERT_EQ(source_slot_span->bucket, value_slot_span->bucket);

  // Create two objects, where |source| is in a full detached page.
  auto* value = new (value_buffer) ValueList;
  auto* source = new (source_buffer) SourceList;
  source->next = value;

  TestDanglingReference(*this, source, value, root());
}

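// List-like node whose |next| field is meant to point into the interior
// (|buffer2|) of another object rather than to the object's base address.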
template <size_t Size>
struct ListWithInnerReference {
  char buffer1[Size];
  // Volatile to prevent the compiler from doing dead store elimination.
  char* volatile next = nullptr;
  char buffer2[Size];

  static ListWithInnerReference* Create(PartitionRoot& root) {
    auto* list = static_cast<ListWithInnerReference*>(
        root.Alloc(sizeof(ListWithInnerReference), nullptr));
    return list;
  }

  static void Destroy(PartitionRoot& root, ListWithInnerReference* list) {
    root.Free(list);
  }
};

// Disabled due to consistent failure http://crbug.com/1242407
#if BUILDFLAG(IS_ANDROID)
#define MAYBE_DanglingInnerReference DISABLED_DanglingInnerReference
#else
#define MAYBE_DanglingInnerReference DanglingInnerReference
#endif
TEST_F(PartitionAllocPCScanTest, MAYBE_DanglingInnerReference) {
  using SourceList = ListWithInnerReference<64>;
  using ValueList = SourceList;

  auto* source = SourceList::Create(root());
  auto* value = ValueList::Create(root());
  source->next = value->buffer2;

  TestDanglingReference(*this, source, value, root());
}

TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromSingleSlotSlotSpan) {
  using SourceList = List<kMaxBucketed - 4096>;
  using ValueList = SourceList;

  auto* source = SourceList::Create(root());
  auto* slot_span = SlotSpanMetadata::FromObject(source);
  ASSERT_TRUE(slot_span->CanStoreRawSize());

  auto* value = ValueList::Create(root());
  source->next = value;

  TestDanglingReference(*this, source, value, root());
}

TEST_F(PartitionAllocPCScanTest, DanglingInterPartitionReference) {
  using SourceList = List<64>;
  using ValueList = SourceList;

  PartitionRoot source_root(kPartitionOptionWithStarScan);
  source_root.UncapEmptySlotSpanMemoryForTesting();
  PartitionRoot value_root(kPartitionOptionWithStarScan);
  value_root.UncapEmptySlotSpanMemoryForTesting();

  PCScan::RegisterScannableRoot(&source_root);
  source_root.SetQuarantineAlwaysForTesting(true);
  PCScan::RegisterScannableRoot(&value_root);
  value_root.SetQuarantineAlwaysForTesting(true);

  auto* source = SourceList::Create(source_root);
  auto* value = ValueList::Create(value_root);
  source->next = value;

  TestDanglingReference(*this, source, value, value_root);
}

TEST_F(PartitionAllocPCScanTest, DanglingReferenceToNonScannablePartition) {
  using SourceList = List<64>;
  using ValueList = SourceList;

  PartitionRoot source_root(kPartitionOptionWithStarScan);
  source_root.UncapEmptySlotSpanMemoryForTesting();
  PartitionRoot value_root(kPartitionOptionWithStarScan);
  value_root.UncapEmptySlotSpanMemoryForTesting();

  PCScan::RegisterScannableRoot(&source_root);
  source_root.SetQuarantineAlwaysForTesting(true);
  PCScan::RegisterNonScannableRoot(&value_root);
  value_root.SetQuarantineAlwaysForTesting(true);

  auto* source = SourceList::Create(source_root);
  auto* value = ValueList::Create(value_root);
  source->next = value;

  TestDanglingReference(*this, source, value, value_root);
}

TEST_F(PartitionAllocPCScanTest, DanglingReferenceFromNonScannablePartition) {
  using SourceList = List<64>;
  using ValueList = SourceList;

  PartitionRoot source_root(kPartitionOptionWithStarScan);
  source_root.UncapEmptySlotSpanMemoryForTesting();
  PartitionRoot value_root(kPartitionOptionWithStarScan);
  value_root.UncapEmptySlotSpanMemoryForTesting();

  PCScan::RegisterNonScannableRoot(&source_root);
  value_root.SetQuarantineAlwaysForTesting(true);
  PCScan::RegisterScannableRoot(&value_root);
  source_root.SetQuarantineAlwaysForTesting(true);

  auto* source = SourceList::Create(source_root);
  auto* value = ValueList::Create(value_root);
  source->next = value;

  TestDanglingReferenceNotVisited(*this, value, value_root);
}

// Death tests misbehave on Android, http://crbug.com/643760.
#if defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)
#if PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
TEST_F(PartitionAllocPCScanTest, DoubleFree) {
  auto* list = List<1>::Create(root());
  List<1>::Destroy(root(), list);
  EXPECT_DEATH(List<1>::Destroy(root(), list), "");
}
#endif  // PA_CONFIG(STARSCAN_EAGER_DOUBLE_FREE_DETECTION_ENABLED)
#endif  // defined(GTEST_HAS_DEATH_TEST) && !BUILDFLAG(IS_ANDROID)

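// Same as TestDanglingReference(), but drives the cycle through the safepoint
// machinery: the scan is scheduled, joined from the mutator thread and only
// then finished by the scanner, with quarantine/freelist state checked at each
// step.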
template <typename SourceList, typename ValueList>
void TestDanglingReferenceWithSafepoint(PartitionAllocPCScanTest& test,
                                        SourceList* source,
                                        ValueList* value,
                                        PartitionRoot& value_root) {
  {
    // Free |value| and leave the dangling reference in |source|.
    ValueList::Destroy(value_root, value);
    // Check that |value| is in the quarantine now.
    EXPECT_TRUE(test.IsInQuarantine(value));
    // Schedule PCScan but don't scan.
    test.SchedulePCScan();
    // Enter safepoint and scan from mutator.
    test.JoinPCScanAsMutator();
    // Check that the object is still quarantined since it's referenced by
    // |source|.
    EXPECT_TRUE(test.IsInQuarantine(value));
    // Check that |value| is not in the freelist.
    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
    // Run sweeper.
    test.FinishPCScanAsScanner();
    // Check that |value| still exists.
    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
  }
  {
    // Get rid of the dangling reference.
    source->next = nullptr;
    // Schedule PCScan but don't scan.
    test.SchedulePCScan();
    // Enter safepoint and scan from mutator.
    test.JoinPCScanAsMutator();
    // Check that |value| is not in the freelist yet, since sweeper didn't run.
    EXPECT_FALSE(IsInFreeList(test.root().ObjectToSlotStart(value)));
    test.FinishPCScanAsScanner();
    // Check that the object is no longer in the quarantine.
    EXPECT_FALSE(test.IsInQuarantine(value));
    // Check that |value| is in the freelist now.
    EXPECT_TRUE(IsInFreeList(test.root().ObjectToSlotStart(value)));
  }
}

TEST_F(PartitionAllocPCScanTest, Safepoint) {
  using SourceList = List<64>;
  using ValueList = SourceList;

  DisableStackScanningScope no_stack_scanning;

  auto* source = SourceList::Create(root());
  auto* value = ValueList::Create(root());
  source->next = value;

  TestDanglingReferenceWithSafepoint(*this, source, value, root());
}

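// Fixture that writes a dangling reference into a stack slot so that stack
// scanning (when enabled) keeps the quarantined object from being reclaimed.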
class PartitionAllocPCScanStackScanningTest : public PartitionAllocPCScanTest {
 protected:
  // Creates and sets a dangling reference in `dangling_reference_`.
  PA_NOINLINE void CreateDanglingReference() {
    using ValueList = List<8>;
    auto* value = ValueList::Create(root(), nullptr);
    ValueList::Destroy(root(), value);
    dangling_reference_ = value;
  }

  PA_NOINLINE void SetupAndRunTest() {
    // Register the top of the stack to be the current pointer.
    StackTopRegistry::Get().NotifyThreadCreated();
    RunTest();
  }

  PA_NOINLINE void RunTest() {
    // This writes the pointer to the stack.
    [[maybe_unused]] auto* volatile stack_ref = dangling_reference_;
    // Call the non-inline function that would scan the stack. Don't execute
    // the rest of the actions inside the function, since otherwise it would
    // be tail-call optimized and the parent frame's stack with the dangling
    // pointer would be missed.
    ScanStack();
    // Check that the object is still quarantined since it's referenced by
    // |dangling_reference_|.
    EXPECT_TRUE(IsInQuarantine(dangling_reference_));
    // Check that value is not in the freelist.
    EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(dangling_reference_)));
    // Run sweeper.
    FinishPCScanAsScanner();
    // Check that |dangling_reference_| still exists.
    EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(dangling_reference_)));
  }

  PA_NOINLINE void ScanStack() {
    // Schedule PCScan but don't scan.
    SchedulePCScan();
    // Enter safepoint and scan from mutator. This will scan the stack.
    JoinPCScanAsMutator();
  }

  static void* dangling_reference_;
};

// static
void* PartitionAllocPCScanStackScanningTest::dangling_reference_ = nullptr;

// The test currently fails on some platforms because the dangling reference on
// the stack is not found.
TEST_F(PartitionAllocPCScanStackScanningTest, DISABLED_StackScanning) {
  PCScan::EnableStackScanning();

  // Set to nullptr if the test is retried.
  dangling_reference_ = nullptr;

  CreateDanglingReference();

  SetupAndRunTest();
}

TEST_F(PartitionAllocPCScanTest, DontScanUnusedRawSize) {
  using ValueList = List<8>;

  // Make sure to commit more memory than requested to have slack for storing a
  // dangling reference outside of the raw size.
  const size_t big_size = kMaxBucketed - SystemPageSize() + 1;
  void* ptr = root().Alloc(big_size);

  uintptr_t slot_start = root().ObjectToSlotStart(ptr);
  auto* slot_span = SlotSpanMetadata::FromSlotStart(slot_start);
  ASSERT_TRUE(big_size + sizeof(void*) <=
              root().AllocationCapacityFromSlotStart(slot_start));
  ASSERT_TRUE(slot_span->CanStoreRawSize());

  auto* value = ValueList::Create(root());

  // This not only points past the object, but past all extras around it.
  // However, there should be enough space between this and the end of slot, to
  // store some data.
  uintptr_t source_end =
      slot_start +
      base::bits::AlignUp(slot_span->GetRawSize(), alignof(ValueList*));
  // Write the pointer.
  // Since we stripped the MTE-tag to get |slot_start|, we need to retag it.
  *static_cast<ValueList**>(TagAddr(source_end)) = value;

  TestDanglingReferenceNotVisited(*this, value, root());
}

TEST_F(PartitionAllocPCScanTest, PointersToGuardPages) {
  struct Pointers {
    void* super_page;
    void* metadata_page;
    void* guard_page1;
    void* scan_bitmap;
    void* guard_page2;
  };
  auto* const pointers = static_cast<Pointers*>(
      root().Alloc<partition_alloc::AllocFlags::kNoHooks>(sizeof(Pointers)));

  // Converting to slot start strips MTE tag.
  const uintptr_t super_page =
      root().ObjectToSlotStart(pointers) & kSuperPageBaseMask;

  // Initialize scannable pointers with addresses of guard pages and metadata.
  // None of these point to an MTE-tagged area, so no need for retagging.
  pointers->super_page = reinterpret_cast<void*>(super_page);
  pointers->metadata_page = PartitionSuperPageToMetadataArea(super_page);
  pointers->guard_page1 =
      static_cast<char*>(pointers->metadata_page) + SystemPageSize();
  pointers->scan_bitmap = SuperPageStateBitmap(super_page);
  pointers->guard_page2 = reinterpret_cast<void*>(super_page + kSuperPageSize -
                                                  PartitionPageSize());

  // Simply run PCScan and expect no crashes.
  RunPCScan();
}

TEST_F(PartitionAllocPCScanTest, TwoDanglingPointersToSameObject) {
  using SourceList = List<8>;
  using ValueList = List<128>;

  auto* value = ValueList::Create(root(), nullptr);
  // Create two source objects referring to |value|.
  SourceList::Create(root(), value);
  SourceList::Create(root(), value);

  // Destroy |value| and run PCScan.
  ValueList::Destroy(root(), value);
  RunPCScan();
  EXPECT_TRUE(IsInQuarantine(value));

  // Check that accounted size after the cycle is only sizeof ValueList.
  auto* slot_span_metadata = SlotSpanMetadata::FromObject(value);
  const auto& quarantine =
      PCScan::scheduler().scheduling_backend().GetQuarantineData();
  EXPECT_EQ(slot_span_metadata->bucket->slot_size, quarantine.current_size);
}

TEST_F(PartitionAllocPCScanTest, DanglingPointerToInaccessibleArea) {
  static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
      static_cast<size_t>(PartitionPageSize() * 1.25);

  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
      root(), root().AdjustSizeForExtrasSubtract(
                  kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));

  // Assert that number of allocatable bytes for this bucket is smaller or equal
  // to all allocated partition pages.
  auto* bucket = full_slot_span.slot_span->bucket;
  ASSERT_LE(bucket->get_bytes_per_span(),
            bucket->get_pages_per_slot_span() * PartitionPageSize());

  // Let the first object point past the end of the last one + some random
  // offset.
  // It should fall within the same slot, so no need for MTE-retagging.
  static constexpr size_t kOffsetPastEnd = 7;
  *reinterpret_cast<uint8_t**>(full_slot_span.first) =
      reinterpret_cast<uint8_t*>(full_slot_span.last) +
      kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages + kOffsetPastEnd;

  // Destroy the last object and put it in quarantine.
  root().Free(full_slot_span.last);
  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));

  // Run PCScan. After it, the quarantined object should not be promoted.
  RunPCScan();
  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
}

TEST_F(PartitionAllocPCScanTest, DanglingPointerOutsideUsablePart) {
  using ValueList = List<kMaxBucketed - 4096>;
  using SourceList = List<64>;

  auto* value = ValueList::Create(root());
  auto* slot_span = SlotSpanMetadata::FromObject(value);
  ASSERT_TRUE(slot_span->CanStoreRawSize());

  auto* source = SourceList::Create(root());

  // Let the |source| object point to the unused area of |value| and expect
  // |value| to be nevertheless marked during scanning.
  // It should fall within the same slot, so no need for MTE-retagging.
  static constexpr size_t kOffsetPastEnd = 7;
  source->next = reinterpret_cast<ListBase*>(
      reinterpret_cast<uint8_t*>(value + 1) + kOffsetPastEnd);

  TestDanglingReference(*this, source, value, root());
}

#if BUILDFLAG(HAS_MEMORY_TAGGING)
TEST_F(PartitionAllocPCScanWithMTETest, QuarantineOnlyOnTagOverflow) {
  using ListType = List<64>;

  if (!base::CPU::GetInstanceNoAllocation().has_mte()) {
    return;
  }

  {
    auto* obj1 = ListType::Create(root());
    ListType::Destroy(root(), obj1);
    auto* obj2 = ListType::Create(root());
    // The test relies on unrandomized freelist! If the slot was not moved to
    // quarantine, assert that obj2 is the same as obj1 and the tags are
    // different.
    // MTE-retag |obj1|, as the tag changed when freeing it.
    if (!HasOverflowTag(TagPtr(obj1))) {
      // Assert that the pointer is the same.
      ASSERT_EQ(UntagPtr(obj1), UntagPtr(obj2));
      // Assert that the tag is different.
      ASSERT_NE(obj1, obj2);
    }
  }

  for (size_t i = 0; i < 16; ++i) {
    auto* obj = ListType::Create(root());
    ListType::Destroy(root(), obj);
    // MTE-retag |obj|, as the tag changed when freeing it.
    obj = TagPtr(obj);
    // Check if the tag overflows. If so, the object must be in quarantine.
    if (HasOverflowTag(obj)) {
      EXPECT_TRUE(IsInQuarantine(obj));
      EXPECT_FALSE(IsInFreeList(root().ObjectToSlotStart(obj)));
      return;
    } else {
      EXPECT_FALSE(IsInQuarantine(obj));
      EXPECT_TRUE(IsInFreeList(root().ObjectToSlotStart(obj)));
    }
  }

  EXPECT_FALSE(true && "Should never be reached");
}
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

}  // namespace

}  // namespace partition_alloc::internal

#endif  // BUILDFLAG(USE_STARSCAN)
#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)