// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_memory_allocator.h"

#include <atomic>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

#include "base/containers/span.h"
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/files/memory_mapped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/safe_sprintf.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/test/gtest_util.h"
#include "base/threading/simple_thread.h"
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"

namespace base {

namespace {

const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
const uint32_t TEST_ID = 12345;
const char TEST_NAME[] = "TestAllocator";

void SetFileLength(const base::FilePath& path, size_t length) {
  {
    File file(path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE);
    DCHECK(file.IsValid());
    ASSERT_TRUE(file.SetLength(static_cast<int64_t>(length)));
  }

  int64_t actual_length;
  DCHECK(GetFileSize(path, &actual_length));
  DCHECK_EQ(length, static_cast<size_t>(actual_length));
}

}  // namespace

typedef PersistentMemoryAllocator::Reference Reference;

class PersistentMemoryAllocatorTest : public testing::Test {
 public:
  // This can't be statically initialized because its value isn't defined
  // in the PersistentMemoryAllocator header file. Instead, it's simply set
  // in the constructor.
  uint32_t kAllocAlignment;

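  // Test objects declare the kPersistentTypeId and kExpectedInstanceSize
  // constants that the allocator's templated helpers use to tag allocations
  // and to verify the persisted layout, e.g. (sketch):
  //   TestObject1* obj = allocator->New<TestObject1>();
  //   Reference ref = allocator->GetAsReference(obj);
  //   TestObject1* same = allocator->GetAsObject<TestObject1>(ref);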
  struct TestObject1 {
    static constexpr uint32_t kPersistentTypeId = 1;
    static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
    int32_t onething;
    char oranother;
  };

  struct TestObject2 {
    static constexpr uint32_t kPersistentTypeId = 2;
    static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
    int64_t thiis;
    int32_t that;
    float andthe;
    double other;
    char thing[8];
  };

  PersistentMemoryAllocatorTest() {
    kAllocAlignment = GetAllocAlignment();
    mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
  }

  void SetUp() override {
    allocator_.reset();
    ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
    allocator_ = std::make_unique<PersistentMemoryAllocator>(
        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, TEST_ID,
        TEST_NAME, PersistentMemoryAllocator::kReadWrite);
  }

  void TearDown() override {
    allocator_.reset();
  }

  unsigned CountIterables() {
    PersistentMemoryAllocator::Iterator iter(allocator_.get());
    uint32_t type;
    unsigned count = 0;
    while (iter.GetNext(&type) != 0) {
      ++count;
    }
    return count;
  }

  static uint32_t GetAllocAlignment() {
    return PersistentMemoryAllocator::kAllocAlignment;
  }

 protected:
  std::unique_ptr<char[]> mem_segment_;
  std::unique_ptr<PersistentMemoryAllocator> allocator_;
};

TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
  allocator_->CreateTrackingHistograms(allocator_->Name());

  std::string base_name(TEST_NAME);
  EXPECT_EQ(TEST_ID, allocator_->Id());
  EXPECT_TRUE(allocator_->used_histogram_);
  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
            allocator_->used_histogram_->histogram_name());
  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
            allocator_->GetMemoryState());

  // Get base memory info for later comparison.
  PersistentMemoryAllocator::MemoryInfo meminfo0;
  allocator_->GetMemoryInfo(&meminfo0);
  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
  EXPECT_GT(meminfo0.total, meminfo0.free);

  // Validate allocation of test object and make sure it can be referenced
  // and all metadata looks correct.
  TestObject1* obj1 = allocator_->New<TestObject1>();
  ASSERT_TRUE(obj1);
  Reference block1 = allocator_->GetAsReference(obj1);
  ASSERT_NE(0U, block1);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
            allocator_->GetAllocSize(block1));
  PersistentMemoryAllocator::MemoryInfo meminfo1;
  allocator_->GetMemoryInfo(&meminfo1);
  EXPECT_EQ(meminfo0.total, meminfo1.total);
  EXPECT_GT(meminfo0.free, meminfo1.free);

  // Verify that pointers can be turned back into references and that invalid
  // addresses return null.
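  // A type_id of 0 acts as a wildcard here; a mismatched non-zero type, a
  // pointer into the middle of an allocation, or a pointer outside the
  // segment all yield a null reference, as the expectations below verify.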
  char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
  ASSERT_TRUE(memory1);
  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
  EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));

  // Ensure that the test-object can be made iterable.
  PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
  EXPECT_EQ(0U, iter1a.GetLast());
  uint32_t type;
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  allocator_->MakeIterable(block1);
  EXPECT_EQ(block1, iter1a.GetNext(&type));
  EXPECT_EQ(1U, type);
  EXPECT_EQ(block1, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  EXPECT_EQ(block1, iter1a.GetLast());

  // Create second test-object and ensure everything is good and it cannot
  // be confused with test-object of another type.
  TestObject2* obj2 = allocator_->New<TestObject2>();
  ASSERT_TRUE(obj2);
  Reference block2 = allocator_->GetAsReference(obj2);
  ASSERT_NE(0U, block2);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
            allocator_->GetAllocSize(block2));
  PersistentMemoryAllocator::MemoryInfo meminfo2;
  allocator_->GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_GT(meminfo1.free, meminfo2.free);

  // Ensure that second test-object can also be made iterable.
  allocator_->MakeIterable(obj2);
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(2U, type);
  EXPECT_EQ(block2, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  EXPECT_EQ(block2, iter1a.GetLast());

  // Check that the iterator can be reset to the beginning.
  iter1a.Reset();
  EXPECT_EQ(0U, iter1a.GetLast());
  EXPECT_EQ(block1, iter1a.GetNext(&type));
  EXPECT_EQ(block1, iter1a.GetLast());
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(block2, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Check that the iterator can be reset to an arbitrary location.
  iter1a.Reset(block1);
  EXPECT_EQ(block1, iter1a.GetLast());
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(block2, iter1a.GetLast());
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Check that iteration can begin after an arbitrary location.
  PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
  EXPECT_EQ(block2, iter1b.GetNext(&type));
  EXPECT_EQ(0U, iter1b.GetNext(&type));

  // Ensure nothing has gone noticeably wrong.
  EXPECT_FALSE(allocator_->IsFull());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Check the internal histogram record of used memory.
  allocator_->UpdateTrackingHistograms();
  std::unique_ptr<HistogramSamples> used_samples(
      allocator_->used_histogram_->SnapshotSamples());
  EXPECT_TRUE(used_samples);
  EXPECT_EQ(1, used_samples->TotalCount());

  // Check that an object's type can be changed.
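  // ChangeType() takes the destination type, the expected current type as a
  // guard, and a clear flag; the in-place New<T>() overload below likewise
  // converts the allocation from type 3 back to TestObject2's type 2.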
  EXPECT_EQ(2U, allocator_->GetType(block2));
  allocator_->ChangeType(block2, 3, 2, false);
  EXPECT_EQ(3U, allocator_->GetType(block2));
  allocator_->New<TestObject2>(block2, 3, false);
  EXPECT_EQ(2U, allocator_->GetType(block2));

  // Create second allocator (read/write) using the same memory segment.
  std::unique_ptr<PersistentMemoryAllocator> allocator2(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "",
                                    PersistentMemoryAllocator::kReadWrite));
  EXPECT_EQ(TEST_ID, allocator2->Id());
  EXPECT_FALSE(allocator2->used_histogram_);

  // Ensure that iteration and access through second allocator works.
  PersistentMemoryAllocator::Iterator iter2(allocator2.get());
  EXPECT_EQ(block1, iter2.GetNext(&type));
  EXPECT_EQ(block2, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));

  // Create a third allocator (read-only) using the same memory segment.
  std::unique_ptr<const PersistentMemoryAllocator> allocator3(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "",
                                    PersistentMemoryAllocator::kReadOnly));
  EXPECT_EQ(TEST_ID, allocator3->Id());
  EXPECT_FALSE(allocator3->used_histogram_);

  // Ensure that iteration and access through third allocator works.
  PersistentMemoryAllocator::Iterator iter3(allocator3.get());
  EXPECT_EQ(block1, iter3.GetNext(&type));
  EXPECT_EQ(block2, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));

  // Ensure that GetNextOfType works.
  PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
  EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
  EXPECT_EQ(0U, iter1c.GetNextOfType(2));

  // Ensure that GetNextOfObject works.
  PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
  EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
  EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());

  // Ensure that deleting an object works.
  allocator_->Delete(obj2);
  PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
  EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());

  // Ensure that the memory state can be set.
  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
            allocator_->GetMemoryState());
}

TEST_F(PersistentMemoryAllocatorTest, PageTest) {
  // This allocation will go into the first memory page.
  Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
  EXPECT_LT(0U, block1);
  EXPECT_GT(TEST_MEMORY_PAGE, block1);

  // This allocation won't fit in same page as previous block.
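  // Allocations never span a page boundary; a block that cannot fit in the
  // remainder of the current page is placed at the start of the next one.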
  Reference block2 =
      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
  EXPECT_EQ(TEST_MEMORY_PAGE, block2);

  // This allocation will also require a new page.
  Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
  EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
}

// A simple thread that takes an allocator and repeatedly allocates random-
// sized chunks from it until no more can be done.
class AllocatorThread : public SimpleThread {
 public:
  AllocatorThread(const std::string& name,
                  void* base,
                  uint32_t size,
                  uint32_t page_size)
      : SimpleThread(name, Options()),
        count_(0),
        iterable_(0),
        allocator_(base,
                   size,
                   page_size,
                   0,
                   "",
                   PersistentMemoryAllocator::kReadWrite) {}

  void Run() override {
    for (;;) {
      uint32_t size = RandInt(1, 99);
      uint32_t type = RandInt(100, 999);
      Reference block = allocator_.Allocate(size, type);
      if (!block)
        break;

      count_++;
      if (RandInt(0, 1)) {
        allocator_.MakeIterable(block);
        iterable_++;
      }
    }
  }

  unsigned iterable() { return iterable_; }
  unsigned count() { return count_; }

 private:
  unsigned count_;
  unsigned iterable_;
  PersistentMemoryAllocator allocator_;
};

// Test parallel allocation/iteration and ensure consistency across all
// instances.
TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
  void* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // While the threads run, the number of iterable objects must never
  // decrease. Track the previous count so the expectation is meaningful.
  unsigned last_count = 0;
  do {
    unsigned count = CountIterables();
    EXPECT_LE(last_count, count);
    last_count = count;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(CountIterables(),
            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
                t5.iterable());
}

// A simple thread that makes all objects passed iterable.
class MakeIterableThread : public SimpleThread {
 public:
  MakeIterableThread(const std::string& name,
                     PersistentMemoryAllocator* allocator,
                     span<Reference> refs)
      : SimpleThread(name, Options()), allocator_(allocator), refs_(refs) {}

  void Run() override {
    for (Reference ref : refs_) {
      allocator_->MakeIterable(ref);
    }
  }

 private:
  raw_ptr<PersistentMemoryAllocator> allocator_;
  span<Reference> refs_;
};

// Verifies that multiple threads making the same objects iterable doesn't
// cause any problems.
TEST_F(PersistentMemoryAllocatorTest, MakeIterableSameRefsTest) {
  std::vector<Reference> refs;

  // Fill up the allocator until it is full.
  Reference ref;
  while ((ref = allocator_->Allocate(/*size=*/1, /*type=*/0)) != 0) {
    refs.push_back(ref);
  }

  ASSERT_TRUE(allocator_->IsFull());
  ASSERT_FALSE(allocator_->IsCorrupt());

  // Run two threads in parallel to make all objects in the allocator iterable.
  MakeIterableThread t1("t1", allocator_.get(), refs);
  MakeIterableThread t2("t2", allocator_.get(), refs);
  t1.Start();
  t2.Start();

  t1.Join();
  t2.Join();

  EXPECT_EQ(CountIterables(), refs.size());
}

// A simple thread that counts objects by iterating through an allocator.
class CounterThread : public SimpleThread {
 public:
  CounterThread(const std::string& name,
                PersistentMemoryAllocator::Iterator* iterator,
                Lock* lock,
                ConditionVariable* condition,
                bool* wake_up)
      : SimpleThread(name, Options()),
        iterator_(iterator),
        lock_(lock),
        condition_(condition),
        count_(0),
        wake_up_(wake_up) {}

  CounterThread(const CounterThread&) = delete;
  CounterThread& operator=(const CounterThread&) = delete;

  void Run() override {
    // Wait so all threads can start at approximately the same time.
    // Best performance comes from releasing a single worker which then
    // releases the next, etc., etc.
    {
      AutoLock autolock(*lock_);

      // Before calling Wait(), make sure that the wake up condition
      // has not already passed. Also, since spurious signal events
      // are possible, check the condition in a while loop to make
      // sure that the wake up condition is met when this thread
      // returns from the Wait().
      // See usage comments in src/base/synchronization/condition_variable.h.
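      // Note the Signal() after the Wait() below: each awakened thread wakes
      // the next, so the single Signal() from the test body releases all
      // waiting threads in a chain.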
      while (!*wake_up_) {
        condition_->Wait();
        condition_->Signal();
      }
    }

    uint32_t type;
    while (iterator_->GetNext(&type) != 0) {
      ++count_;
    }
  }

  unsigned count() { return count_; }

 private:
  raw_ptr<PersistentMemoryAllocator::Iterator> iterator_;
  raw_ptr<Lock> lock_;
  raw_ptr<ConditionVariable> condition_;
  unsigned count_;
  raw_ptr<bool> wake_up_;
};

// Ensure that parallel iteration returns the same number of objects as
// single-threaded iteration.
TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
  // Fill the memory segment with random allocations.
  unsigned iterable_count = 0;
  for (;;) {
    uint32_t size = RandInt(1, 99);
    uint32_t type = RandInt(100, 999);
    Reference block = allocator_->Allocate(size, type);
    if (!block)
      break;
    allocator_->MakeIterable(block);
    ++iterable_count;
  }
  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(iterable_count, CountIterables());

  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  Lock lock;
  ConditionVariable condition(&lock);
  bool wake_up = false;

  CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
  CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
  CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
  CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
  CounterThread t5("t5", &iter, &lock, &condition, &wake_up);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // Take the lock and set the wake up condition to true. This helps to
  // avoid a race condition where the Signal() event is called before
  // all the threads have reached the Wait() and thus never get woken up.
  {
    AutoLock autolock(lock);
    wake_up = true;
  }

  // This will release all the waiting threads.
  condition.Signal();

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_EQ(iterable_count,
            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());

#if 0
  // These checks ensure that the threads don't run sequentially. They
  // shouldn't be enabled in general because they could lead to a flaky test
  // simply by chance, but they are useful during development to ensure that
  // the test is working correctly.
  EXPECT_NE(iterable_count, t1.count());
  EXPECT_NE(iterable_count, t2.count());
  EXPECT_NE(iterable_count, t3.count());
  EXPECT_NE(iterable_count, t4.count());
  EXPECT_NE(iterable_count, t5.count());
#endif
}

TEST_F(PersistentMemoryAllocatorTest, DelayedAllocationTest) {
  std::atomic<Reference> ref1, ref2;
  ref1.store(0, std::memory_order_relaxed);
  ref2.store(0, std::memory_order_relaxed);
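  // da2a through da2d share one atomic reference and therefore one 200-byte
  // allocation of type 2002; the final constructor argument is each view's
  // byte offset within that shared allocation.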
  DelayedPersistentAllocation da1(allocator_.get(), &ref1, 1001u, 100u);
  DelayedPersistentAllocation da2a(allocator_.get(), &ref2, 2002u, 200u, 0u);
  DelayedPersistentAllocation da2b(allocator_.get(), &ref2, 2002u, 200u, 5u);
  DelayedPersistentAllocation da2c(allocator_.get(), &ref2, 2002u, 200u, 8u);
  DelayedPersistentAllocation da2d(allocator_.get(), &ref2, 2002u, 200u, 13u);

  // Nothing should yet have been allocated.
  uint32_t type;
  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  EXPECT_EQ(0U, iter.GetNext(&type));

  // Do first delayed allocation and check that a new persistent object exists.
  EXPECT_EQ(0U, da1.reference());
  span<uint8_t> mem1 = da1.Get<uint8_t>();
  ASSERT_FALSE(mem1.empty());
  EXPECT_NE(0U, da1.reference());
  EXPECT_EQ(allocator_->GetAsReference(mem1.data(), 1001u),
            ref1.load(std::memory_order_relaxed));
  allocator_->MakeIterable(da1.reference());
  EXPECT_NE(0U, iter.GetNext(&type));
  EXPECT_EQ(1001U, type);
  EXPECT_EQ(0U, iter.GetNext(&type));

  // Do second delayed allocation and check.
  span<uint8_t> mem2a = da2a.Get<uint8_t>();
  ASSERT_EQ(mem2a.size(), 200u);
  EXPECT_EQ(allocator_->GetAsReference(mem2a.data(), 2002u),
            ref2.load(std::memory_order_relaxed));
  allocator_->MakeIterable(da2a.reference());
  EXPECT_NE(0U, iter.GetNext(&type));
  EXPECT_EQ(2002U, type);
  EXPECT_EQ(0U, iter.GetNext(&type));

  // Third allocation should just return offset into second allocation.
  span<uint8_t> mem2b = da2b.Get<uint8_t>();
  ASSERT_EQ(mem2b.size(), 200u - 5u);
  allocator_->MakeIterable(da2b.reference());
  EXPECT_EQ(0U, iter.GetNext(&type));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(mem2a.data()) + 5u,
            reinterpret_cast<uintptr_t>(mem2b.data()));

  // Test Get<>() with a larger type than uint8_t, which gives us another
  // span into the second allocation.
  span<uint32_t> mem2c = da2c.Get<uint32_t>();
  ASSERT_EQ(mem2c.size(), (200u - 8u) / sizeof(uint32_t));
  allocator_->MakeIterable(da2c.reference());
  EXPECT_EQ(0U, iter.GetNext(&type));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(mem2a.data()) + 8u,
            reinterpret_cast<uintptr_t>(mem2c.data()));

  // This allocation offset is misaligned for the uint32_t type, so it should
  // not succeed.
  EXPECT_CHECK_DEATH(da2d.Get<uint32_t>());
}

// This test doesn't verify anything other than it doesn't crash. Its goal
// is to find coding errors that aren't otherwise tested for, much like a
// "fuzzer" would.
// This test is supposed to fail on TSAN bot (crbug.com/579867).
#if defined(THREAD_SANITIZER)
#define MAYBE_CorruptionTest DISABLED_CorruptionTest
#else
#define MAYBE_CorruptionTest CorruptionTest
#endif
TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
  char* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  do {
    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
    char value = RandInt(0, 255);
    memory[offset] = value;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  CountIterables();
}

// Attempt to cause crashes or loops by expressly creating dangerous
// conditions.
TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
  allocator_->MakeIterable(block1);
  allocator_->MakeIterable(block2);
  allocator_->MakeIterable(block3);
  allocator_->MakeIterable(block4);
  allocator_->MakeIterable(block5);
  EXPECT_EQ(5U, CountIterables());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Create loop in iterable list and ensure it doesn't hang. The return value
  // from CountIterables() in these cases is unpredictable. If there is a
  // failure, the call will hang and the test will be killed for taking too
  // long.
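  // A block header is four 32-bit words, the last being the "next" link of
  // the iterable list, so header4[3] holds block4's link to the following
  // iterable block; the first expectation below confirms this.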
  uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
  EXPECT_EQ(block5, header4[3]);
  header4[3] = block4;
  CountIterables();  // loop: 1-2-3-4-4
  EXPECT_TRUE(allocator_->IsCorrupt());

  // Test where loop goes back to previous block.
  header4[3] = block3;
  CountIterables();  // loop: 1-2-3-4-3

  // Test where loop goes back to the beginning.
  header4[3] = block1;
  CountIterables();  // loop: 1-2-3-4-1
}


//----- LocalPersistentMemoryAllocator -----------------------------------------

TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
  EXPECT_EQ(42U, allocator.Id());
  EXPECT_NE(0U, allocator.Allocate(24, 1));
  EXPECT_FALSE(allocator.IsFull());
  EXPECT_FALSE(allocator.IsCorrupt());
}

//----- {Writable,ReadOnly}SharedPersistentMemoryAllocator ---------------------

TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
  base::WritableSharedMemoryRegion rw_region =
      base::WritableSharedMemoryRegion::Create(TEST_MEMORY_SIZE);
  ASSERT_TRUE(rw_region.IsValid());

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    base::WritableSharedMemoryMapping mapping = rw_region.Map();
    ASSERT_TRUE(mapping.IsValid());
    WritableSharedPersistentMemoryAllocator local(std::move(mapping), TEST_ID,
                                                  "");
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456, false);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());
  }

  // Create writable and read-only mappings of the same region.
  base::WritableSharedMemoryMapping rw_mapping = rw_region.Map();
  ASSERT_TRUE(rw_mapping.IsValid());
  base::ReadOnlySharedMemoryRegion ro_region =
      base::WritableSharedMemoryRegion::ConvertToReadOnly(std::move(rw_region));
  ASSERT_TRUE(ro_region.IsValid());
  base::ReadOnlySharedMemoryMapping ro_mapping = ro_region.Map();
  ASSERT_TRUE(ro_mapping.IsValid());

  // Read-only test.
  ReadOnlySharedPersistentMemoryAllocator shalloc2(std::move(ro_mapping), 0,
                                                   "");
  EXPECT_TRUE(shalloc2.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc2.Id());
  EXPECT_FALSE(shalloc2.IsFull());
  EXPECT_FALSE(shalloc2.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
  uint32_t type;
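  // r456 was never made iterable, so iteration yields only r123 and r789.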
  EXPECT_EQ(r123, iter2.GetNext(&type));
  EXPECT_EQ(r789, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));

  EXPECT_EQ(123U, shalloc2.GetType(r123));
  EXPECT_EQ(654U, shalloc2.GetType(r456));
  EXPECT_EQ(789U, shalloc2.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  shalloc2.GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_EQ(meminfo1.free, meminfo2.free);

  // Read/write test.
  WritableSharedPersistentMemoryAllocator shalloc3(std::move(rw_mapping), 0,
                                                   "");
  EXPECT_FALSE(shalloc3.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc3.Id());
  EXPECT_FALSE(shalloc3.IsFull());
  EXPECT_FALSE(shalloc3.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
  EXPECT_EQ(r123, iter3.GetNext(&type));
  EXPECT_EQ(r789, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));

  EXPECT_EQ(123U, shalloc3.GetType(r123));
  EXPECT_EQ(654U, shalloc3.GetType(r456));
  EXPECT_EQ(789U, shalloc3.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo3;
  shalloc3.GetMemoryInfo(&meminfo3);
  EXPECT_EQ(meminfo1.total, meminfo3.total);
  EXPECT_EQ(meminfo1.free, meminfo3.free);

  // Interconnectivity test.
  Reference obj = shalloc3.Allocate(42, 42);
  ASSERT_TRUE(obj);
  shalloc3.MakeIterable(obj);
  EXPECT_EQ(obj, iter2.GetNext(&type));
  EXPECT_EQ(42U, type);

  // Clear-on-change test.
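  // The final ChangeType() argument selects clear-on-change: false preserves
  // the existing contents while true zero-fills the allocation.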
  Reference data_ref = shalloc3.Allocate(sizeof(int) * 4, 911);
  int* data = shalloc3.GetAsArray<int>(data_ref, 911, 4);
  ASSERT_TRUE(data);
  data[0] = 0;
  data[1] = 1;
  data[2] = 2;
  data[3] = 3;
  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 119, 911, false));
  EXPECT_EQ(0, data[0]);
  EXPECT_EQ(1, data[1]);
  EXPECT_EQ(2, data[2]);
  EXPECT_EQ(3, data[3]);
  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 191, 119, true));
  EXPECT_EQ(0, data[0]);
  EXPECT_EQ(0, data[1]);
  EXPECT_EQ(0, data[2]);
  EXPECT_EQ(0, data[3]);
}

#if !BUILDFLAG(IS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------

TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("persistent_memory");

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456, false);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  ASSERT_TRUE(mmfile->Initialize(file_path));
  EXPECT_TRUE(mmfile->IsValid());
  const size_t mmlength = mmfile->length();
  EXPECT_GE(meminfo1.total, mmlength);

  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "",
                                     FilePersistentMemoryAllocator::kReadWrite);
  EXPECT_FALSE(file.IsReadonly());
  EXPECT_EQ(TEST_ID, file.Id());
  EXPECT_FALSE(file.IsFull());
  EXPECT_FALSE(file.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter(&file);
  uint32_t type;
  EXPECT_EQ(r123, iter.GetNext(&type));
  EXPECT_EQ(r789, iter.GetNext(&type));
  EXPECT_EQ(0U, iter.GetNext(&type));

  EXPECT_EQ(123U, file.GetType(r123));
  EXPECT_EQ(654U, file.GetType(r456));
  EXPECT_EQ(789U, file.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  file.GetMemoryInfo(&meminfo2);
  EXPECT_GE(meminfo1.total, meminfo2.total);
  EXPECT_GE(meminfo1.free, meminfo2.free);
  EXPECT_EQ(mmlength, meminfo2.total);
  EXPECT_EQ(0U, meminfo2.free);

  // There's no way of knowing if Flush actually does anything but at least
  // verify that it runs without CHECK violations.
  file.Flush(false);
  file.Flush(true);
}

TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("extend_test");
  MemoryMappedFile::Region region = {0, 16 << 10};  // 16 KiB maximum size.

  // Start with a small but valid file of persistent data.
  ASSERT_FALSE(PathExists(file_path));
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    local.Allocate(1, 1);
    local.Allocate(11, 11);

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }
  ASSERT_TRUE(PathExists(file_path));
  int64_t before_size;
  ASSERT_TRUE(GetFileSize(file_path, &before_size));

  // Map it as an extendable read/write file and append to it.
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    ASSERT_TRUE(mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND));
    FilePersistentMemoryAllocator allocator(
        std::move(mmfile), region.size, 0, "",
        FilePersistentMemoryAllocator::kReadWrite);
    EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());

    allocator.Allocate(111, 111);
    EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
  }

  // Validate that append worked.
  int64_t after_size;
  ASSERT_TRUE(GetFileSize(file_path, &after_size));
  EXPECT_LT(before_size, after_size);

  // Verify that it's still an acceptable file.
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    ASSERT_TRUE(mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND));
    EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
    EXPECT_TRUE(
        FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
  }
}

TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
  const uint32_t kAllocAlignment =
      PersistentMemoryAllocatorTest::GetAllocAlignment();
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());

  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
  local.MakeIterable(local.Allocate(1, 1));
  local.MakeIterable(local.Allocate(11, 11));
  const size_t minsize = local.used();
  std::unique_ptr<char[]> garbage(new char[minsize]);
  RandBytes(garbage.get(), minsize);

  std::unique_ptr<MemoryMappedFile> mmfile;
  char filename[100];
  for (size_t filesize = minsize; filesize > 0; --filesize) {
    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
    FilePath file_path = temp_dir.GetPath().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)local.data(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    // Request read/write access for some sizes that are a multiple of the
    // allocator's alignment size. The allocator is strict about file size
    // being a multiple of its internal alignment when doing read/write access.
    const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
    const uint32_t file_flags =
        File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
    const MemoryMappedFile::Access map_access =
        read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;

    mmfile = std::make_unique<MemoryMappedFile>();
    ASSERT_TRUE(mmfile->Initialize(File(file_path, file_flags), map_access));
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(
          std::move(mmfile), 0, 0, "",
          read_only ? FilePersistentMemoryAllocator::kReadOnly
                    : FilePersistentMemoryAllocator::kReadWrite);
      // Also make sure that iteration doesn't crash.
      PersistentMemoryAllocator::Iterator iter(&allocator);
      uint32_t type_id;
      Reference ref;
      while ((ref = iter.GetNext(&type_id)) != 0) {
        const char* data = allocator.GetAsArray<char>(
            ref, 0, PersistentMemoryAllocator::kSizeAny);
        uint32_t type = allocator.GetType(ref);
        size_t size = allocator.GetAllocSize(ref);
        // Ensure compiler can't optimize-out above variables.
        (void)data;
        (void)type;
        (void)size;
      }

      // Ensure that short files are detected as corrupt and full files are
      // not.
      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_LT(filesize, minsize);
    }

    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
    file_path = temp_dir.GetPath().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)garbage.get(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    mmfile = std::make_unique<MemoryMappedFile>();
    ASSERT_TRUE(mmfile->Initialize(File(file_path, file_flags), map_access));
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(
          std::move(mmfile), 0, 0, "",
          read_only ? FilePersistentMemoryAllocator::kReadOnly
                    : FilePersistentMemoryAllocator::kReadWrite);
      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_GT(minsize, filesize);
    }
  }
}

TEST_F(PersistentMemoryAllocatorTest, TruncateTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.GetPath().AppendASCII("truncate_test");

  // Start with a small but valid file of persistent data. Keep the "used"
  // amount for both allocations.
  Reference a1_ref;
  Reference a2_ref;
  size_t a1_used;
  size_t a2_used;
  ASSERT_FALSE(PathExists(file_path));
  {
    LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, TEST_ID, "");
    a1_ref = allocator.Allocate(100 << 10, 1);
    allocator.MakeIterable(a1_ref);
    a1_used = allocator.used();
    a2_ref = allocator.Allocate(200 << 10, 11);
    allocator.MakeIterable(a2_ref);
    a2_used = allocator.used();

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, static_cast<const char*>(allocator.data()),
                 allocator.size());
  }
  ASSERT_TRUE(PathExists(file_path));
  EXPECT_LE(a1_used, a2_ref);

  // Truncate the file to various lengths (everything, only the first
  // allocation, and less than the first allocation) and make sure it can
  // still be read, both with read-write and read-only access.
  for (size_t file_length : {a2_used, a1_used, a1_used / 2}) {
    SCOPED_TRACE(StringPrintf("file_length=%zu", file_length));
    SetFileLength(file_path, file_length);

    for (bool read_only : {false, true}) {
      SCOPED_TRACE(StringPrintf("read_only=%s", read_only ? "true" : "false"));

      std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
      ASSERT_TRUE(mmfile->Initialize(
          File(file_path, File::FLAG_OPEN |
                              (read_only ? File::FLAG_READ
                                         : File::FLAG_READ | File::FLAG_WRITE)),
          read_only ? MemoryMappedFile::READ_ONLY
                    : MemoryMappedFile::READ_WRITE));
      ASSERT_TRUE(
          FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only));

      FilePersistentMemoryAllocator allocator(
          std::move(mmfile), 0, 0, "",
          read_only ? FilePersistentMemoryAllocator::kReadOnly
                    : FilePersistentMemoryAllocator::kReadWrite);

      PersistentMemoryAllocator::Iterator iter(&allocator);
      uint32_t type_id;
      EXPECT_EQ(file_length >= a1_used ? a1_ref : 0U, iter.GetNext(&type_id));
      EXPECT_EQ(file_length >= a2_used ? a2_ref : 0U, iter.GetNext(&type_id));
      EXPECT_EQ(0U, iter.GetNext(&type_id));

      // Ensure that short files are detected as corrupt and full files are
      // not.
      EXPECT_EQ(file_length < a2_used, allocator.IsCorrupt());
    }

    // Ensure that file length was not adjusted.
    int64_t actual_length;
    ASSERT_TRUE(GetFileSize(file_path, &actual_length));
    EXPECT_EQ(file_length, static_cast<size_t>(actual_length));
  }
}

#endif  // !BUILDFLAG(IS_NACL)

}  // namespace base