xref: /aosp_15_r20/external/cronet/net/disk_cache/backend_unittest.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <stdint.h>
6 
7 #include <memory>
8 #include <optional>
9 #include <string_view>
10 
11 #include "base/containers/queue.h"
12 #include "base/files/file.h"
13 #include "base/files/file_util.h"
14 #include "base/functional/bind.h"
15 #include "base/functional/callback.h"
16 #include "base/functional/callback_helpers.h"
17 #include "base/memory/memory_pressure_listener.h"
18 #include "base/memory/raw_ptr.h"
19 #include "base/metrics/field_trial.h"
20 #include "base/ranges/algorithm.h"
21 #include "base/run_loop.h"
22 #include "base/strings/string_number_conversions.h"
23 #include "base/strings/string_split.h"
24 #include "base/strings/string_util.h"
25 #include "base/strings/stringprintf.h"
26 #include "base/task/sequenced_task_runner.h"
27 #include "base/task/single_thread_task_runner.h"
28 #include "base/task/thread_pool.h"
29 #include "base/test/bind.h"
30 #include "base/test/metrics/histogram_tester.h"
31 #include "base/test/scoped_feature_list.h"
32 #include "base/test/simple_test_clock.h"
33 #include "base/threading/platform_thread.h"
34 #include "base/threading/thread_restrictions.h"
35 #include "base/time/time.h"
36 #include "base/trace_event/memory_allocator_dump.h"
37 #include "base/trace_event/process_memory_dump.h"
38 #include "build/build_config.h"
39 #include "net/base/cache_type.h"
40 #include "net/base/completion_once_callback.h"
41 #include "net/base/io_buffer.h"
42 #include "net/base/net_errors.h"
43 #include "net/base/request_priority.h"
44 #include "net/base/test_completion_callback.h"
45 #include "net/base/tracing.h"
46 #include "net/disk_cache/backend_cleanup_tracker.h"
47 #include "net/disk_cache/blockfile/backend_impl.h"
48 #include "net/disk_cache/blockfile/entry_impl.h"
49 #include "net/disk_cache/blockfile/experiments.h"
50 #include "net/disk_cache/blockfile/mapped_file.h"
51 #include "net/disk_cache/cache_util.h"
52 #include "net/disk_cache/disk_cache_test_base.h"
53 #include "net/disk_cache/disk_cache_test_util.h"
54 #include "net/disk_cache/memory/mem_backend_impl.h"
55 #include "net/disk_cache/simple/simple_backend_impl.h"
56 #include "net/disk_cache/simple/simple_entry_format.h"
57 #include "net/disk_cache/simple/simple_histogram_enums.h"
58 #include "net/disk_cache/simple/simple_index.h"
59 #include "net/disk_cache/simple/simple_synchronous_entry.h"
60 #include "net/disk_cache/simple/simple_test_util.h"
61 #include "net/disk_cache/simple/simple_util.h"
62 #include "net/test/gtest_util.h"
63 #include "testing/gmock/include/gmock/gmock.h"
64 #include "testing/gtest/include/gtest/gtest.h"
65 #include "third_party/abseil-cpp/absl/base/dynamic_annotations.h"
66 
67 using disk_cache::EntryResult;
68 using net::test::IsError;
69 using net::test::IsOk;
70 using testing::ByRef;
71 using testing::Contains;
72 using testing::Eq;
73 using testing::Field;
74 
75 #if BUILDFLAG(IS_WIN)
76 #include <windows.h>
77 
78 #include "base/win/scoped_handle.h"
79 #endif
80 
81 // TODO(crbug.com/949811): Fix memory leaks in tests and re-enable on LSAN.
82 #ifdef LEAK_SANITIZER
83 #define MAYBE_BlockFileOpenOrCreateEntry DISABLED_BlockFileOpenOrCreateEntry
84 #define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
85   DISABLED_NonEmptyCorruptSimpleCacheDoesNotRecover
86 #define MAYBE_SimpleOpenOrCreateEntry DISABLED_SimpleOpenOrCreateEntry
87 #else
88 #define MAYBE_BlockFileOpenOrCreateEntry BlockFileOpenOrCreateEntry
89 #define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
90   NonEmptyCorruptSimpleCacheDoesNotRecover
91 #define MAYBE_SimpleOpenOrCreateEntry SimpleOpenOrCreateEntry
92 #endif
93 
94 using base::Time;
95 
96 namespace {
97 
98 const char kExistingEntryKey[] = "existing entry key";
99 
CreateExistingEntryCache(const base::FilePath & cache_path)100 std::unique_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
101     const base::FilePath& cache_path) {
102   net::TestCompletionCallback cb;
103 
104   std::unique_ptr<disk_cache::BackendImpl> cache(
105       std::make_unique<disk_cache::BackendImpl>(cache_path,
106                                                 /* cleanup_tracker = */ nullptr,
107                                                 /* cache_thread = */ nullptr,
108                                                 net::DISK_CACHE,
109                                                 /* net_log = */ nullptr));
110   cache->Init(cb.callback());
111   if (cb.WaitForResult() != net::OK)
112     return nullptr;
113 
114   TestEntryResultCompletionCallback cb2;
115   EntryResult result =
116       cache->CreateEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
117   result = cb2.GetResult(std::move(result));
118   if (result.net_error() != net::OK)
119     return nullptr;
120 
121   return cache;
122 }
123 
124 #if BUILDFLAG(IS_FUCHSIA)
125 // Load tests with large numbers of file descriptors perform poorly on
126 // virtualized test execution environments.
127 // TODO(807882): Remove this workaround when virtualized test performance
128 // improves.
129 const int kLargeNumEntries = 100;
130 #else
131 const int kLargeNumEntries = 512;
132 #endif
133 
134 }  // namespace
135 
// Tests that can run with different types of caches (blockfile, memory-only,
// simple), selected by the Set*() helpers inherited from
// DiskCacheTestWithCache before InitCache() is called.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
 protected:
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);

  // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
  // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after doomed_start and 2 after doomed_end.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);

  // Creates entries with random keys, recording the keys in |key_pool|.
  // Returns false if creation fails or a duplicate key was produced.
  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
  // Iterates with |iter|, erasing each opened key from |keys_to_match| and
  // incrementing |*count|; stops after |max_to_open| entries if non-negative.
  bool EnumerateAndMatchKeys(int max_to_open,
                             TestIterator* iter,
                             std::set<std::string>* keys_to_match,
                             size_t* count);

  // Computes the expected size of entry metadata, i.e. the total size without
  // the actual data stored. This depends only on the entry's |key| size.
  int GetEntryMetadataSize(std::string key);

  // The Simple Backend only tracks the approximate sizes of entries. This
  // rounds the exact size appropriately.
  int GetRoundedSize(int exact_size);

  // Create a default key with the name provided, populate it with
  // CacheTestFillBuffer, and ensure this was done correctly.
  void CreateKeyAndCheck(disk_cache::Backend* cache, std::string key);

  // For the simple cache, wait until indexing has occurred and make sure
  // completes successfully.
  void WaitForSimpleCacheIndexAndCheck(disk_cache::Backend* cache);

  // Run all of the task runners until idle, covers cache worker pools.
  void RunUntilIdle();

  // Actual tests:
  void BackendBasics();
  void BackendKeying();
  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendShutdownWithPendingDoom();
  void BackendSetSize();
  void BackendLoad();
  void BackendChain();
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendDoomMidEnumeration();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendCalculateSizeOfAllEntries();
  void BackendCalculateSizeOfEntriesBetween(
      bool expect_access_time_range_comparisons);
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
  void BackendDisabledAPI();
  void BackendEviction();
  void BackendOpenOrCreateEntry();
  void BackendDeadOpenNextEntry();
  void BackendIteratorConcurrentDoom();
  void BackendValidateMigrated();
};
227 
CreateKeyAndCheck(disk_cache::Backend * cache,std::string key)228 void DiskCacheBackendTest::CreateKeyAndCheck(disk_cache::Backend* cache,
229                                              std::string key) {
230   const int kBufSize = 4 * 1024;
231   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
232   CacheTestFillBuffer(buffer->data(), kBufSize, true);
233   TestEntryResultCompletionCallback cb_entry;
234   disk_cache::EntryResult result =
235       cache->CreateEntry(key, net::HIGHEST, cb_entry.callback());
236   result = cb_entry.GetResult(std::move(result));
237   ASSERT_EQ(net::OK, result.net_error());
238   disk_cache::Entry* entry = result.ReleaseEntry();
239   EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
240   entry->Close();
241   RunUntilIdle();
242 }
243 
WaitForSimpleCacheIndexAndCheck(disk_cache::Backend * cache)244 void DiskCacheBackendTest::WaitForSimpleCacheIndexAndCheck(
245     disk_cache::Backend* cache) {
246   net::TestCompletionCallback wait_for_index_cb;
247   static_cast<disk_cache::SimpleBackendImpl*>(cache)->index()->ExecuteWhenReady(
248       wait_for_index_cb.callback());
249   int rv = wait_for_index_cb.WaitForResult();
250   ASSERT_THAT(rv, IsOk());
251   RunUntilIdle();
252 }
253 
// Drains every task queue the cache may use: the fixture's runners, the
// current thread's message loop, and the blockfile cache worker thread.
void DiskCacheBackendTest::RunUntilIdle() {
  DiskCacheTestWithCache::RunUntilIdle();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
}
259 
// Issues writes against a freshly-created entry until one returns
// net::ERR_IO_PENDING, and returns that value (or an error code).
// Requires either current-thread cache mode or the simple cache, because
// for the blockfile backend it bypasses the in-flight operation queue via
// WriteDataImpl().
int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  if (!use_current_thread_ && !simple_cache_mode_) {
    ADD_FAILURE();
    return net::ERR_FAILED;
  }

  TestEntryResultCompletionCallback create_cb;
  EntryResult entry_result;
  entry_result =
      cache_->CreateEntry("some key", net::HIGHEST, create_cb.callback());
  entry_result = create_cb.GetResult(std::move(entry_result));
  if (entry_result.net_error() != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;
  disk_cache::Entry* entry = entry_result.ReleaseEntry();

  const int kSize = 25000;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  int rv = net::OK;
  // Write 25000-byte chunks at 64 KiB intervals, up to 10 MiB, until the OS
  // reports pending IO.
  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call directly this method to make sure that the OS (instead
    // of us switching thread) is returning IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }

    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
  else
    entry->Close();

  return rv;
}
306 
// Adds 5 sparse entries, each written as two chunks far enough apart to be
// stored as separate sparse children. |doomed_start| and |doomed_end|, if
// not null, receive timestamps bracketing the "first"/"second" entries for
// use by the DoomEntriesSince / DoomEntriesBetween tests.
void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  InitCache();

  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = nullptr;
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_THAT(CreateEntry("zeroth", &entry0), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
  entry0->Close();

  // AddDelay() guarantees the timestamp below differs from the entries
  // written before and after it.
  FlushQueueForTest();
  AddDelay();
  if (doomed_start)
    *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
  entry1->Close();

  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
  entry2->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_end)
    *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  disk_cache::Entry* entry3 = nullptr;
  disk_cache::Entry* entry4 = nullptr;
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
  entry3->Close();
  entry4->Close();

  FlushQueueForTest();
  AddDelay();
}
370 
371 // Creates entries based on random keys. Stores these keys in |key_pool|.
CreateSetOfRandomEntries(std::set<std::string> * key_pool)372 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
373     std::set<std::string>* key_pool) {
374   const int kNumEntries = 10;
375   const int initial_entry_count = cache_->GetEntryCount();
376 
377   for (int i = 0; i < kNumEntries; ++i) {
378     std::string key = GenerateKey(true);
379     disk_cache::Entry* entry;
380     if (CreateEntry(key, &entry) != net::OK) {
381       return false;
382     }
383     key_pool->insert(key);
384     entry->Close();
385   }
386   return key_pool->size() ==
387          static_cast<size_t>(cache_->GetEntryCount() - initial_entry_count);
388 }
389 
390 // Performs iteration over the backend and checks that the keys of entries
391 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
392 // will be opened, if it is positive. Otherwise, iteration will continue until
393 // OpenNextEntry stops returning net::OK.
EnumerateAndMatchKeys(int max_to_open,TestIterator * iter,std::set<std::string> * keys_to_match,size_t * count)394 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
395     int max_to_open,
396     TestIterator* iter,
397     std::set<std::string>* keys_to_match,
398     size_t* count) {
399   disk_cache::Entry* entry;
400 
401   if (!iter)
402     return false;
403   while (iter->OpenNextEntry(&entry) == net::OK) {
404     if (!entry)
405       return false;
406     EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
407     entry->Close();
408     ++(*count);
409     if (max_to_open >= 0 && static_cast<int>(*count) >= max_to_open)
410       break;
411   };
412 
413   return true;
414 }
415 
GetEntryMetadataSize(std::string key)416 int DiskCacheBackendTest::GetEntryMetadataSize(std::string key) {
417   // For blockfile and memory backends, it is just the key size.
418   if (!simple_cache_mode_)
419     return key.size();
420 
421   // For the simple cache, we must add the file header and EOF, and that for
422   // every stream.
423   return disk_cache::kSimpleEntryStreamCount *
424          (sizeof(disk_cache::SimpleFileHeader) +
425           sizeof(disk_cache::SimpleFileEOF) + key.size());
426 }
427 
GetRoundedSize(int exact_size)428 int DiskCacheBackendTest::GetRoundedSize(int exact_size) {
429   if (!simple_cache_mode_)
430     return exact_size;
431 
432   return (exact_size + 255) & 0xFFFFFF00;
433 }
434 
// Exercises the basic entry lifecycle: create, open, doom, and entry counts,
// including dooming entries that are still open.
void DiskCacheBackendTest::BackendBasics() {
  InitCache();
  disk_cache::Entry *entry1 = nullptr, *entry2 = nullptr;
  // Opening a key that was never created must fail.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // Creating an already-existing key must fail; opening it must succeed.
  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  ASSERT_TRUE(nullptr != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Opening an entry that is already open returns the same object.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(OpenEntry("some other key", &entry3), IsOk());
  ASSERT_TRUE(nullptr != entry3);
  EXPECT_TRUE(entry2 == entry3);

  // Dooming an open entry removes it from the count immediately.
  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry1->Close();
  entry2->Close();
  entry3->Close();

  EXPECT_THAT(DoomEntry("the first key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());

  // Dooming via the entry itself and via the backend are equivalent.
  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  entry1->Doom();
  entry1->Close();
  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
  entry2->Close();
}
479 
// Run BackendBasics() against each backend flavor.
TEST_F(DiskCacheBackendTest, Basics) {
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
  SetNewEviction();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
  SetMemoryOnlyMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, AppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
  SetCacheType(net::SHADER_CACHE);
  BackendBasics();
}
503 
// Verifies key handling: case sensitivity, keys read from different buffer
// alignments, very long keys (block-file vs. external storage), and keys
// containing embedded NUL characters.
void DiskCacheBackendTest::BackendKeying() {
  InitCache();
  const char kName1[] = "the first key";
  const char kName2[] = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(kName1, &entry1), IsOk());

  ASSERT_THAT(CreateEntry(kName2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
  entry2->Close();

  // The same key presented at different buffer offsets must map to the same
  // entry: key identity depends on contents, not on the pointer value.
  char buffer[30];
  base::strlcpy(buffer, kName1, std::size(buffer));
  ASSERT_THAT(OpenEntry(buffer, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 1, kName1, std::size(buffer) - 1);
  ASSERT_THAT(OpenEntry(buffer + 1, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 3, kName1, std::size(buffer) - 3);
  ASSERT_THAT(OpenEntry(buffer + 3, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  // Now verify long keys.
  char buffer2[20000];
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
  entry2->Close();

  buffer2[1023] = 'g';
  buffer2[19999] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
  entry2->Close();
  entry1->Close();

  // Create entries with null terminator(s), and check equality. Note we create
  // the strings via the ctor instead of using literals because literals are
  // implicitly C strings which will stop at the first null terminator.
  std::string key1(4, '\0');
  key1[1] = 's';
  std::string key2(3, '\0');
  key2[1] = 's';
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Different lengths";
  EXPECT_EQ(entry1->GetKey(), key1);
  EXPECT_EQ(entry2->GetKey(), key2);
  entry1->Close();
  entry2->Close();
}
559 
// Run BackendKeying() against each backend flavor.
TEST_F(DiskCacheBackendTest, Keying) {
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
  SetNewEviction();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
  SetMemoryOnlyMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, AppCacheKeying) {
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
  SetCacheType(net::SHADER_CACHE);
  BackendKeying();
}
583 
// Checks both the private MemBackendImpl factory and the public
// CreateCacheBackend() API, for disk and memory backends.
TEST_F(DiskCacheTest, CreateBackend) {
  TestBackendResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    // Test the private factory method(s).
    std::unique_ptr<disk_cache::Backend> cache;
    cache = disk_cache::MemBackendImpl::CreateBackend(0, nullptr);
    ASSERT_TRUE(cache.get());
    cache.reset();

    // Now test the public API.

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, cache_path_, 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();

    rv = disk_cache::CreateCacheBackend(
        net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, base::FilePath(), 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();
  }

  // Flush any backend-destruction tasks before the test ends.
  base::RunLoop().RunUntilIdle();
}
619 
// The memory backend must invoke the post-cleanup closure only after the
// backend object has been destroyed, and asynchronously (posted, not run
// inline during destruction).
TEST_F(DiskCacheTest, MemBackendPostCleanupCallback) {
  TestBackendResultCompletionCallback cb;

  net::TestClosure on_cleanup;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
      /*file_operations=*/nullptr, base::FilePath(), 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, on_cleanup.closure(),
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  // The callback should be posted after backend is destroyed.
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(on_cleanup.have_result());

  rv.backend.reset();

  // Destruction posts the closure; it must not have run synchronously.
  EXPECT_FALSE(on_cleanup.have_result());
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(on_cleanup.have_result());
}
643 
TEST_F(DiskCacheTest, CreateBackendDouble) {
  // Make sure that creation for the second backend for same path happens
  // after the first one completes.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  EXPECT_TRUE(rv.backend);
  disk_cache::FlushCacheThreadForTesting();

  // No rv2.backend yet: the second creation is blocked on the first backend
  // still being alive.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  // Releasing the first backend unblocks the second creation.
  rv.backend.reset();

  // Now rv2.backend should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
676 
TEST_F(DiskCacheBackendTest, CreateBackendDoubleOpenEntry) {
  // Demonstrate the creation sequencing with an open entry. This is done
  // with SimpleCache since the block-file cache cancels most of I/O on
  // destruction and blocks for what it can't cancel.

  // Don't try to sanity-check things as a blockfile cache
  SetSimpleCacheMode();

  // Make sure that creation for the second backend for same path happens
  // after the first one completes, and all of its ops complete.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  disk_cache::FlushCacheThreadForTesting();

  // No cache 2 yet.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  TestEntryResultCompletionCallback cb3;
  EntryResult entry_result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb3.callback());
  entry_result = cb3.GetResult(std::move(entry_result));
  ASSERT_EQ(net::OK, entry_result.net_error());

  rv.backend.reset();

  // Still doesn't exist: the open entry keeps the first backend's cleanup
  // from completing.
  EXPECT_FALSE(cb2.have_result());

  entry_result.ReleaseEntry()->Close();

  // Now should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
727 
TEST_F(DiskCacheBackendTest, CreateBackendPostCleanup) {
  // Test for the explicit PostCleanupCallback parameter to CreateCacheBackend.

  // Extravagant size payload to make reproducing races easier.
  const int kBufSize = 256 * 1024;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  // Write a large entry, then destroy the backend before its async close
  // work has necessarily finished.
  TestEntryResultCompletionCallback cb2;
  EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // All of the payload should be on disk, despite stream 0 being written
  // back in the async Close()
  base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex("key", 0));
  int64_t size = 0;
  EXPECT_TRUE(base::GetFileSize(entry_path, &size));
  EXPECT_GT(size, kBufSize);
}
772 
TEST_F(DiskCacheBackendTest, SimpleCreateBackendRecoveryAppCache) {
  // Tests index recovery in APP_CACHE mode. (This is harder to test for
  // DISK_CACHE since post-cleanup callbacks aren't permitted there).
  const int kBufSize = 4 * 1024;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  // Create a backend with post-cleanup callback specified, in order to know
  // when the index has been written back (so it can be deleted race-free).
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  // Create an entry.
  TestEntryResultCompletionCallback cb2;
  disk_cache::EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // Delete the index, forcing the next open to recover it from disk state.
  base::DeleteFile(
      cache_path_.AppendASCII("index-dir").AppendASCII("the-real-index"));

  // Open the cache again. The fixture also waits for index init.
  InitCache();

  // Entry should not have a trailer size, since can't tell what it should be
  // when doing recovery (and definitely shouldn't interpret last use time as
  // such).
  EXPECT_EQ(0, simple_cache_impl_->index()->GetTrailerPrefetchSize(
                   disk_cache::simple_util::GetEntryHashKey("key")));
}
826 
827 // Tests that |BackendImpl| fails to initialize with a missing file.
TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  // Remove one of the cache's data files so Init() has to fail.
  base::FilePath filename = cache_path_.AppendASCII("data_1");
  base::DeleteFile(filename);
  net::TestCompletionCallback cb;

  // Blocking shouldn't be needed to create the cache.
  std::optional<base::ScopedDisallowBlocking> disallow_blocking(std::in_place);
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
                                                net::DISK_CACHE, nullptr));
  cache->Init(cb.callback());
  EXPECT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
  disallow_blocking.reset();

  cache.reset();
  // The on-disk cache is intentionally broken; skip the fixture's check.
  DisableIntegrityCheck();
}
846 
// The in-memory backend should shrink when the OS signals memory pressure.
TEST_F(DiskCacheBackendTest, MemoryListensToMemoryPressure) {
  const int kLimit = 16 * 1024;
  const int kEntrySize = 256;
  SetMaxSize(kLimit);
  SetMemoryOnlyMode();
  InitCache();

  // Fill in to about 80-90% full.
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kEntrySize);
  CacheTestFillBuffer(buffer->data(), kEntrySize, false);

  for (int i = 0; i < 0.9 * (kLimit / kEntrySize); ++i) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_EQ(net::OK, CreateEntry(base::NumberToString(i), &entry));
    EXPECT_EQ(kEntrySize,
              WriteData(entry, 0, 0, buffer.get(), kEntrySize, true));
    entry->Close();
  }

  EXPECT_GT(CalculateSizeOfAllEntries(), 0.8 * kLimit);

  // Signal low-memory of various sorts, and see how small it gets.
  // RunUntilIdle() lets the pressure notification be delivered.
  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.5 * kLimit);

  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.1 * kLimit);
}
879 
// A pre-existing file in the cache directory (named like the backend's
// external files — "f_000001") must be left intact by cache operations.
TEST_F(DiskCacheBackendTest, ExternalFiles) {
  InitCache();
  // First, let's create a file on the folder.
  base::FilePath filename = cache_path_.AppendASCII("f_000001");

  const int kSize = 50;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  ASSERT_TRUE(base::WriteFile(
      filename, std::string_view(buffer1->data(), static_cast<size_t>(kSize))));

  // Now let's create a file with the cache. The zero-length write at a large
  // offset presumably forces external-file allocation — verify with backend.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
  entry->Close();

  // And verify that the first file is still there, byte for byte.
  auto buffer2(base::MakeRefCounted<net::IOBufferWithSize>(kSize));
  ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
}
902 
903 // Tests that we deal with file-level pending operations at destruction time.
// |fast| == true leaves kNoRandom unset (regular cache version); in that case
// the pending operation's callback must NOT have fired by destruction time.
void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
  ASSERT_TRUE(CleanupCacheDir());
  uint32_t flags = disk_cache::kNoBuffering;
  if (!fast)
    flags |= disk_cache::kNoRandom;

  // Blockfile only: keep IO on this thread so the operation is still pending
  // when the backend is destroyed below.
  if (!simple_cache_mode_)
    UseCurrentThread();
  CreateBackend(flags);

  net::TestCompletionCallback cb;
  int rv = GeneratePendingIO(&cb);

  // The cache destructor will see one pending operation here.
  ResetCaches();

  if (rv == net::ERR_IO_PENDING) {
    if (fast || simple_cache_mode_)
      EXPECT_FALSE(cb.have_result());
    else
      EXPECT_TRUE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();

#if !BUILDFLAG(IS_IOS)
  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later. Note that on iOS systems even though this
  // test uses a single thread, the actual IO is posted to a worker thread and
  // the cache destructor breaks the link to reach cb when the operation
  // completes.
  rv = cb.GetResult(rv);
#endif
}
937 
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
  // Non-fast: kNoRandom is added (see BackendShutdownWithPendingFileIO).
  BackendShutdownWithPendingFileIO(false);
}
941 
942 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
943 // builds because they contain a lot of intentional memory leaks.
944 #if !defined(LEAK_SANITIZER)
945 // We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  // Fast mode: the pending IO's callback must not have fired at destruction.
  BackendShutdownWithPendingFileIO(true);
}
952 #endif
953 
954 // See crbug.com/330074
955 #if !BUILDFLAG(IS_IOS)
956 // Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  // The second backend gets its own temp dir so the two caches share nothing.
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  net::TestCompletionCallback cb;
  TestBackendResultCompletionCallback create_cb;
  disk_cache::BackendResult backend_rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      store.GetPath(), 0, disk_cache::ResetHandling::kNeverReset,
      /* net_log = */ nullptr, create_cb.callback());
  backend_rv = create_cb.GetResult(std::move(backend_rv));
  ASSERT_THAT(backend_rv.net_error, IsOk());
  ASSERT_TRUE(backend_rv.backend);

  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.
  UseCurrentThread();

  CreateBackend(disk_cache::kNoBuffering);
  int rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and backend_rv.backend will go away.
  backend_rv.backend.reset();

  // Destroying the other instance must not have completed our operation.
  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later.
  rv = cb.GetResult(rv);
}
991 #endif
992 
993 // Tests that we deal with background-thread pending operations.
// |fast| == true leaves kNoRandom unset. In either case the callback must
// never fire after the backend has been destroyed.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    uint32_t flags = disk_cache::kNoBuffering;
    if (!fast)
      flags |= disk_cache::kNoRandom;

    CreateBackend(flags);

    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    result = cb.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());

    result.ReleaseEntry()->Close();

    // The cache destructor will see one pending operation here.
    ResetCaches();
  }

  // Draining the loop must not deliver the abandoned callback.
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}
1020 
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  // Non-fast: kNoRandom is added (see BackendShutdownWithPendingIO).
  BackendShutdownWithPendingIO(false);
}
1024 
1025 #if !defined(LEAK_SANITIZER)
1026 // We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  // Fast mode: kNoRandom is left unset.
  BackendShutdownWithPendingIO(true);
}
1033 #endif
1034 
1035 // Tests that we deal with create-type pending operations.
// Starts an entry creation and destroys the backend before it completes; the
// abandoned callback must never fire. |fast| == true leaves kNoRandom unset.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags =
        fast ? disk_cache::kNone : disk_cache::kNoRandom;
    CreateBackend(flags);

    // Kick off the create but don't wait for it.
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(result.net_error(), IsError(net::ERR_IO_PENDING));

    ResetCaches();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}
1057 
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  // Non-fast: kNoRandom is set (see BackendShutdownWithPendingCreate).
  BackendShutdownWithPendingCreate(false);
}
1061 
1062 #if !defined(LEAK_SANITIZER)
1063 // We'll be leaking an entry from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  // Fast mode: kNone flags.
  BackendShutdownWithPendingCreate(true);
}
1070 #endif
1071 
// Dooms an existing entry and destroys the backend before the doom completes;
// the abandoned callback must never fire.
void DiskCacheBackendTest::BackendShutdownWithPendingDoom() {
  net::TestCompletionCallback cb;
  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags = disk_cache::kNoRandom;
    CreateBackend(flags);

    // Create (and close) the entry that will be doomed.
    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();

    // Kick off the doom but don't wait for it.
    int rv = cache_->DoomEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(rv, IsError(net::ERR_IO_PENDING));

    ResetCaches();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}
1097 
TEST_F(DiskCacheBackendTest, ShutdownWithPendingDoom) {
  // See BackendShutdownWithPendingDoom for the scenario.
  BackendShutdownWithPendingDoom();
}
1101 
1102 // Disabled on android since this test requires cache creator to create
1103 // blockfile caches.
1104 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest, TruncatedIndex) {
  ASSERT_TRUE(CleanupCacheDir());
  // Plant a bogus, far-too-short index file; backend creation must fail
  // (kNeverReset forbids wiping and starting over).
  base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(index, "hello"));

  TestBackendResultCompletionCallback cb;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_NE(net::OK, rv.net_error);
  ASSERT_FALSE(rv.backend);
}
1121 #endif
1122 
// Exercises SetMaxSize(): the per-file size limit it implies, growing the
// limit mid-run, and eviction when the limit is shrunk on a nearly-full cache.
void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(cache_size);
  memset(buffer->data(), 0, cache_size);
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
      << "normal file";

  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache.
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
  entry->Close();
  FlushQueueForTest();

  // Shrink the limit back down; the next create/close should force a trim.
  SetMaxSize(cache_size);

  // The cache is 95% full.

  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry("an extra key", &entry2), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  // |first| should have been evicted by the trim.
  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  entry->Close();
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}
1179 
TEST_F(DiskCacheBackendTest, SetSize) {
  // Fixture defaults (blockfile backend).
  BackendSetSize();
}
1183 
TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
  // Same scenario with the new eviction algorithm.
  SetNewEviction();
  BackendSetSize();
}
1188 
TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  // Same scenario against the in-memory backend.
  SetMemoryOnlyMode();
  BackendSetSize();
}
1193 
BackendLoad()1194 void DiskCacheBackendTest::BackendLoad() {
1195   InitCache();
1196   int seed = static_cast<int>(Time::Now().ToInternalValue());
1197   srand(seed);
1198 
1199   disk_cache::Entry* entries[kLargeNumEntries];
1200   for (auto*& entry : entries) {
1201     std::string key = GenerateKey(true);
1202     ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1203   }
1204   EXPECT_EQ(kLargeNumEntries, cache_->GetEntryCount());
1205 
1206   for (int i = 0; i < kLargeNumEntries; i++) {
1207     int source1 = rand() % kLargeNumEntries;
1208     int source2 = rand() % kLargeNumEntries;
1209     disk_cache::Entry* temp = entries[source1];
1210     entries[source1] = entries[source2];
1211     entries[source2] = temp;
1212   }
1213 
1214   for (auto* entry : entries) {
1215     disk_cache::Entry* new_entry;
1216     ASSERT_THAT(OpenEntry(entry->GetKey(), &new_entry), IsOk());
1217     EXPECT_TRUE(new_entry == entry);
1218     new_entry->Close();
1219     entry->Doom();
1220     entry->Close();
1221   }
1222   FlushQueueForTest();
1223   EXPECT_EQ(0, cache_->GetEntryCount());
1224 }
1225 
TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries) to force heavy chaining.
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1232 
TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries) to force heavy chaining.
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1240 
TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  // In-memory backend has no index mask to shrink; just cap the size.
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}
1246 
TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries) to force heavy chaining.
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1254 
TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries) to force heavy chaining.
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
1262 
1263 // Tests the chaining of an entry to the current head.
BackendChain()1264 void DiskCacheBackendTest::BackendChain() {
1265   SetMask(0x1);        // 2-entry table.
1266   SetMaxSize(0x3000);  // 12 kB.
1267   InitCache();
1268 
1269   disk_cache::Entry* entry;
1270   ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
1271   entry->Close();
1272   ASSERT_THAT(CreateEntry("The Second key", &entry), IsOk());
1273   entry->Close();
1274 }
1275 
TEST_F(DiskCacheBackendTest, Chain) {
  // Fixture defaults (blockfile backend).
  BackendChain();
}
1279 
TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  // Same scenario with the new eviction algorithm.
  SetNewEviction();
  BackendChain();
}
1284 
TEST_F(DiskCacheBackendTest, AppCacheChain) {
  // Same scenario in APP_CACHE mode.
  SetCacheType(net::APP_CACHE);
  BackendChain();
}
1289 
TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  // Same scenario in SHADER_CACHE mode.
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}
1294 
// With new eviction, entries that have been re-opened live on a separate
// (higher-priority) list; trims should hit each list's head in order.
TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_THAT(CreateEntry(name, &entry), IsOk());
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_THAT(OpenEntry(name, &entry), IsOk());
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_THAT(OpenEntry("Key 1", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry("Key 91", &entry), IsOk());
  entry->Close();
}
1324 
1325 // Before looking for invalid entries, let's check a valid entry.
// Writes an entry, closes it cleanly, simulates a crash, and verifies the
// entry (and its payload) survives the reopen.
void DiskCacheBackendTest::BackendValidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  // Close before the crash — this is what keeps the entry valid.
  entry->Close();
  SimulateCrash();

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}
1349 
TEST_F(DiskCacheBackendTest, ValidEntry) {
  // Fixture defaults (blockfile backend).
  BackendValidEntry();
}
1353 
TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  // Same scenario with the new eviction algorithm.
  SetNewEviction();
  BackendValidEntry();
}
1358 
1359 // The same logic of the previous test (ValidEntry), but this time force the
1360 // entry to be invalid, simulating a crash in the middle.
1361 // We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  // Deliberately no Close() here: crashing with the entry open is what makes
  // it invalid (compare BackendValidEntry).
  SimulateCrash();

  // The dirty entry must be dropped, leaving an empty cache.
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
}
1379 
1380 #if !defined(LEAK_SANITIZER)
1381 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  // Fixture defaults (blockfile backend).
  BackendInvalidEntry();
}
1385 
1386 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  // Same scenario with the new eviction algorithm.
  SetNewEviction();
  BackendInvalidEntry();
}
1391 
1392 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  // Same scenario in APP_CACHE mode.
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}
1397 
1398 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  // Same scenario in SHADER_CACHE mode.
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}
1403 
1404 // Almost the same test, but this time crash the cache after reading an entry.
1405 // We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  // Re-open and read, then crash with the entry still open.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    // Other cache types treat the open-at-crash entry as dirty and drop it.
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
1434 
1435 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  // Fixture defaults (blockfile backend).
  BackendInvalidEntryRead();
}
1439 
1440 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  // Same scenario with the new eviction algorithm.
  SetNewEviction();
  BackendInvalidEntryRead();
}
1445 
1446 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  // APP_CACHE takes the read-doesn't-dirty branch of the helper.
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}
1451 
1452 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  // Same scenario in SHADER_CACHE mode.
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}
1457 
1458 // We'll be leaking memory from this test.
// Crashes with half of 100 entries closed and half still open: the closed
// half must survive, the open half must be discarded as dirty.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (auto*& entry : entries) {
    std::string key = GenerateKey(true);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Shuffle the entries so the closed/open split is random.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  // Remember all keys; close only the first half.
  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  // Entries that were open at crash time are gone...
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  // ...while the cleanly-closed ones are still there.
  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_THAT(OpenEntry(keys[i], &entry), IsOk());
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}
1506 
1507 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  // Fixture defaults (blockfile backend).
  BackendInvalidEntryWithLoad();
}
1511 
1512 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  // Same scenario with the new eviction algorithm.
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}
1517 
1518 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  // Same scenario in APP_CACHE mode.
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}
1523 
1524 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  // Same scenario in SHADER_CACHE mode.
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}
1529 
1530 // We'll be leaking memory from this test.
// A dirty (crashed-while-open) entry should be the first thing trimmed away
// once the cache is over its limit.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash. |first| is left open, hence dirty.
  SimulateCrash();

  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20mS, we have one entry in the cache;
  // if it took more than that, we posted a task and we'll delete the second
  // entry too.
  base::RunLoop().RunUntilIdle();

  // This may be not thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ABSL_ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ABSL_ANNOTATE_IGNORE_READS_END();

  // Either way, the dirty entry must be gone.
  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}
1570 
1571 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  // Fixture defaults (blockfile backend).
  BackendTrimInvalidEntry();
}
1575 
1576 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  // Same scenario with the new eviction algorithm.
  SetNewEviction();
  BackendTrimInvalidEntry();
}
1581 
1582 // We'll be leaking memory from this test.
// Same idea as BackendTrimInvalidEntry, but with 32 dirty entries chained in
// a tiny 16-bucket table so trimming has to walk through chains.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    // Note that we are not closing the entries.
  }

  // Simulate a crash. All 32 re-opened entries are left dirty.
  SimulateCrash();

  ASSERT_THAT(CreateEntry("Something else", &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easy.
  if (new_eviction_) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
  }

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::RunLoop().RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();

  // For extra messiness, the integrity check for the cache can actually cause
  // evictions if it's over-capacity, which would race with above. So change
  // the size we pass to CheckCacheIntegrity (but don't mess with existing
  // backend's state.
  size_ = 0;
}
1638 
1639 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  // Fixture defaults (blockfile backend).
  BackendTrimInvalidEntry2();
}
1643 
1644 // We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  // Same scenario with the new eviction algorithm (takes the DoomAllEntries
  // branch of the helper).
  SetNewEviction();
  BackendTrimInvalidEntry2();
}
1649 #endif  // !defined(LEAK_SANITIZER)
1650 
BackendEnumerations()1651 void DiskCacheBackendTest::BackendEnumerations() {
1652   InitCache();
1653   Time initial = Time::Now();
1654 
1655   const int kNumEntries = 100;
1656   for (int i = 0; i < kNumEntries; i++) {
1657     std::string key = GenerateKey(true);
1658     disk_cache::Entry* entry;
1659     ASSERT_THAT(CreateEntry(key, &entry), IsOk());
1660     entry->Close();
1661   }
1662   EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1663   Time final = Time::Now();
1664 
1665   disk_cache::Entry* entry;
1666   std::unique_ptr<TestIterator> iter = CreateIterator();
1667   int count = 0;
1668   Time last_modified[kNumEntries];
1669   Time last_used[kNumEntries];
1670   while (iter->OpenNextEntry(&entry) == net::OK) {
1671     ASSERT_TRUE(nullptr != entry);
1672     if (count < kNumEntries) {
1673       last_modified[count] = entry->GetLastModified();
1674       last_used[count] = entry->GetLastUsed();
1675       EXPECT_TRUE(initial <= last_modified[count]);
1676       EXPECT_TRUE(final >= last_modified[count]);
1677     }
1678 
1679     entry->Close();
1680     count++;
1681   };
1682   EXPECT_EQ(kNumEntries, count);
1683 
1684   iter = CreateIterator();
1685   count = 0;
1686   // The previous enumeration should not have changed the timestamps.
1687   while (iter->OpenNextEntry(&entry) == net::OK) {
1688     ASSERT_TRUE(nullptr != entry);
1689     if (count < kNumEntries) {
1690       EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1691       EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1692     }
1693     entry->Close();
1694     count++;
1695   };
1696   EXPECT_EQ(kNumEntries, count);
1697 }
1698 
// Run the enumeration scenario against each backend flavor.
TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}
1722 
// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
  entry1->Close();
  ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();
  ASSERT_THAT(OpenEntry(second, &entry1), IsOk());
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  // "second" was just touched by the OpenEntry() above, so it comes back
  // first from the iterator.
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".
  entry1->Close();
  entry2->Close();

  // The iterator should still be valid, so we should not crash.
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  EXPECT_EQ(entry2->GetKey(), first);
  entry2->Close();
  iter = CreateIterator();

  // Modify the oldest entry and get the newest element.
  ASSERT_THAT(OpenEntry(first, &entry1), IsOk());
  EXPECT_EQ(0, WriteData(entry1, 0, 200, nullptr, 0, false));
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    // The write moved "first" to the head of the recency list.
    EXPECT_EQ(entry2->GetKey(), first);
  }

  entry1->Close();
  entry2->Close();
}
1766 
// Run the open-entries enumeration scenario against each backend flavor.
TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}
1785 
// Dooms an entry (other than the one just returned) in the middle of an
// enumeration and verifies that the iterator still produces each surviving
// entry exactly once.
void DiskCacheBackendTest::BackendDoomMidEnumeration() {
  InitCache();

  const int kNumEntries = 100;
  std::set<std::string> keys;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    keys.insert(key);
    disk_cache::Entry* entry;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }

  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    if (count == 0) {
      // Delete a random entry from the cache while in the midst of iteration.
      // Skip past the entry the iterator just returned.
      auto key_to_doom = keys.begin();
      while (*key_to_doom == entry->GetKey())
        key_to_doom++;
      ASSERT_THAT(DoomEntry(*key_to_doom), IsOk());
      ASSERT_EQ(1u, keys.erase(*key_to_doom));
    }
    ASSERT_NE(nullptr, entry);
    // erase() returning 1 proves each key is seen exactly once.
    EXPECT_EQ(1u, keys.erase(entry->GetKey()));
    entry->Close();
    count++;
  };

  EXPECT_EQ(kNumEntries - 1, cache_->GetEntryCount());
  EXPECT_EQ(0u, keys.size());
}
1820 
// Run the doom-mid-enumeration scenario against each backend flavor.
TEST_F(DiskCacheBackendTest, DoomEnumerations) {
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomEnumerations) {
  SetNewEviction();
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEnumerations) {
  SetMemoryOnlyMode();
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, ShaderCacheDoomEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, AppCacheDoomEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, SimpleDoomEnumerations) {
  SetSimpleCacheMode();
  BackendDoomMidEnumeration();
}
1849 
// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);

  ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
  entry2->Close();

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  // Had the read touched the LRU, "first" would be returned here instead.
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
}
1883 
1884 #if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry *entry, *entry1, *entry2;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());

  const int kSize = 50;
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  entry1->Close();
  ASSERT_THAT(OpenEntry(key, &entry1), IsOk());
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  std::string key2("Another key");
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  entry2->Close();
  ASSERT_EQ(2, cache_->GetEntryCount());

  // |entry1| is still open when the crash hits, so it is left dirty on disk.
  SimulateCrash();

  // Only the clean entry (|key2|) should be returned by the enumeration.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(nullptr != entry);
    EXPECT_EQ(key2, entry->GetKey());
    entry->Close();
    count++;
  };
  EXPECT_EQ(1, count);
  EXPECT_EQ(1, cache_->GetEntryCount());
}
1921 
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}

// We'll be leaking memory from this test.
// Same scenario, run with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
1932 #endif  // !defined(LEAK_SANITIZER)
1933 
// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  // Seed the C RNG from the clock so the key sequence varies across runs.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Two iterators walking the same list in lock step.
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter1 = CreateIterator(),
                                iter2 = CreateIterator();
  ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
    ASSERT_TRUE(nullptr != entry1);

    ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
    ASSERT_TRUE(nullptr != entry2);
    entry2->Close();
  }

  // Messing up with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);
  entry2->Close();
}
1986 
// Run the iterator-fixing scenario with both eviction algorithms.
TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}
1995 
// Verifies DoomEntriesSince(): entries used at or after the given time are
// removed while older ones survive.
void DiskCacheBackendTest::BackendDoomRecent() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle = Time::Now();

  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // Nothing was used after |final|, so this must be a no-op.
  EXPECT_THAT(DoomEntriesSince(final), IsOk());
  ASSERT_EQ(4, cache_->GetEntryCount());

  // Only "third" and "fourth" were created after |middle|.
  EXPECT_THAT(DoomEntriesSince(middle), IsOk());
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("second", &entry), IsOk());
  entry->Close();
}
2028 
// Run the DoomEntriesSince() scenario against each backend flavor.
TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}
2042 
// Doom-by-time and doom-all over caches populated with sparse entries.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
2073 
// This test is for https://crbug.com/827492.
TEST_F(DiskCacheBackendTest, InMemorySparseEvict) {
  const int kMaxSize = 512;

  SetMaxSize(kMaxSize);
  SetMemoryOnlyMode();
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(64);
  CacheTestFillBuffer(buffer->data(), 64, false /* no_nulls */);

  std::vector<disk_cache::ScopedEntryPtr> entries;

  disk_cache::Entry* entry = nullptr;
  // Create a bunch of entries
  for (size_t i = 0; i < 14; i++) {
    std::string name = "http://www." + base::NumberToString(i) + ".com/";
    ASSERT_THAT(CreateEntry(name, &entry), IsOk());
    entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }

  // Create several sparse entries and fill with enough data to
  // pass eviction threshold
  ASSERT_EQ(64, WriteSparseData(entries[0].get(), 0, buffer.get(), 64));
  // This write fails (offset far beyond the tiny 512-byte max size).
  ASSERT_EQ(net::ERR_FAILED,
            WriteSparseData(entries[0].get(), 10000, buffer.get(), 4));
  ASSERT_EQ(63, WriteSparseData(entries[1].get(), 0, buffer.get(), 63));
  ASSERT_EQ(64, WriteSparseData(entries[2].get(), 0, buffer.get(), 64));
  ASSERT_EQ(64, WriteSparseData(entries[3].get(), 0, buffer.get(), 64));

  // Close all the entries, leaving a populated LRU list
  // with all entries having refcount 0 (doom implies deletion)
  entries.clear();

  // Create a new entry, triggering buggy eviction
  ASSERT_THAT(CreateEntry("http://www.14.com/", &entry), IsOk());
  entry->Close();
}
2112 
// Verifies DoomEntriesBetween() over several time ranges.  "fourth" is
// re-opened after creation so it has been used after |middle_end|.
void DiskCacheBackendTest::BackendDoomBetween() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_start = Time::Now();

  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_end = Time::Now();

  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // Only "second" and "third" fall inside [middle_start, middle_end).
  EXPECT_THAT(DoomEntriesBetween(middle_start, middle_end), IsOk());
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
  entry->Close();

  // Only "first" (older than |middle_start|) remains afterwards.
  EXPECT_THAT(DoomEntriesBetween(middle_start, final), IsOk());
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("first", &entry), IsOk());
  entry->Close();
}
2155 
// Run the DoomEntriesBetween() scenario against each backend flavor.
TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}
2169 
// DoomEntriesBetween() over caches populated with sparse entries.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  // Counts differ from the memory-only variant because BackendImpl includes
  // sparse child entries in GetEntryCount() while MemBackendImpl does not.
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}
2194 
BackendCalculateSizeOfAllEntries()2195 void DiskCacheBackendTest::BackendCalculateSizeOfAllEntries() {
2196   InitCache();
2197 
2198   // The cache is initially empty.
2199   EXPECT_EQ(0, CalculateSizeOfAllEntries());
2200 
2201   // Generate random entries and populate them with data of respective
2202   // sizes 0, 1, ..., count - 1 bytes.
2203   std::set<std::string> key_pool;
2204   CreateSetOfRandomEntries(&key_pool);
2205 
2206   int count = 0;
2207   int total_size = 0;
2208   for (std::string key : key_pool) {
2209     std::string data(count, ' ');
2210     scoped_refptr<net::StringIOBuffer> buffer =
2211         base::MakeRefCounted<net::StringIOBuffer>(data);
2212 
2213     // Alternate between writing to first two streams to test that we do not
2214     // take only one stream into account.
2215     disk_cache::Entry* entry;
2216     ASSERT_THAT(OpenEntry(key, &entry), IsOk());
2217     ASSERT_EQ(count, WriteData(entry, count % 2, 0, buffer.get(), count, true));
2218     entry->Close();
2219 
2220     total_size += GetRoundedSize(count + GetEntryMetadataSize(key));
2221     ++count;
2222   }
2223 
2224   int result = CalculateSizeOfAllEntries();
2225   EXPECT_EQ(total_size, result);
2226 
2227   // Add another entry and test if the size is updated. Then remove it and test
2228   // if the size is back to original value.
2229   {
2230     const int last_entry_size = 47;
2231     std::string data(last_entry_size, ' ');
2232     scoped_refptr<net::StringIOBuffer> buffer =
2233         base::MakeRefCounted<net::StringIOBuffer>(data);
2234 
2235     disk_cache::Entry* entry;
2236     std::string key = GenerateKey(true);
2237     ASSERT_THAT(CreateEntry(key, &entry), IsOk());
2238     ASSERT_EQ(last_entry_size,
2239               WriteData(entry, 0, 0, buffer.get(), last_entry_size, true));
2240     entry->Close();
2241 
2242     int new_result = CalculateSizeOfAllEntries();
2243     EXPECT_EQ(
2244         result + GetRoundedSize(last_entry_size + GetEntryMetadataSize(key)),
2245         new_result);
2246 
2247     DoomEntry(key);
2248     new_result = CalculateSizeOfAllEntries();
2249     EXPECT_EQ(result, new_result);
2250   }
2251 
2252   // After dooming the entries, the size should be back to zero.
2253   ASSERT_THAT(DoomAllEntries(), IsOk());
2254   EXPECT_EQ(0, CalculateSizeOfAllEntries());
2255 }
2256 
// Run the size-of-all-entries scenario against each backend flavor.
TEST_F(DiskCacheBackendTest, CalculateSizeOfAllEntries) {
  BackendCalculateSizeOfAllEntries();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfAllEntries) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfAllEntries();
}

TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfAllEntries) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfAllEntries();
}
2273 
// Exercises CalculateSizeOfEntriesBetween() over several time ranges.
// |expect_access_time_comparisons| is false for backends that only support
// the unbounded [Time(), Time::Max()) query; the sub-range checks are
// skipped for those.
void DiskCacheBackendTest::BackendCalculateSizeOfEntriesBetween(
    bool expect_access_time_comparisons) {
  InitCache();

  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));

  Time start = Time::Now();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  base::RunLoop().RunUntilIdle();

  AddDelay();
  Time middle = Time::Now();
  AddDelay();

  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("third_entry", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  base::RunLoop().RunUntilIdle();

  AddDelay();
  Time end = Time::Now();

  int size_1 = GetRoundedSize(GetEntryMetadataSize("first"));
  int size_2 = GetRoundedSize(GetEntryMetadataSize("second"));
  int size_3 = GetRoundedSize(GetEntryMetadataSize("third_entry"));

  ASSERT_EQ(3, cache_->GetEntryCount());
  ASSERT_EQ(CalculateSizeOfAllEntries(),
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));

  if (expect_access_time_comparisons) {
    int start_end = CalculateSizeOfEntriesBetween(start, end);
    ASSERT_EQ(CalculateSizeOfAllEntries(), start_end);
    ASSERT_EQ(size_1 + size_2 + size_3, start_end);

    // Only "first" was created before |middle|.
    ASSERT_EQ(size_1, CalculateSizeOfEntriesBetween(start, middle));
    ASSERT_EQ(size_2 + size_3, CalculateSizeOfEntriesBetween(middle, end));
  }

  // After dooming the entries, the size should be back to zero.
  ASSERT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
}
2323 
// The default (blockfile) backend reports the time-ranged query as
// unimplemented.
TEST_F(DiskCacheBackendTest, CalculateSizeOfEntriesBetween) {
  InitCache();
  ASSERT_EQ(net::ERR_NOT_IMPLEMENTED,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
}

TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfEntriesBetween) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfEntriesBetween(true);
}

TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfEntriesBetween) {
  // Test normal mode, where access time range comparisons are supported.
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(true);
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheCalculateSizeOfEntriesBetween) {
  // Test SimpleCache in APP_CACHE mode separately since it does not support
  // access time range comparisons.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(false);
}
2348 
// Restores the golden cache image |name| (captured in the middle of an insert
// or remove operation), initializes a backend over it, and verifies that
// recovery yields a consistent cache with |num_entries| entries.  |load|
// selects the settings under which the "heavy load" images were recorded.
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries,
                                              bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32_t mask;
  if (load) {
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  // The image still contains the entry involved in the interrupted operation.
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  // Opening the affected key must fail: that entry is dirty.
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  ResetCaches();

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, MaxSize(), mask));
  success_ = true;
}
2387 
BackendRecoverInsert()2388 void DiskCacheBackendTest::BackendRecoverInsert() {
2389   // Tests with an empty cache.
2390   BackendTransaction("insert_empty1", 0, false);
2391   ASSERT_TRUE(success_) << "insert_empty1";
2392   BackendTransaction("insert_empty2", 0, false);
2393   ASSERT_TRUE(success_) << "insert_empty2";
2394   BackendTransaction("insert_empty3", 0, false);
2395   ASSERT_TRUE(success_) << "insert_empty3";
2396 
2397   // Tests with one entry on the cache.
2398   BackendTransaction("insert_one1", 1, false);
2399   ASSERT_TRUE(success_) << "insert_one1";
2400   BackendTransaction("insert_one2", 1, false);
2401   ASSERT_TRUE(success_) << "insert_one2";
2402   BackendTransaction("insert_one3", 1, false);
2403   ASSERT_TRUE(success_) << "insert_one3";
2404 
2405   // Tests with one hundred entries on the cache, tiny index.
2406   BackendTransaction("insert_load1", 100, true);
2407   ASSERT_TRUE(success_) << "insert_load1";
2408   BackendTransaction("insert_load2", 100, true);
2409   ASSERT_TRUE(success_) << "insert_load2";
2410 }
2411 
// Run the insert-recovery scenario with both eviction algorithms.
TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}
2420 
// Replays recovery over golden cache images captured in the middle of remove
// operations: the only element, the list head, the list tail, and removals
// under load.
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}
2460 
// The remove-recovery tests are flaky on Windows, so they are disabled there.
#if BUILDFLAG(IS_WIN)
// http://crbug.com/396392
#define MAYBE_RecoverRemove DISABLED_RecoverRemove
#else
#define MAYBE_RecoverRemove RecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
  BackendRecoverRemove();
}

#if BUILDFLAG(IS_WIN)
// http://crbug.com/396392
#define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
#else
#define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}
2481 
// Recovery of a dirty cache image when the configured max size is small
// enough that initialization has to evict entries.
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  // Tiny cap so the 100-entry image is over capacity at init time.
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}
2494 
// Run the recover-with-eviction scenario with both eviction algorithms.
TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}
2503 
2504 // Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest,WrongVersion)2505 TEST_F(DiskCacheTest, WrongVersion) {
2506   ASSERT_TRUE(CopyTestCache("wrong_version"));
2507   net::TestCompletionCallback cb;
2508 
2509   std::unique_ptr<disk_cache::BackendImpl> cache(
2510       std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
2511                                                 net::DISK_CACHE, nullptr));
2512   cache->Init(cb.callback());
2513   ASSERT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
2514 }
2515 
2516 // Tests that the disk cache successfully joins the control group, dropping the
2517 // existing cache in favour of a new empty cache.
2518 // Disabled on android since this test requires cache creator to create
2519 // blockfile caches.
2520 #if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest,SimpleCacheControlJoin)2521 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
2522   std::unique_ptr<disk_cache::BackendImpl> cache =
2523       CreateExistingEntryCache(cache_path_);
2524   ASSERT_TRUE(cache.get());
2525   cache.reset();
2526 
2527   // Instantiate the SimpleCacheTrial, forcing this run into the
2528   // ExperimentControl group.
2529   base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
2530                                          "ExperimentControl");
2531   TestBackendResultCompletionCallback cb;
2532   disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
2533       net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
2534       /*file_operations=*/nullptr, cache_path_, 0,
2535       disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
2536       cb.callback());
2537   rv = cb.GetResult(std::move(rv));
2538   ASSERT_THAT(rv.net_error, IsOk());
2539   EXPECT_EQ(0, rv.backend->GetEntryCount());
2540 }
2541 #endif
2542 
2543 // Tests that the disk cache can restart in the control group preserving
2544 // existing entries.
TEST_F(DiskCacheTest,SimpleCacheControlRestart)2545 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
2546   // Instantiate the SimpleCacheTrial, forcing this run into the
2547   // ExperimentControl group.
2548   base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
2549                                          "ExperimentControl");
2550 
2551   std::unique_ptr<disk_cache::BackendImpl> cache =
2552       CreateExistingEntryCache(cache_path_);
2553   ASSERT_TRUE(cache.get());
2554 
2555   net::TestCompletionCallback cb;
2556 
2557   const int kRestartCount = 5;
2558   for (int i = 0; i < kRestartCount; ++i) {
2559     cache = std::make_unique<disk_cache::BackendImpl>(
2560         cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
2561     cache->Init(cb.callback());
2562     ASSERT_THAT(cb.WaitForResult(), IsOk());
2563     EXPECT_EQ(1, cache->GetEntryCount());
2564 
2565     TestEntryResultCompletionCallback cb2;
2566     EntryResult result =
2567         cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
2568     result = cb2.GetResult(std::move(result));
2569     result.ReleaseEntry()->Close();
2570   }
2571 }
2572 
2573 // Tests that the disk cache can leave the control group preserving existing
2574 // entries.
TEST_F(DiskCacheTest,SimpleCacheControlLeave)2575 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
2576   {
2577     // Instantiate the SimpleCacheTrial, forcing this run into the
2578     // ExperimentControl group.
2579     base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
2580                                            "ExperimentControl");
2581 
2582     std::unique_ptr<disk_cache::BackendImpl> cache =
2583         CreateExistingEntryCache(cache_path_);
2584     ASSERT_TRUE(cache.get());
2585   }
2586 
2587   // Instantiate the SimpleCacheTrial, forcing this run into the
2588   // ExperimentNo group.
2589   base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
2590   net::TestCompletionCallback cb;
2591 
2592   const int kRestartCount = 5;
2593   for (int i = 0; i < kRestartCount; ++i) {
2594     std::unique_ptr<disk_cache::BackendImpl> cache(
2595         std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
2596                                                   net::DISK_CACHE, nullptr));
2597     cache->Init(cb.callback());
2598     ASSERT_THAT(cb.WaitForResult(), IsOk());
2599     EXPECT_EQ(1, cache->GetEntryCount());
2600 
2601     TestEntryResultCompletionCallback cb2;
2602     EntryResult result =
2603         cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
2604     result = cb2.GetResult(std::move(result));
2605     ASSERT_THAT(result.net_error(), IsOk());
2606     result.ReleaseEntry()->Close();
2607   }
2608 }
2609 
// Tests that the cache is properly restarted on recovery error.
// Disabled on android since this test requires cache creator to create
// blockfile caches.
#if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  SetNewEviction();

  TestBackendResultCompletionCallback cb;
  {
    // Creating the backend must not block the calling thread; the scoped
    // disallow turns any accidental blocking I/O into a test failure.
    base::ScopedDisallowBlocking disallow_blocking;
    base::FilePath path(cache_path_);
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
        /*file_operations=*/nullptr, path, 0,
        disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
        cb.callback());
    path.clear();  // Make sure path was captured by the previous call.
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
  }
  // The old (wrong-version) cache must have been replaced by a valid one.
  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, /*max_size = */ 0,
                                  mask_));
}
#endif
2635 
2636 // We want to be able to deal with messed up entries on disk.
BackendInvalidEntry2()2637 void DiskCacheBackendTest::BackendInvalidEntry2() {
2638   ASSERT_TRUE(CopyTestCache("bad_entry"));
2639   DisableFirstCleanup();
2640   InitCache();
2641 
2642   disk_cache::Entry *entry1, *entry2;
2643   ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
2644   EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
2645   entry1->Close();
2646 
2647   // CheckCacheIntegrity will fail at this point.
2648   DisableIntegrityCheck();
2649 }
2650 
// Corrupt entry handling, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
}

// Corrupt entry handling, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  SetNewEviction();
  BackendInvalidEntry2();
}
2659 
2660 // Tests that we don't crash or hang when enumerating this cache.
BackendInvalidEntry3()2661 void DiskCacheBackendTest::BackendInvalidEntry3() {
2662   SetMask(0x1);        // 2-entry table.
2663   SetMaxSize(0x3000);  // 12 kB.
2664   DisableFirstCleanup();
2665   InitCache();
2666 
2667   disk_cache::Entry* entry;
2668   std::unique_ptr<TestIterator> iter = CreateIterator();
2669   while (iter->OpenNextEntry(&entry) == net::OK) {
2670     entry->Close();
2671   }
2672 }
2673 
// Enumeration over a dirty cache, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
}

// Enumeration over a dirty cache, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  BackendInvalidEntry3();
  // The fixture deliberately leaves the cache inconsistent.
  DisableIntegrityCheck();
}
2685 
// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Evicting must not crash or hang despite the dirty duplicate.
  TrimForTest(false);
}
2697 
// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Trimming the deleted list must not crash or hang.
  TrimDeletedListForTest(false);
}
2710 
// Eviction over a dirty entry whose hash-collision link points to a deleted
// entry; the deleted entry must not come back.
TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
2725 
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
  entry->Close();

  // Two trims: the first disposes of the self-referencing entry, the second
  // must terminate instead of looping.
  TrimForTest(false);
  TrimForTest(false);
  // Only the freshly created entry should remain, and it must still open.
  ASSERT_THAT(OpenEntry("The first key", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(1, cache_->GetEntryCount());
}
2748 
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a wide loop of 5 entries.

  // Walking the collision list for a missing key must terminate with failure,
  // not spin on the loop.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}
2763 
// Dooming an entry adjacent to a dirty one must not corrupt the ranking list.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  // A third, unrelated entry must still be reachable afterwards.
  ASSERT_THAT(OpenEntry("some other key", &entry), IsOk());
  entry->Close();
}
2783 
2784 // Tests handling of corrupt entries by keeping the rankings node around, with
2785 // a fatal failure.
BackendInvalidEntry7()2786 void DiskCacheBackendTest::BackendInvalidEntry7() {
2787   const int kSize = 0x3000;  // 12 kB.
2788   SetMaxSize(kSize * 10);
2789   InitCache();
2790 
2791   std::string first("some key");
2792   std::string second("something else");
2793   disk_cache::Entry* entry;
2794   ASSERT_THAT(CreateEntry(first, &entry), IsOk());
2795   entry->Close();
2796   ASSERT_THAT(CreateEntry(second, &entry), IsOk());
2797 
2798   // Corrupt this entry.
2799   disk_cache::EntryImpl* entry_impl =
2800       static_cast<disk_cache::EntryImpl*>(entry);
2801 
2802   entry_impl->rankings()->Data()->next = 0;
2803   entry_impl->rankings()->Store();
2804   entry->Close();
2805   FlushQueueForTest();
2806   EXPECT_EQ(2, cache_->GetEntryCount());
2807 
2808   // This should detect the bad entry.
2809   EXPECT_NE(net::OK, OpenEntry(second, &entry));
2810   EXPECT_EQ(1, cache_->GetEntryCount());
2811 
2812   // We should delete the cache. The list still has a corrupt node.
2813   std::unique_ptr<TestIterator> iter = CreateIterator();
2814   EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2815   FlushQueueForTest();
2816   EXPECT_EQ(0, cache_->GetEntryCount());
2817 }
2818 
// Fatal rankings corruption, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
}

// Fatal rankings corruption, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  SetNewEviction();
  BackendInvalidEntry7();
}
2827 
2828 // Tests handling of corrupt entries by keeping the rankings node around, with
2829 // a non fatal failure.
BackendInvalidEntry8()2830 void DiskCacheBackendTest::BackendInvalidEntry8() {
2831   const int kSize = 0x3000;  // 12 kB
2832   SetMaxSize(kSize * 10);
2833   InitCache();
2834 
2835   std::string first("some key");
2836   std::string second("something else");
2837   disk_cache::Entry* entry;
2838   ASSERT_THAT(CreateEntry(first, &entry), IsOk());
2839   entry->Close();
2840   ASSERT_THAT(CreateEntry(second, &entry), IsOk());
2841 
2842   // Corrupt this entry.
2843   disk_cache::EntryImpl* entry_impl =
2844       static_cast<disk_cache::EntryImpl*>(entry);
2845 
2846   entry_impl->rankings()->Data()->contents = 0;
2847   entry_impl->rankings()->Store();
2848   entry->Close();
2849   FlushQueueForTest();
2850   EXPECT_EQ(2, cache_->GetEntryCount());
2851 
2852   // This should detect the bad entry.
2853   EXPECT_NE(net::OK, OpenEntry(second, &entry));
2854   EXPECT_EQ(1, cache_->GetEntryCount());
2855 
2856   // We should not delete the cache.
2857   std::unique_ptr<TestIterator> iter = CreateIterator();
2858   ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
2859   entry->Close();
2860   EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2861   EXPECT_EQ(1, cache_->GetEntryCount());
2862 }
2863 
// Non-fatal rankings corruption, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
}

// Non-fatal rankings corruption, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  SetNewEviction();
  BackendInvalidEntry8();
}
2872 
// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Poison the on-disk state field; the value itself is arbitrary.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  if (eviction) {
    // Each trim disposes of the bad entry's data but keeps the count at 1:
    // the rankings node of the corrupt entry survives.
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return one entry.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // This should detect what's left of the bad entry.
    EXPECT_NE(net::OK, OpenEntry(second, &entry));
    EXPECT_EQ(2, cache_->GetEntryCount());
  }
  DisableIntegrityCheck();
}
2921 
// Enumeration path, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
}

// Enumeration path, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(false);
}

// Eviction path, default eviction policy.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
}

// Eviction path, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(true);
}
2939 
// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  // Re-open and touch the first entry so it is promoted to the high-use
  // list (list 1), leaving the bad entry alone on list 0.
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Poison the on-disk state field; the value itself is arbitrary.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    // The first trim only detects the bad entry (count unchanged); the next
    // two actually evict one entry each.
    TrimForTest(false);
    EXPECT_EQ(3, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}
2994 
// Enumeration path.
TEST_F(DiskCacheBackendTest, InvalidEntry10) {
  BackendInvalidEntry10(false);
}

// Eviction path.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
  BackendInvalidEntry10(true);
}
3002 
// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  // Re-open and touch both entries so they are promoted to the high-use
  // list (list 1); "third" below stays on list 0.
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Poison the on-disk state field; the value itself is arbitrary.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third.
  // List 1: second (bad) -> first.

  if (eviction) {
    // Detection order: third -> first -> second.
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    // The last trim hits the corrupt entry; its rankings node survives, so
    // the count stays at 1.
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second.
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return two entries.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}
3065 
// Enumeration path.
TEST_F(DiskCacheBackendTest, InvalidEntry11) {
  BackendInvalidEntry11(false);
}

// Eviction path.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
  BackendInvalidEntry11(true);
}
3073 
3074 // Tests handling of corrupt entries in the middle of a long eviction run.
BackendTrimInvalidEntry12()3075 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
3076   const int kSize = 0x3000;  // 12 kB
3077   SetMaxSize(kSize * 10);
3078   InitCache();
3079 
3080   std::string first("some key");
3081   std::string second("something else");
3082   disk_cache::Entry* entry;
3083   ASSERT_THAT(CreateEntry(first, &entry), IsOk());
3084   entry->Close();
3085   ASSERT_THAT(CreateEntry(second, &entry), IsOk());
3086 
3087   // Corrupt this entry.
3088   disk_cache::EntryImpl* entry_impl =
3089       static_cast<disk_cache::EntryImpl*>(entry);
3090 
3091   entry_impl->entry()->Data()->state = 0xbad;
3092   entry_impl->entry()->Store();
3093   entry->Close();
3094   ASSERT_THAT(CreateEntry("third", &entry), IsOk());
3095   entry->Close();
3096   ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
3097   TrimForTest(true);
3098   EXPECT_EQ(1, cache_->GetEntryCount());
3099   entry->Close();
3100   DisableIntegrityCheck();
3101 }
3102 
// Corrupt entry mid-eviction, default eviction policy.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
  BackendTrimInvalidEntry12();
}

// Corrupt entry mid-eviction, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
  SetNewEviction();
  BackendTrimInvalidEntry12();
}
3111 
3112 // We want to be able to deal with messed up entries on disk.
BackendInvalidRankings2()3113 void DiskCacheBackendTest::BackendInvalidRankings2() {
3114   ASSERT_TRUE(CopyTestCache("bad_rankings"));
3115   DisableFirstCleanup();
3116   InitCache();
3117 
3118   disk_cache::Entry *entry1, *entry2;
3119   EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
3120   ASSERT_THAT(OpenEntry("some other key", &entry2), IsOk());
3121   entry2->Close();
3122 
3123   // CheckCacheIntegrity will fail at this point.
3124   DisableIntegrityCheck();
3125 }
3126 
// Bad rankings on open, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidRankings2) {
  BackendInvalidRankings2();
}

// Bad rankings on open, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
  SetNewEviction();
  BackendInvalidRankings2();
}
3135 
// If the LRU is corrupt, we delete the cache.
// Preconditions (set by the callers): a "bad_rankings" cache copy has been
// loaded and the cache initialized.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  entry->Close();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // The next step of the enumeration hits the corrupt LRU node, which wipes
  // the whole cache.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache_->GetEntryCount());
}
3148 
// Corrupt LRU, restart succeeds, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendInvalidRankings();
}

// Corrupt LRU, restart succeeds, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendInvalidRankings();
}
3163 
// Corrupt LRU, restart fails, default eviction policy.
TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

// Corrupt LRU, restart fails, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}
3180 
// If the LRU is corrupt and we have open entries, we disable the cache.
void DiskCacheBackendTest::BackendDisable() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  // Hitting the corrupt node while entry1 is still open disables the cache
  // instead of deleting it outright.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(0, cache_->GetEntryCount());
  // A disabled cache rejects new entries.
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));

  entry1->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
3197 
// Disable with open entry, restart succeeds, default eviction policy.
TEST_F(DiskCacheBackendTest, DisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable();
}

// Disable with open entry, restart succeeds, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable();
}
3212 
// Disable with open entry, restart fails, default eviction policy.
TEST_F(DiskCacheBackendTest, DisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

// Disable with open entry, restart fails, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}
3229 
3230 // This is another type of corruption on the LRU; disable the cache.
BackendDisable2()3231 void DiskCacheBackendTest::BackendDisable2() {
3232   EXPECT_EQ(8, cache_->GetEntryCount());
3233 
3234   disk_cache::Entry* entry;
3235   std::unique_ptr<TestIterator> iter = CreateIterator();
3236   int count = 0;
3237   while (iter->OpenNextEntry(&entry) == net::OK) {
3238     ASSERT_TRUE(nullptr != entry);
3239     entry->Close();
3240     count++;
3241     ASSERT_LT(count, 9);
3242   };
3243 
3244   FlushQueueForTest();
3245   EXPECT_EQ(0, cache_->GetEntryCount());
3246 }
3247 
// Looping LRU, restart succeeds, default eviction policy.
TEST_F(DiskCacheBackendTest, DisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable2();
}

// Looping LRU, restart succeeds, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable2();
}
3262 
// Looping LRU, restart fails, default eviction policy.
TEST_F(DiskCacheBackendTest, DisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}

// Looping LRU, restart fails, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}
3279 
// If the index size changes when we disable the cache, we should not crash.
void DiskCacheBackendTest::BackendDisable3() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();

  // The corrupt node disables the cache and triggers a restart (with a
  // possibly different index size).
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();

  // After the restart the cache is usable again.
  ASSERT_THAT(CreateEntry("Something new", &entry2), IsOk());
  entry2->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
3296 
// Index-size change on disable, default eviction policy.
TEST_F(DiskCacheBackendTest, DisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDisable3();
}

// Index-size change on disable, new eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDisable3();
}
3313 
// If we disable the cache, already open entries should work as far as possible.
void DiskCacheBackendTest::BackendDisable4() {
  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  // Long keys: key2 is stored in a block file, key3 in a separate file.
  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  ASSERT_THAT(CreateEntry(key3, &entry3), IsOk());

  const int kBufSize = 20000;
  auto buf = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
  EXPECT_EQ(0, cache_->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  // Reads and writes on the already-open entries keep working while the
  // backend itself is disabled.
  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  // Key retrieval still works too, for both key storage schemes.
  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
3362 
TEST_F(DiskCacheBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();  // Keep the copied (corrupt) data intact through init.
  InitCache();
  BackendDisable4();
}
3369 
// Same scenario as DisableSuccess4 with the new eviction algorithm enabled.
TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable4();
}
3377 
3378 // Tests the exposed API with a disabled cache.
void DiskCacheBackendTest::BackendDisabledAPI() {
  cache_impl_->SetUnitTestMode();  // Simulate failure restarting the cache.

  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();
  // Hitting the corrupt region fails the enumeration and, because restart is
  // blocked by unit-test mode, leaves the backend disabled for good.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();
  // The cache should be disabled.

  // Every public API call must now fail cleanly rather than crash.
  EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, OpenEntry("First", &entry2));
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
  EXPECT_NE(net::OK, DoomEntry("First"));
  EXPECT_NE(net::OK, DoomAllEntries());
  EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now()));
  EXPECT_NE(net::OK, DoomEntriesSince(Time()));
  iter = CreateIterator();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));

  // Stats are empty and external-hit notifications are ignored (no crash).
  base::StringPairs stats;
  cache_->GetStats(&stats);
  EXPECT_TRUE(stats.empty());
  OnExternalCacheHit("First");
}
3407 
TEST_F(DiskCacheBackendTest, DisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();  // Keep the copied (corrupt) data intact through init.
  InitCache();
  BackendDisabledAPI();
}
3414 
// Same scenario as DisabledAPI with the new eviction algorithm enabled.
TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisabledAPI();
}
3422 
3423 // Test that some eviction of some kind happens.
void DiskCacheBackendTest::BackendEviction() {
  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  // Write twice as many entries as fit under the cap so eviction must happen.
  const int kWriteEntryCount = kMaxEntryCount * 2;

  static_assert(kWriteEntryCount * kWriteSize > kMaxSize,
                "must write more than MaxSize");

  SetMaxSize(kMaxSize);
  InitSparseCache(nullptr, nullptr);

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  std::string key_prefix("prefix");
  for (int i = 0; i < kWriteEntryCount; ++i) {
    // AddDelay() spaces the entries out in time — presumably so their use
    // timestamps differ and eviction ordering is well-defined; confirm.
    AddDelay();
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    disk_cache::ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // Some eviction of some kind must have kept total usage below the cap.
  int size = CalculateSizeOfAllEntries();
  EXPECT_GT(kMaxSize, size);
}
3454 
// Default (blockfile) backend.
TEST_F(DiskCacheBackendTest, BackendEviction) {
  BackendEviction();
}
3458 
// Same eviction scenario against the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyBackendEviction) {
  SetMemoryOnlyMode();
  BackendEviction();
}
3463 
3464 // TODO(morlovich): Enable BackendEviction test for simple cache after
3465 // performance problems are addressed. See crbug.com/588184 for more
3466 // information.
3467 
3468 // This overly specific looking test is a regression test aimed at
3469 // crbug.com/589186.
TEST_F(DiskCacheBackendTest, MemoryOnlyUseAfterFree) {
  SetMemoryOnlyMode();

  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  SetMaxSize(kMaxSize);
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  // Create an entry to be our sparse entry that gets written later.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first parent", &entry), IsOk());
  disk_cache::ScopedEntryPtr first_parent(entry);

  // Create a ton of entries, and keep them open, to put the cache well above
  // its eviction threshhold.
  const int kTooManyEntriesCount = kMaxEntryCount * 2;
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  std::string key_prefix("prefix");
  for (int i = 0; i < kTooManyEntriesCount; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    // Not checking the result because it will start to fail once the max size
    // is reached.
    WriteData(entry, 1, 0, buffer.get(), kWriteSize, false);
    open_entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }

  // Writing this sparse data should not crash. Ignoring the result because
  // we're only concerned with not crashing in this particular test
  // (regression test for crbug.com/589186).
  first_parent->WriteSparseData(32768, buffer.get(), 1024,
                                net::CompletionOnceCallback());
}
3507 
TEST_F(DiskCacheBackendTest, MemoryCapsWritesToMaxSize) {
  // Verify that the memory backend won't grow beyond its max size if lots of
  // open entries (each smaller than the max entry size) are trying to write
  // beyond the max size.
  SetMemoryOnlyMode();

  const int kMaxSize = 100 * 1024;       // 100KB cache
  const int kNumEntries = 20;            // 20 entries to write
  const int kWriteSize = kMaxSize / 10;  // Each entry writes 1/10th the max

  SetMaxSize(kMaxSize);
  InitCache();

  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  // Create an entry to be the final entry that gets written later.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("final", &entry), IsOk());
  disk_cache::ScopedEntryPtr final_entry(entry);

  // Create a ton of entries, write to the cache, and keep the entries open.
  // They should start failing writes once the cache fills.
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  std::string key_prefix("prefix");
  for (int i = 0; i < kNumEntries; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    // Result intentionally unchecked: writes are expected to fail near the cap.
    WriteData(entry, 1, 0, buffer.get(), kWriteSize, false);
    open_entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }
  // Total usage must never exceed the configured maximum.
  EXPECT_GE(kMaxSize, CalculateSizeOfAllEntries());

  // Any more writing at this point should cause an error.
  EXPECT_THAT(
      WriteData(final_entry.get(), 1, 0, buffer.get(), kWriteSize, false),
      IsError(net::ERR_INSUFFICIENT_RESOURCES));
}
3546 
// Initializes a raw BackendImpl on the current thread and runs the message
// loop so the periodic usage-stats timer gets a chance to fire.
TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr,
          base::SingleThreadTaskRunner::GetCurrentDefault(), net::DISK_CACHE,
          nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_THAT(cache->SyncInit(), IsOk());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}
3565 
// When initialization fails (on-disk data has the wrong version), the backend
// must not have created its stats timer.
TEST_F(DiskCacheBackendTest, TimerNotCreated) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr,
          base::SingleThreadTaskRunner::GetCurrentDefault(), net::DISK_CACHE,
          nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_NE(net::OK, cache->SyncInit());

  ASSERT_TRUE(nullptr == cache->GetTimerForTest());

  // The copied cache is intentionally bad; skip the teardown integrity check.
  DisableIntegrityCheck();
}
3583 
// Verifies that usage statistics survive a backend shutdown/reopen cycle.
TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  // The single create above must be recorded exactly once.
  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, base::ranges::count(stats, hits));

  ResetCaches();

  // Now open the cache and verify that the stats are still there.
  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(1, cache_->GetEntryCount());

  stats.clear();
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  EXPECT_EQ(1, base::ranges::count(stats, hits));
}
3611 
// Exercises DoomAllEntries() with closed entries, with entries still open,
// and on an already-empty cache.
void DiskCacheBackendTest::BackendDoomAll() {
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  // "third" and "fourth" stay open across the doom below.
  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // We should stop posting tasks at some point (if we post any).
  base::RunLoop().RunUntilIdle();

  // Doomed-but-open entries must not be reachable, yet their keys are free to
  // be recreated as new, distinct entries.
  disk_cache::Entry *entry3, *entry4;
  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());

  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  entry1->Close();
  entry2->Close();
  entry3->Doom();  // The entry should be already doomed, but this must work.
  entry3->Close();
  entry4->Close();

  // Now try with all references released.
  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // Dooming an empty cache also succeeds.
  EXPECT_THAT(DoomAllEntries(), IsOk());
}
3657 
// Default (blockfile) backend.
TEST_F(DiskCacheBackendTest, DoomAll) {
  BackendDoomAll();
}
3661 
// Same scenario with the new eviction algorithm enabled.
TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
  SetNewEviction();
  BackendDoomAll();
}
3666 
// Same scenario against the in-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
  SetMemoryOnlyMode();
  BackendDoomAll();
}
3671 
// Same scenario with the APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  BackendDoomAll();
}
3676 
// Same scenario with the SHADER_CACHE cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomAll();
}
3681 
3682 // If the index size changes when we doom the cache, we should not crash.
void DiskCacheBackendTest::BackendDoomAll2() {
  // The fixture cache starts with two entries; dooming everything must succeed
  // even though the index size changes underneath.
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());

  // The backend remains usable afterwards.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("Something new", &entry), IsOk());
  entry->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
3693 
TEST_F(DiskCacheBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();  // Keep the copied (corrupt) data intact through init.
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDoomAll2();
}
3701 
// Same scenario as DoomAll2 with the new eviction algorithm enabled.
TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDoomAll2();
}
3710 
3711 // We should be able to create the same entry on multiple simultaneous instances
3712 // of the cache.
TEST_F(DiskCacheTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  TestBackendResultCompletionCallback cb;

  const int kNumberOfCaches = 2;
  std::unique_ptr<disk_cache::Backend> caches[kNumberOfCaches];

  // Create two backends over distinct directories: a DISK_CACHE and a
  // GENERATED_BYTE_CODE_CACHE instance.
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      store1.GetPath(), 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  caches[0] = std::move(rv.backend);
  rv = disk_cache::CreateCacheBackend(
      net::GENERATED_BYTE_CODE_CACHE, net::CACHE_BACKEND_DEFAULT,
      /*file_operations=*/nullptr, store2.GetPath(), 0,
      disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  caches[1] = std::move(rv.backend);

  ASSERT_TRUE(caches[0].get() != nullptr && caches[1].get() != nullptr);

  // The same key must be creatable independently in each instance.
  std::string key("the first key");
  for (auto& cache : caches) {
    TestEntryResultCompletionCallback cb2;
    EntryResult result = cache->CreateEntry(key, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
3750 
3751 // Test the six regions of the curve that determines the max cache size.
TEST_F(DiskCacheTest, AutomaticMaxSize) {
  using disk_cache::kDefaultCacheSize;
  // Shorthand so every expectation reads as preferred(available_bytes).
  const auto preferred = [](int64_t available) {
    return disk_cache::PreferredCacheSize(available);
  };
  const int64_t large_size = kDefaultCacheSize;

  // Region 1: expected = available * 0.8
  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10, preferred(large_size - 1));
  EXPECT_EQ(kDefaultCacheSize * 8 / 10, preferred(large_size));
  EXPECT_EQ(kDefaultCacheSize - 1, preferred(large_size * 10 / 8 - 1));

  // Region 2: expected = default_size
  EXPECT_EQ(kDefaultCacheSize, preferred(large_size * 10 / 8));
  EXPECT_EQ(kDefaultCacheSize, preferred(large_size * 10 - 1));

  // Region 3: expected = available * 0.1
  EXPECT_EQ(kDefaultCacheSize, preferred(large_size * 10));
  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10, preferred(large_size * 25 - 1));

  // Region 4: expected = default_size * 2.5
  EXPECT_EQ(kDefaultCacheSize * 25 / 10, preferred(large_size * 25));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10, preferred(large_size * 100 - 1));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10, preferred(large_size * 100));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10, preferred(large_size * 250 - 1));

  // Region 5: expected = available * 0.1
  const int64_t largest_size = kDefaultCacheSize * 4;
  EXPECT_EQ(kDefaultCacheSize * 25 / 10, preferred(large_size * 250));
  EXPECT_EQ(largest_size - 1, preferred(largest_size * 100 - 1));

  // Region 6: expected = largest possible size
  EXPECT_EQ(largest_size, preferred(largest_size * 100));
  EXPECT_EQ(largest_size, preferred(largest_size * 10000));
}
3796 
3797 // Make sure that we keep the total memory used by the internal buffers under
3798 // control.
TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, true);

  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry. Writing at offset 1MB with truncate=false
    // forces internal buffering of both streams.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  }

  entry->Close();
  // After all buffers are released, the backend's accounting must be back
  // to zero.
  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
}
3831 
3832 // This test assumes at least 150MB of system memory.
TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
  InitCache();

  const int kMegabyte = 1024 * 1024;

  // Each granted allocation is added to the backend's buffer accounting.
  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kMegabyte));
  EXPECT_EQ(kMegabyte, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kMegabyte));
  EXPECT_EQ(kMegabyte * 2, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kMegabyte));
  EXPECT_EQ(kMegabyte * 3, cache_impl_->GetTotalBuffersSize());

  // Releasing a buffer reduces the total accordingly.
  cache_impl_->BufferDeleted(kMegabyte);
  EXPECT_EQ(kMegabyte * 2, cache_impl_->GetTotalBuffersSize());

  // A single oversized request is rejected outright.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kMegabyte));

  // Keep asking one megabyte at a time until the global cap is reached;
  // afterwards even a single extra megabyte must be refused.
  for (int i = 0; i < 30; ++i) {
    cache_impl_->IsAllocAllowed(0, kMegabyte);  // Result intentionally ignored.
  }
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kMegabyte));
}
3857 
3858 // Tests that sharing of external files works and we are able to delete the
3859 // files when we need to.
TEST_F(DiskCacheBackendTest, FileSharing) {
  InitCache();

  // Allocate an external file through the backend and resolve its path.
  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
  base::FilePath name = cache_impl_->GetFileName(address);

  {
    auto file = base::MakeRefCounted<disk_cache::File>(false);
    file->Init(name);

#if BUILDFLAG(IS_WIN)
    // Without FILE_SHARE_DELETE a second open must fail; adding the flag makes
    // it succeed, which is what allows the deletion below while still open.
    DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
    DWORD access = GENERIC_READ | GENERIC_WRITE;
    base::win::ScopedHandle file2(CreateFile(name.value().c_str(), access,
                                             sharing, nullptr, OPEN_EXISTING, 0,
                                             nullptr));
    EXPECT_FALSE(file2.IsValid());

    sharing |= FILE_SHARE_DELETE;
    file2.Set(CreateFile(name.value().c_str(), access, sharing, nullptr,
                         OPEN_EXISTING, 0, nullptr));
    EXPECT_TRUE(file2.IsValid());
#endif

    // Delete the file while our handle is still open.
    EXPECT_TRUE(base::DeleteFile(name));

    // We should be able to use the file.
    const int kSize = 200;
    char buffer1[kSize];
    char buffer2[kSize];
    memset(buffer1, 't', kSize);
    memset(buffer2, 0, kSize);
    EXPECT_TRUE(file->Write(buffer1, kSize, 0));
    EXPECT_TRUE(file->Read(buffer2, kSize, 0));
    EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
  }

  // Once the last handle is gone, the file must actually be deleted.
  base::File file(name, base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_FALSE(file.IsValid());
  EXPECT_EQ(file.error_details(), base::File::FILE_ERROR_NOT_FOUND);
}
3902 
TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  // Insert two entries; "key0" is the older of the pair.
  disk_cache::Entry* cache_entry;
  for (int index = 0; index < 2; ++index) {
    std::string key = base::StringPrintf("key%d", index);
    ASSERT_THAT(CreateEntry(key, &cache_entry), IsOk());
    cache_entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // The external hit should have refreshed "key0"'s rank, so the trim evicts
  // the other entry and the older key survives.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &cache_entry), IsOk());
  cache_entry->Close();
}
3924 
TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  // Insert two entries; "key0" is the older of the pair.
  disk_cache::Entry* cache_entry;
  for (int index = 0; index < 2; ++index) {
    std::string key = base::StringPrintf("key%d", index);
    ASSERT_THAT(CreateEntry(key, &cache_entry), IsOk());
    cache_entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // As in the DISK_CACHE variant, the pinged (older) entry must survive the
  // trim while the other entry is evicted.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &cache_entry), IsOk());
  cache_entry->Close();
}
3947 
TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingCreate(false);
}
3955 
// Simple cache variant of the pending-doom-at-shutdown scenario.
TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingDoom) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingDoom();
}
3961 
// Simple cache variant of the pending-file-IO-at-shutdown scenario.
TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingFileIO(false);
}
3967 
// Simple cache backend, default cache type.
TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
  SetSimpleCacheMode();
  BackendBasics();
}
3972 
// Simple cache backend with the APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendBasics();
}
3978 
// Simple cache backend, default cache type.
TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
  SetSimpleCacheMode();
  BackendKeying();
}
3983 
// Simple cache backend with the APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}
3989 
// Simple cache backend with a 1MB cap.
TEST_F(DiskCacheBackendTest, SimpleCacheLoad) {
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}
3995 
// Simple cache backend, APP_CACHE type, with a 1MB cap.
TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}
4002 
// Simple cache variant of the doom-recent-entries scenario.
TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}
4007 
4008 // crbug.com/330926, crbug.com/370677
// Disabled; see crbug.com/330926 and crbug.com/370677.
TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}
4013 
// Simple cache variant of the DoomAll scenario.
TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}
4018 
// Simple cache variant of DoomAll with the APP_CACHE cache type.
TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}
4024 
// If one of an entry's on-disk files disappears, opening the entry must fail
// and the backend must clean up the remaining files.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(base::DeleteFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryNormalFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}
4060 
// A corrupt entry-file header must make OpenEntry fail rather than crash.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  disk_cache::Entry* null = nullptr;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // The entry is being closed on the Simple Cache worker pool
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Write an invalid header for stream 0 and stream 1.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  disk_cache::SimpleFileHeader header;
  header.initial_magic_number = UINT64_C(0xbadf00d);
  EXPECT_TRUE(base::WriteFile(entry_file1_path,
                              base::as_bytes(base::make_span(&header, 1u))));
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
}
4095 
4096 // Tests that the Simple Cache Backend fails to initialize with non-matching
4097 // file structure on disk.
TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
  // Create a cache structure with the |BackendImpl|.
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  ResetCaches();

  // Check that the |SimpleBackendImpl| does not favor this structure: its
  // init over a blockfile directory must fail.
  auto simple_cache = std::make_unique<disk_cache::SimpleBackendImpl>(
      /*file_operations_factory=*/nullptr, cache_path_, nullptr, nullptr, 0,
      net::DISK_CACHE, nullptr);
  net::TestCompletionCallback cb;
  simple_cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.WaitForResult());
  simple_cache.reset();
  // The directory is deliberately left in a mixed state; skip teardown checks.
  DisableIntegrityCheck();
}
4120 
4121 // Tests that the |BackendImpl| refuses to initialize on top of the files
4122 // generated by the Simple Cache Backend.
TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
  // Create a cache structure with the |SimpleBackendImpl|.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  ResetCaches();

  // Check that the |BackendImpl| does not favor this structure: its init over
  // a simple-cache directory must fail.
  auto cache = std::make_unique<disk_cache::BackendImpl>(
      cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
  cache->SetUnitTestMode();
  net::TestCompletionCallback cb;
  cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.WaitForResult());
  cache.reset();
  // The directory is deliberately left in a mixed state; skip teardown checks.
  DisableIntegrityCheck();
}
4146 
// Runs the shared BackendFixEnumerators coverage against the Simple Cache
// backend.
TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
}
4151 
4152 // Tests basic functionality of the SimpleBackend implementation of the
4153 // enumeration API.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> created_keys;
  ASSERT_TRUE(CreateSetOfRandomEntries(&created_keys));

  // A full enumeration must yield every created key exactly once.
  std::set<std::string> unseen(created_keys);
  std::unique_ptr<TestIterator> enumerator = CreateIterator();
  size_t seen = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, enumerator.get(), &unseen, &seen));
  enumerator.reset();
  EXPECT_EQ(created_keys.size(), seen);
  EXPECT_TRUE(unseen.empty());

  // Holding entries open (both before and in the middle of an enumeration)
  // must not perturb what the iterator returns.
  unseen = created_keys;
  enumerator = CreateIterator();
  seen = 0;
  disk_cache::Entry* held_before_enumeration;
  ASSERT_THAT(OpenEntry(*(created_keys.begin()), &held_before_enumeration),
              IsOk());
  ASSERT_TRUE(EnumerateAndMatchKeys(created_keys.size() / 2, enumerator.get(),
                                    &unseen, &seen));

  disk_cache::Entry* held_mid_enumeration;
  ASSERT_EQ(net::OK, OpenEntry(*(unseen.begin()), &held_mid_enumeration));
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, enumerator.get(), &unseen, &seen));
  enumerator.reset();
  held_before_enumeration->Close();
  held_mid_enumeration->Close();

  EXPECT_EQ(created_keys.size(), seen);
  EXPECT_TRUE(unseen.empty());
}
4188 
4189 // Tests that the enumerations are not affected by dooming an entry in the
4190 // middle.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> all_keys;
  ASSERT_TRUE(CreateSetOfRandomEntries(&all_keys));

  // Enumerate roughly half the entries, doom one of the keys not yet
  // returned, then finish: everything except the doomed key must show up.
  std::set<std::string> unseen(all_keys);
  std::unique_ptr<TestIterator> enumerator = CreateIterator();
  size_t seen = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(all_keys.size() / 2, enumerator.get(),
                                    &unseen, &seen));

  const std::string doomed_key = *(unseen.begin());
  DoomEntry(doomed_key);
  unseen.erase(doomed_key);
  all_keys.erase(doomed_key);
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, enumerator.get(), &unseen, &seen));
  enumerator.reset();

  EXPECT_EQ(all_keys.size(), seen);
  EXPECT_TRUE(unseen.empty());
}
4214 
4215 // Tests that enumerations are not affected by corrupt files.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  SetSimpleCacheMode();
  InitCache();

  // Create an entry whose backing file gets corrupted below.
  const std::string key = "the key";
  disk_cache::Entry* entry_to_corrupt;
  ASSERT_THAT(CreateEntry(key, &entry_to_corrupt), IsOk());
  ASSERT_TRUE(entry_to_corrupt);
  const int kPayloadSize = 50;
  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kPayloadSize);
  CacheTestFillBuffer(payload->data(), kPayloadSize, false);
  ASSERT_EQ(kPayloadSize,
            WriteData(entry_to_corrupt, 0, 0, payload.get(), kPayloadSize,
                      false));
  ASSERT_EQ(kPayloadSize,
            ReadData(entry_to_corrupt, 0, 0, payload.get(), kPayloadSize));
  entry_to_corrupt->Close();
  // Let all I/O finish so it doesn't race with corrupting the file below.
  RunUntilIdle();

  std::set<std::string> healthy_keys;
  ASSERT_TRUE(CreateSetOfRandomEntries(&healthy_keys));

  EXPECT_TRUE(
      disk_cache::simple_util::CreateCorruptFileForTests(key, cache_path_));
  EXPECT_EQ(healthy_keys.size() + 1,
            static_cast<size_t>(cache_->GetEntryCount()));

  // Enumeration must return every healthy entry and skip the corrupt one.
  std::set<std::string> unseen(healthy_keys);
  std::unique_ptr<TestIterator> enumerator = CreateIterator();
  size_t seen = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, enumerator.get(), &unseen, &seen));
  enumerator.reset();

  EXPECT_EQ(healthy_keys.size(), seen);
  EXPECT_TRUE(unseen.empty());
}
4252 
4253 // Tests that enumerations don't leak memory when the backend is destructed
4254 // mid-enumeration.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> keys;
  ASSERT_TRUE(CreateSetOfRandomEntries(&keys));

  // Start an enumeration and keep its first entry alive...
  std::unique_ptr<TestIterator> enumerator = CreateIterator();
  disk_cache::Entry* first_entry = nullptr;
  ASSERT_THAT(enumerator->OpenNextEntry(&first_entry), IsOk());
  EXPECT_TRUE(first_entry);
  disk_cache::ScopedEntryPtr first_entry_closer(first_entry);

  // ...then tear the backend down mid-enumeration.
  ResetCaches();
  // This test passes if we don't leak memory.
}
4270 
4271 // Verify that tasks run in priority order when the experiment is enabled.
4272 // Test has races, disabling until fixed: https://crbug.com/853283
TEST_F(DiskCacheBackendTest,DISABLED_SimpleCachePrioritizedEntryOrder)4273 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCachePrioritizedEntryOrder) {
4274   base::test::ScopedFeatureList scoped_feature_list;
4275   SetSimpleCacheMode();
4276   InitCache();
4277 
4278   // Set the SimpleCache's worker pool to a sequenced type for testing
4279   // priority order.
4280   disk_cache::SimpleBackendImpl* simple_cache =
4281       static_cast<disk_cache::SimpleBackendImpl*>(cache_.get());
4282   auto task_runner = base::ThreadPool::CreateSequencedTaskRunner(
4283       {base::TaskPriority::USER_VISIBLE, base::MayBlock()});
4284   simple_cache->SetTaskRunnerForTesting(task_runner);
4285 
4286   // Create three entries. Priority order is 3, 1, 2 because 3 has the highest
4287   // request priority and 1 is created before 2.
4288   disk_cache::Entry* entry1 = nullptr;
4289   disk_cache::Entry* entry2 = nullptr;
4290   disk_cache::Entry* entry3 = nullptr;
4291   ASSERT_THAT(CreateEntryWithPriority("first", net::LOWEST, &entry1), IsOk());
4292   ASSERT_THAT(CreateEntryWithPriority("second", net::LOWEST, &entry2), IsOk());
4293   ASSERT_THAT(CreateEntryWithPriority("third", net::HIGHEST, &entry3), IsOk());
4294 
4295   // Write some data to the entries.
4296   const int kSize = 10;
4297   auto buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
4298   auto buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
4299   auto buf3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
4300   CacheTestFillBuffer(buf1->data(), kSize, false);
4301   CacheTestFillBuffer(buf2->data(), kSize, false);
4302   CacheTestFillBuffer(buf3->data(), kSize, false);
4303 
4304   // Write to stream 2 because it's the only stream that can't be read from
4305   // synchronously.
4306   EXPECT_EQ(kSize, WriteData(entry1, 2, 0, buf1.get(), kSize, true));
4307   EXPECT_EQ(kSize, WriteData(entry2, 2, 0, buf1.get(), kSize, true));
4308   EXPECT_EQ(kSize, WriteData(entry3, 2, 0, buf1.get(), kSize, true));
4309 
4310   // Wait until the task_runner's queue is empty (WriteData might have
4311   // optimistically returned synchronously but still had some tasks to run in
4312   // the worker pool.
4313   base::RunLoop run_loop;
4314   task_runner->PostTaskAndReply(FROM_HERE, base::DoNothing(),
4315                                 run_loop.QuitClosure());
4316   run_loop.Run();
4317 
4318   std::vector<int> finished_read_order;
4319   auto finished_callback = [](std::vector<int>* finished_read_order,
4320                               int entry_number, base::OnceClosure quit_closure,
4321                               int rv) {
4322     finished_read_order->push_back(entry_number);
4323     if (quit_closure)
4324       std::move(quit_closure).Run();
4325   };
4326 
4327   auto read_buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
4328   auto read_buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
4329   auto read_buf3 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
4330 
4331   // Read from the entries in order 2, 3, 1. They should be reprioritized to
4332   // 3, 1, 2.
4333   base::RunLoop read_run_loop;
4334 
4335   entry2->ReadData(2, 0, read_buf2.get(), kSize,
4336                    base::BindOnce(finished_callback, &finished_read_order, 2,
4337                                   read_run_loop.QuitClosure()));
4338   entry3->ReadData(2, 0, read_buf3.get(), kSize,
4339                    base::BindOnce(finished_callback, &finished_read_order, 3,
4340                                   base::OnceClosure()));
4341   entry1->ReadData(2, 0, read_buf1.get(), kSize,
4342                    base::BindOnce(finished_callback, &finished_read_order, 1,
4343                                   base::OnceClosure()));
4344   EXPECT_EQ(0u, finished_read_order.size());
4345 
4346   read_run_loop.Run();
4347   EXPECT_EQ((std::vector<int>{3, 1, 2}), finished_read_order);
4348   entry1->Close();
4349   entry2->Close();
4350   entry3->Close();
4351 }
4352 
4353 // Tests that enumerations include entries with long keys.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationLongKeys) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> expected_keys;
  ASSERT_TRUE(CreateSetOfRandomEntries(&expected_keys));

  // Add one entry whose key is longer than kInitialHeaderRead, so it cannot
  // be fully recovered from the first read of the entry file.
  const size_t kLongKeyLength =
      disk_cache::SimpleSynchronousEntry::kInitialHeaderRead + 10;
  std::string long_key(kLongKeyLength, 'X');
  expected_keys.insert(long_key);
  disk_cache::Entry* long_key_entry = nullptr;
  ASSERT_THAT(CreateEntry(long_key.c_str(), &long_key_entry), IsOk());
  long_key_entry->Close();

  // Enumeration must still return every key, including the long one.
  std::unique_ptr<TestIterator> enumerator = CreateIterator();
  size_t seen = 0;
  EXPECT_TRUE(
      EnumerateAndMatchKeys(-1, enumerator.get(), &expected_keys, &seen));
  EXPECT_TRUE(expected_keys.empty());
}
4373 
4374 // Tests that a SimpleCache doesn't crash when files are deleted very quickly
4375 // after closing.
4376 // NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
  SetSimpleCacheMode();
  // Repeatedly build the cache and delete its directory immediately after
  // shutdown; none of the iterations may crash.
  for (int iteration = 0; iteration < 100; ++iteration) {
    InitCache();
    ResetCaches();
    EXPECT_TRUE(CleanupCacheDir());
  }
}
4385 
// Checks that a doom performed after the index has been serialized does not
// invalidate the on-disk index: on the next startup the index must load
// rather than be rebuilt.
TEST_F(DiskCacheBackendTest, SimpleCacheLateDoom) {
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();

  // Ensure that the directory mtime is flushed to disk before serializing the
  // index.
  disk_cache::FlushCacheThreadForTesting();
#if BUILDFLAG(IS_POSIX)
  base::File cache_dir(cache_path_,
                       base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_TRUE(cache_dir.Flush());
#endif  // BUILDFLAG(IS_POSIX)
  ResetCaches();
  disk_cache::FlushCacheThreadForTesting();

  // The index is now written. Dooming the last entry can't delete a file,
  // because that would advance the cache directory mtime and invalidate the
  // index.
  entry2->Doom();
  entry2->Close();

  DisableFirstCleanup();
  InitCache();
  // The serialized index must have been considered valid and loaded as-is.
  EXPECT_EQ(disk_cache::SimpleIndex::INITIALIZE_METHOD_LOADED,
            simple_cache_impl_->index()->init_method());
}
4417 
// Checks what maximum size the Simple Cache picks when handed a negative
// ("choose automatically") value, and that the size-experiment scaling stays
// within the expected bounds.
TEST_F(DiskCacheBackendTest, SimpleCacheNegMaxSize) {
  SetMaxSize(-1);
  SetSimpleCacheMode();
  InitCache();
  // We don't know what it will pick, but it's limited to what
  // disk_cache::PreferredCacheSize would return, scaled by the size experiment,
  // which only goes as much as 4x. It definitely should not be MAX_UINT64.
  EXPECT_NE(simple_cache_impl_->index()->max_size(),
            std::numeric_limits<uint64_t>::max());

  int max_default_size =
      2 * disk_cache::PreferredCacheSize(std::numeric_limits<int32_t>::max());

  ASSERT_GE(max_default_size, 0);
  EXPECT_LT(simple_cache_impl_->index()->max_size(),
            static_cast<unsigned>(max_default_size));

  uint64_t max_size_without_scaling = simple_cache_impl_->index()->max_size();

  // Scale to 200%. The size should be twice of |max_size_without_scaling| but
  // since that's capped on 20% of available size, checking for the size to be
  // between max_size_without_scaling and max_size_without_scaling*2.
  {
    base::test::ScopedFeatureList scoped_feature_list;
    std::map<std::string, std::string> field_trial_params;
    field_trial_params["percent_relative_size"] = "200";
    scoped_feature_list.InitAndEnableFeatureWithParameters(
        disk_cache::kChangeDiskCacheSizeExperiment, field_trial_params);

    // Re-initialize so the backend picks its size with the experiment active.
    InitCache();

    uint64_t max_size_scaled = simple_cache_impl_->index()->max_size();

    EXPECT_GE(max_size_scaled, max_size_without_scaling);
    EXPECT_LE(max_size_scaled, 2 * max_size_without_scaling);
  }
}
4455 
TEST_F(DiskCacheBackendTest, SimpleLastModified) {
  // Simple cache used to incorrectly set LastModified on entries based on
  // timestamp of the cache directory, and not the entries' file
  // (https://crbug.com/714143). So this test arranges for a situation
  // where this would occur by doing:
  // 1) Write entry 1
  // 2) Delay
  // 3) Write entry 2. This sets directory time stamp to be different from
  //    timestamp of entry 1 (due to the delay)
  // It then checks whether the entry 1 got the proper timestamp or not.

  SetSimpleCacheMode();
  InitCache();
  std::string key1 = GenerateKey(true);
  std::string key2 = GenerateKey(true);

  disk_cache::Entry* entry1;
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());

  // Make the Create complete --- SimpleCache can handle it optimistically,
  // and if we let it go fully async then trying to flush the Close might just
  // flush the Create.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  entry1->Close();

  // Make the ::Close actually complete, since it is asynchronous.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Upper bound on entry1's modification time, captured after its Close.
  Time entry1_timestamp = Time::NowFromSystemTime();

  // Don't want AddDelay since it sleeps 1s(!) for SimpleCache, and we don't
  // care about reduced precision in index here.
  while (base::Time::NowFromSystemTime() <=
         (entry1_timestamp + base::Milliseconds(10))) {
    base::PlatformThread::Sleep(base::Milliseconds(1));
  }

  // Writing entry2 advances the cache directory's mtime past entry1's.
  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  entry2->Close();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  disk_cache::Entry* reopen_entry1;
  ASSERT_THAT(OpenEntry(key1, &reopen_entry1), IsOk());

  // This shouldn't pick up entry2's write time incorrectly.
  EXPECT_LE(reopen_entry1->GetLastModified(), entry1_timestamp);
  reopen_entry1->Close();
}
4509 
// Exercises the file-descriptor limiter: with the fixture's 64-file limit,
// keeping kLargeNumEntries entries open forces backing files to be closed and
// transparently reopened, all recorded in the
// SimpleCache.FileDescriptorLimiterAction histogram, whose bucket counts are
// verified after each phase.
TEST_F(DiskCacheBackendTest, SimpleFdLimit) {
  base::HistogramTester histogram_tester;
  SetSimpleCacheMode();
  // Make things blocking so CreateEntry actually waits for file to be
  // created.
  SetCacheType(net::APP_CACHE);
  InitCache();

  disk_cache::Entry* entries[kLargeNumEntries];
  std::string keys[kLargeNumEntries];
  for (int i = 0; i < kLargeNumEntries; ++i) {
    keys[i] = GenerateKey(true);
    ASSERT_THAT(CreateEntry(keys[i], &entries[i]), IsOk());
  }

  // Note the fixture sets the file limit to 64.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  const int kSize = 25000;
  auto buf1 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  auto buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  CacheTestFillBuffer(buf2->data(), kSize, false);

  // Doom an entry and create a new one with same name, to test that both
  // re-open properly.
  EXPECT_EQ(net::OK, DoomEntry(keys[0]));
  disk_cache::Entry* alt_entry;
  ASSERT_THAT(CreateEntry(keys[0], &alt_entry), IsOk());

  // One more file closure here to accommodate for alt_entry.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64 + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  // Do some writes in [1...kLargeNumEntries) range, both testing bring those in
  // and kicking out [0] and [alt_entry]. These have to be to stream != 0 to
  // actually need files.
  for (int i = 1; i < kLargeNumEntries; ++i) {
    EXPECT_EQ(kSize, WriteData(entries[i], 1, 0, buf1.get(), kSize, true));
    auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
    ASSERT_EQ(kSize, ReadData(entries[i], 1, 0, read_buf.get(), kSize));
    EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));
  }

  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);
  EXPECT_EQ(kSize, WriteData(entries[0], 1, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(alt_entry, 1, 0, buf2.get(), kSize, true));

  auto read_buf = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  ASSERT_EQ(kSize, ReadData(entries[0], 1, 0, read_buf.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));

  auto read_buf2 = base::MakeRefCounted<net::IOBufferWithSize>(kSize);
  ASSERT_EQ(kSize, ReadData(alt_entry, 1, 0, read_buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf2->data(), buf2->data(), kSize));

  // Two more things than last time --- entries[0] and |alt_entry|
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  for (auto* entry : entries) {
    entry->Close();
    RunUntilIdle();
  }
  alt_entry->Close();
  RunUntilIdle();

  // Closes have to pull things in to write out the footer, but they also
  // free up FDs.
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_REOPEN_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);
}
4617 
TEST_F(DiskCacheBackendTest, SparseEvict) {
  const int kMaxSize = 512;
  SetMaxSize(kMaxSize);
  InitCache();

  auto chunk = base::MakeRefCounted<net::IOBufferWithSize>(64);
  CacheTestFillBuffer(chunk->data(), 64, false);

  disk_cache::Entry* entry0 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &entry0), IsOk());

  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.1.com/", &entry1), IsOk());

  // This strange looking domain name affects cache trim order due to hashing.
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.15360.com/", &entry2), IsOk());

  // Write sparse data to put us over the eviction threshold.
  ASSERT_EQ(64, WriteSparseData(entry0, 0, chunk.get(), 64));
  ASSERT_EQ(1, WriteSparseData(entry0, 67108923, chunk.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry1, 53, chunk.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry2, 0, chunk.get(), 1));

  // Closing these in a special order should not lead to buggy reentrant
  // eviction.
  entry1->Close();
  entry2->Close();
  entry0->Close();
}
4650 
TEST_F(DiskCacheBackendTest, InMemorySparseDoom) {
  const int kMaxSize = 512;
  SetMaxSize(kMaxSize);
  SetMemoryOnlyMode();
  InitCache();

  auto data = base::MakeRefCounted<net::IOBufferWithSize>(64);
  CacheTestFillBuffer(data->data(), 64, false);

  disk_cache::Entry* parent = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &parent), IsOk());

  // This sparse write is expected to fail, but it still leaves sparse
  // child state behind the parent entry.
  ASSERT_EQ(net::ERR_FAILED, WriteSparseData(parent, 4337, data.get(), 64));
  parent->Close();

  // Dooming all entries at this point should properly iterate over
  // the parent and its children.
  DoomAllEntries();
}
4671 
TEST_F(DiskCacheBackendTest, BlockFileMaxSizeLimit) {
  InitCache();

  // The blockfile backend accepts a maximum size up to INT32_MAX exactly;
  // one byte more must be rejected.
  int64_t max_size = std::numeric_limits<int32_t>::max();
  SetMaxSize(max_size, /*should_succeed=*/true);
  SetMaxSize(max_size + 1, /*should_succeed=*/false);
}
4681 
TEST_F(DiskCacheBackendTest, InMemoryMaxSizeLimit) {
  SetMemoryOnlyMode();
  InitCache();

  // The in-memory backend accepts a maximum size up to INT32_MAX exactly;
  // one byte more must be rejected.
  int64_t max_size = std::numeric_limits<int32_t>::max();
  SetMaxSize(max_size, /*should_succeed=*/true);
  SetMaxSize(max_size + 1, /*should_succeed=*/false);
}
4692 
TEST_F(DiskCacheBackendTest, SimpleMaxSizeLimit) {
  SetSimpleCacheMode();
  InitCache();

  // Unlike the other backends, the simple cache accepts sizes beyond
  // INT32_MAX as well.
  int64_t max_size = std::numeric_limits<int32_t>::max();
  SetMaxSize(max_size, /*should_succeed=*/true);
  SetMaxSize(max_size + 1, /*should_succeed=*/true);
}
4703 
// Covers Backend::OpenOrCreateEntry(): creation of a new key, open of an
// existing key, distinctness across keys, re-creation after Doom, and
// cancellation of a pending callback when the backend is destroyed.
void DiskCacheBackendTest::BackendOpenOrCreateEntry() {
  // Avoid the weird kNoRandom flag on blockfile, since this needs to
  // test cleanup behavior actually used in production.
  if (memory_only_) {
    InitCache();
  } else {
    CleanupCacheDir();
    // Since we're not forcing a clean shutdown, integrity check may fail.
    DisableIntegrityCheck();
    CreateBackend(disk_cache::kNone);
  }

  // Test that new key is created.
  disk_cache::EntryResult es1 = OpenOrCreateEntry("first");
  ASSERT_THAT(es1.net_error(), IsOk());
  ASSERT_FALSE(es1.opened());
  disk_cache::Entry* e1 = es1.ReleaseEntry();
  ASSERT_TRUE(nullptr != e1);

  // Test that existing key is opened and its entry matches.
  disk_cache::EntryResult es2 = OpenOrCreateEntry("first");
  ASSERT_THAT(es2.net_error(), IsOk());
  ASSERT_TRUE(es2.opened());
  disk_cache::Entry* e2 = es2.ReleaseEntry();
  ASSERT_TRUE(nullptr != e2);
  ASSERT_EQ(e1, e2);

  // Test that different keys' entries are not the same.
  disk_cache::EntryResult es3 = OpenOrCreateEntry("second");
  ASSERT_THAT(es3.net_error(), IsOk());
  ASSERT_FALSE(es3.opened());
  disk_cache::Entry* e3 = es3.ReleaseEntry();
  ASSERT_TRUE(nullptr != e3);
  ASSERT_NE(e3, e1);

  // Test that a new entry can be created with the same key as a doomed entry.
  e3->Doom();
  disk_cache::EntryResult es4 = OpenOrCreateEntry("second");
  ASSERT_THAT(es4.net_error(), IsOk());
  ASSERT_FALSE(es4.opened());
  disk_cache::Entry* e4 = es4.ReleaseEntry();
  ASSERT_TRUE(nullptr != e4);
  ASSERT_NE(e4, e3);

  // Verify the expected number of entries ("first" plus the re-created
  // "second"; the doomed one no longer counts).
  ASSERT_EQ(2, cache_->GetEntryCount());

  e1->Close();
  e2->Close();
  e3->Close();
  e4->Close();

  // Test proper cancellation of callback. In-memory cache
  // is always synchronous, so this isn't meaningful for it.
  if (!memory_only_) {
    TestEntryResultCompletionCallback callback;

    // Using "first" here:
    // 1) It's an existing entry, so SimpleCache can't cheat with an optimistic
    //    create.
    // 2) "second"'s creation is a cheated post-doom create one, which also
    //    makes testing trickier.
    EntryResult result =
        cache_->OpenOrCreateEntry("first", net::HIGHEST, callback.callback());
    ASSERT_EQ(net::ERR_IO_PENDING, result.net_error());
    ResetCaches();

    // Callback is supposed to be cancelled, so have to flush everything
    // to check for any trouble.
    disk_cache::FlushCacheThreadForTesting();
    RunUntilIdle();
    EXPECT_FALSE(callback.have_result());
  }
}
4778 
// Runs the OpenOrCreateEntry coverage against the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemoryOnlyOpenOrCreateEntry) {
  SetMemoryOnlyMode();
  BackendOpenOrCreateEntry();
}
4783 
// Runs the OpenOrCreateEntry coverage against the blockfile backend.
TEST_F(DiskCacheBackendTest, MAYBE_BlockFileOpenOrCreateEntry) {
  BackendOpenOrCreateEntry();
}
4787 
// Runs the OpenOrCreateEntry coverage against the Simple Cache backend.
TEST_F(DiskCacheBackendTest, MAYBE_SimpleOpenOrCreateEntry) {
  SetSimpleCacheMode();
  BackendOpenOrCreateEntry();
}
4792 
BackendDeadOpenNextEntry()4793 void DiskCacheBackendTest::BackendDeadOpenNextEntry() {
4794   InitCache();
4795   std::unique_ptr<disk_cache::Backend::Iterator> iter =
4796       cache_->CreateIterator();
4797   ResetCaches();
4798   EntryResult result = iter->OpenNextEntry(base::DoNothing());
4799   ASSERT_EQ(net::ERR_FAILED, result.net_error());
4800 }
4801 
// Dead-iterator coverage for the blockfile backend.
TEST_F(DiskCacheBackendTest, BlockFileBackendDeadOpenNextEntry) {
  BackendDeadOpenNextEntry();
}
4805 
// Dead-iterator coverage for the Simple Cache backend.
TEST_F(DiskCacheBackendTest, SimpleBackendDeadOpenNextEntry) {
  SetSimpleCacheMode();
  BackendDeadOpenNextEntry();
}
4810 
// Dead-iterator coverage for the in-memory backend.
TEST_F(DiskCacheBackendTest, InMemorySimpleBackendDeadOpenNextEntry) {
  SetMemoryOnlyMode();
  BackendDeadOpenNextEntry();
}
4815 
// Checks that an in-flight iterator tolerates DoomAllEntries() happening
// between OpenNextEntry() calls: the next advance may legitimately either
// fail or succeed, but must not crash.
void DiskCacheBackendTest::BackendIteratorConcurrentDoom() {
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  EXPECT_EQ(net::OK, CreateEntry("Key0", &entry1));
  EXPECT_EQ(net::OK, CreateEntry("Key1", &entry2));

  std::unique_ptr<disk_cache::Backend::Iterator> iter =
      cache_->CreateIterator();

  // Hold an extra open reference to "Key0" across the doom below.
  disk_cache::Entry* entry3 = nullptr;
  EXPECT_EQ(net::OK, OpenEntry("Key0", &entry3));

  TestEntryResultCompletionCallback cb;
  EntryResult result_iter = iter->OpenNextEntry(cb.callback());
  result_iter = cb.GetResult(std::move(result_iter));
  EXPECT_EQ(net::OK, result_iter.net_error());

  net::TestCompletionCallback cb_doom;
  int rv_doom = cache_->DoomAllEntries(cb_doom.callback());
  EXPECT_EQ(net::OK, cb_doom.GetResult(rv_doom));

  // After the doom, advancing the iterator may see either outcome.
  TestEntryResultCompletionCallback cb2;
  EntryResult result_iter2 = iter->OpenNextEntry(cb2.callback());
  result_iter2 = cb2.GetResult(std::move(result_iter2));

  EXPECT_TRUE(result_iter2.net_error() == net::ERR_FAILED ||
              result_iter2.net_error() == net::OK);

  entry1->Close();
  entry2->Close();
  entry3->Close();
}
4848 
// Concurrent-doom iterator coverage for the blockfile backend.
TEST_F(DiskCacheBackendTest, BlockFileIteratorConcurrentDoom) {
  // Init in normal mode, bug not reproducible with kNoRandom. Still need to
  // let the test fixture know the new eviction algorithm will be on.
  CleanupCacheDir();
  SetNewEviction();
  CreateBackend(disk_cache::kNone);
  BackendIteratorConcurrentDoom();
}
4857 
// Simple-cache flavor of the iterator-vs-doom scenario.
TEST_F(DiskCacheBackendTest, SimpleIteratorConcurrentDoom) {
  SetSimpleCacheMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}
4863 
// Memory-only flavor of the iterator-vs-doom scenario.
TEST_F(DiskCacheBackendTest, InMemoryConcurrentDoom) {
  SetMemoryOnlyMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}
4869 
// A corrupt fake index in an otherwise empty Simple Cache directory must be
// recoverable at backend creation, even under kNeverReset, because there are
// no entry files whose data could be lost.
TEST_F(DiskCacheBackendTest, EmptyCorruptSimpleCacheRecovery) {
  SetSimpleCacheMode();

  const std::string kGarbage("corrupted");

  // Plant a bogus fake index into the (otherwise empty) cache directory.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath fake_index = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(fake_index, kGarbage));

  TestBackendResultCompletionCallback cb;

  // Backend creation should still succeed.
  disk_cache::BackendResult result = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  result = cb.GetResult(std::move(result));
  EXPECT_THAT(result.net_error, IsOk());
}
4890 
// Counterpart of EmptyCorruptSimpleCacheRecovery: once the cache contains
// entry files, a corrupt fake index under kNeverReset must fail backend
// creation instead of silently wiping data.
TEST_F(DiskCacheBackendTest, MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover) {
  SetSimpleCacheMode();
  // Populates the cache with at least one entry.
  BackendOpenOrCreateEntry();

  const std::string kCorruptData("corrupted");

  // Corrupt the fake index file for the populated simple cache.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_TRUE(base::WriteFile(index, kCorruptData));

  TestBackendResultCompletionCallback cb;

  // Simple cache should not be able to recover when there are entry files.
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsError(net::ERR_FAILED));
}
4912 
// Covers the window where the backend is destroyed after an entry has been
// handed off toward a completion callback but before that callback runs.
TEST_F(DiskCacheBackendTest, SimpleOwnershipTransferBackendDestroyRace) {
  // Records its own destruction so the test can verify that the callback's
  // bound state was freed even though the callback itself never ran.
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() {
      *ran_ptr = true;
    }

    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // This test was for a fix for see https://crbug.com/946349, but the mechanics
  // of that failure became impossible after a follow up API refactor. Still,
  // the timing is strange, and warrant coverage; in particular this tests what
  // happen if the SimpleBackendImpl is destroyed after SimpleEntryImpl
  // decides to return an entry to the caller, but before the callback is run.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  // The OpenEntry code below will find a pre-existing entry in a READY state,
  // so it will immediately post a task to return a result. Destroying the
  // backend before running the event loop again will run that callback in the
  // dead-backend state, while OpenEntry completion was still with it alive.

  EntryResult result = cache_->OpenEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            // The callback is here for ownership of CleanupContext,
            // and it shouldn't get invoked in this test. Normal
            // one would transfer result.entry to CleanupContext.
            ADD_FAILURE() << "This should not actually run";

            // ... but if it ran, it also shouldn't see the pointer.
            EXPECT_EQ(nullptr, result.ReleaseEntry());
          },
          std::move(cleanup_context)));
  EXPECT_EQ(net::ERR_IO_PENDING, result.net_error());
  ResetCaches();

  // Give CleanupContext a chance to do its thing.
  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);

  entry->Close();
}
4968 
// Verify that reloading the cache with ResetHandling::kNeverReset preserves
// previously written entries across backend re-creation.
TEST_F(DiskCacheBackendTest, SimpleCacheSoftResetKeepsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  {  // Do the initial cache creation then delete the values.
    TestBackendResultCompletionCallback cb;

    // Create an initial back-end and wait for indexing
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache
    CreateKeyAndCheck(cache.get(), "key");
  }
  // First backend is destroyed at the end of the scope above.

  RunUntilIdle();

  {  // Do the second cache creation with no reset flag, preserving entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry should be present, as a forced reset was not called for.
    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}
5015 
// Verify that reloading the cache with ResetHandling::kReset discards all
// previously written entries, and that the reset cache is usable afterwards.
TEST_F(DiskCacheBackendTest, SimpleCacheHardResetDropsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  {  // Create the initial back-end.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache.
    CreateKeyAndCheck(cache.get(), "key");
  }
  // First backend is destroyed at the end of the scope above.

  RunUntilIdle();

  {  // Re-load cache with a reset flag, which should ignore existing entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kReset, /*net_log=*/nullptr,
        cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry shouldn't be present, as a forced reset was called for.
    EXPECT_FALSE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                     ->index()
                     ->Has(disk_cache::simple_util::GetEntryHashKey("key")));

    // Add the entry back in the cache, then make sure it's present.
    CreateKeyAndCheck(cache.get(), "key");

    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}
5068 
// Test to make sure cancelation of backend operation that got queued after
// a pending doom on backend destruction happens properly.
TEST_F(DiskCacheBackendTest, SimpleCancelOpPendingDoom) {
  // Records its own destruction, proving that the bound callback state was
  // released even though the callback never ran.
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() { *ran_ptr = true; }

    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // Disable optimistic ops.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Queue doom.
  cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());

  // Queue create after it.
  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  EntryResult entry_result = cache_->CreateEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            ADD_FAILURE() << "This should not actually run";
          },
          std::move(cleanup_context)));

  EXPECT_EQ(net::ERR_IO_PENDING, entry_result.net_error());
  // Destroy the backend while both operations are still queued.
  ResetCaches();

  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);
}
5111 
// Regression test: an optimistically-created entry queued behind a pending
// doom must remain operable (and not leak) if the backend dies first.
TEST_F(DiskCacheBackendTest, SimpleDontLeakPostDoomCreate) {
  // If an entry has been optimistically created after a pending doom, and the
  // backend destroyed before the doom completed, the entry would get wedged,
  // with no operations on it workable and entry leaked.
  // (See https://crbug.com/1015774).
  const char kKey[] = "for_lock";
  const int kBufSize = 2 * 1024;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  // Queue doom.
  int rv = cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::ERR_IO_PENDING, rv);

  // And then do a create. This actually succeeds optimistically.
  EntryResult result =
      cache_->CreateEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  // Kill the backend while the doom is still pending.
  ResetCaches();

  // Entry is still supposed to be operable. This part is needed to see the bug
  // without a leak checker.
  EXPECT_EQ(kBufSize, WriteData(entry, 1, 0, buffer.get(), kBufSize, false));

  entry->Close();

  // Should not have leaked files here.
}
5152 
// Blockfile must report an error (not DCHECK) when reading a sparse child
// entry whose delayed writeback previously failed.
TEST_F(DiskCacheBackendTest, BlockFileDelayedWriteFailureRecovery) {
  // Test that blockfile recovers appropriately when some entries are
  // in a screwed up state due to an error in delayed writeback.
  //
  // https://crbug.com/1086727
  InitCache();

  const char kKey[] = "Key2";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kBufSize = 24320;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  ASSERT_EQ(kBufSize, WriteSparseData(entry, 0, buffer.get(), kBufSize));

  // Setting the size limit artificially low injects a failure on writing back
  // data buffered above.
  SetMaxSize(4096);

  // This causes SparseControl to close the child entry corresponding to
  // low portion of offset space, triggering the writeback --- which fails
  // due to the space cap, and in particular fails to allocate data for
  // a stream, so it gets address 0.
  ASSERT_EQ(net::ERR_FAILED, WriteSparseData(entry, 16773118, buffer.get(), 4));

  // Now try reading the broken child. This should report an error, not
  // DCHECK.
  ASSERT_EQ(net::ERR_FAILED, ReadSparseData(entry, 4, buffer.get(), 4));

  entry->Close();
}
5186 
// Rankings lists must not get corrupted by aliasing between an iterator's
// copy of ranking nodes and the live lists during insertion operations.
TEST_F(DiskCacheBackendTest, BlockFileInsertAliasing) {
  // Test for not having rankings corruption due to aliasing between iterator
  // and other ranking list copies during insertion operations.
  //
  // https://crbug.com/1156288

  // Need to disable weird extra sync behavior to hit the bug.
  CreateBackend(disk_cache::kNone);
  SetNewEviction();  // default, but integrity check doesn't realize that.

  const char kKey[] = "Key0";
  const char kKeyA[] = "KeyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA41";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kBufSize = 61188;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  // Issue two overlapping-in-time sparse writes; the offsets land in
  // different child entries.
  net::TestCompletionCallback cb_write64;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(8, buffer.get(), 64, cb_write64.callback()));

  net::TestCompletionCallback cb_write61k;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(16773118, buffer.get(), 61188,
                                   cb_write61k.callback()));

  EXPECT_EQ(64, cb_write64.WaitForResult());
  EXPECT_EQ(61188, cb_write61k.WaitForResult());

  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));

  // Interleave iteration with further sparse writes.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));
  EXPECT_EQ(64, WriteSparseData(entry, 8, buffer.get(), 64));

  disk_cache::Entry* itEntry1 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry1));
  // These are actually child nodes for range.

  entry->Close();

  disk_cache::Entry* itEntry2 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry2));

  // Doom everything while the iterator still holds ranking copies, then
  // immediately re-create the same key.
  net::TestCompletionCallback doom_cb;
  EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomAllEntries(doom_cb.callback()));

  TestEntryResultCompletionCallback cb_create1;
  disk_cache::EntryResult result =
      cache_->CreateEntry(kKey, net::HIGHEST, cb_create1.callback());
  EXPECT_EQ(net::OK, doom_cb.WaitForResult());
  result = cb_create1.WaitForResult();
  EXPECT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  disk_cache::Entry* entryA = nullptr;
  ASSERT_THAT(CreateEntry(kKeyA, &entryA), IsOk());
  entryA->Close();

  disk_cache::Entry* itEntry3 = nullptr;
  EXPECT_EQ(net::OK, iter->OpenNextEntry(&itEntry3));

  EXPECT_EQ(net::OK, DoomEntry(kKeyA));
  itEntry1->Close();
  entry->Close();
  itEntry2->Close();
  // itEntry3 was only EXPECTed, so it may be null on failure.
  if (itEntry3)
    itEntry3->Close();
}
5258 
// The in-memory backend must tolerate the wall clock moving backwards:
// entries created "in the past" still count toward full-range size queries
// and are removed by a full-range doom.
TEST_F(DiskCacheBackendTest, MemCacheBackwardsClock) {
  base::SimpleTestClock test_clock;
  test_clock.SetNow(base::Time::Now());

  SetMemoryOnlyMode();
  InitCache();
  mem_cache_->SetClockForTesting(&test_clock);

  const int kPayloadSize = 4 * 1024;
  auto payload = base::MakeRefCounted<net::IOBufferWithSize>(kPayloadSize);
  CacheTestFillBuffer(payload->data(), kPayloadSize, true);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key1", &entry), IsOk());
  EXPECT_EQ(kPayloadSize,
            WriteData(entry, 0, 0, payload.get(), kPayloadSize, false));
  entry->Close();

  // Jump the clock an hour into the past before the second entry.
  test_clock.Advance(-base::Hours(1));

  ASSERT_THAT(CreateEntry("key2", &entry), IsOk());
  EXPECT_EQ(kPayloadSize,
            WriteData(entry, 0, 0, payload.get(), kPayloadSize, false));
  entry->Close();

  // Both entries are visible over the full time range...
  EXPECT_LE(2 * kPayloadSize,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  // ...and a full-range doom clears everything out.
  EXPECT_EQ(net::OK, DoomEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfAllEntries());

  // Detach the stack-allocated clock before it goes out of scope.
  mem_cache_->SetClockForTesting(nullptr);
}
5292 
// OpenOrCreateEntry must still open the on-disk entry even when the index
// wrongly claims it does not exist.
TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexError) {
  // Exercise behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing. Regression test for
  // https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  const int kBufSize = 256;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  SetSimpleCacheMode();
  InitCache();

  // Create an entry.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  EXPECT_EQ(kBufSize, WriteData(entry, /*index=*/1, /*offset=*/0, buffer.get(),
                                /*len=*/kBufSize, /*truncate=*/false));
  entry->Close();

  // Mess up the index to say it's not there.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // Reopening with OpenOrCreateEntry should still work.
  disk_cache::EntryResult result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(result.net_error(), IsOk());
  ASSERT_TRUE(result.opened());
  entry = result.ReleaseEntry();
  // The original payload must still be there, proving an open, not create.
  EXPECT_EQ(kBufSize, entry->GetDataSize(/*index=*/1));
  entry->Close();
}
5326 
// Optimistic-create variant: if the backend trusts the (wrong) index and
// optimistically creates, the caller must get a fresh empty entry.
TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexErrorOptimistic) {
  // Exercise behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing and we do an optimistic create.
  // Covers a codepath adjacent to the one that caused https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  SetSimpleCacheMode();
  InitCache();

  const int kBufSize = 256;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  // Create an entry.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  EXPECT_EQ(kBufSize, WriteData(entry, /*index=*/1, /*offset=*/0, buffer.get(),
                                /*len=*/kBufSize, /*truncate=*/false));
  entry->Close();

  // Let all the I/O finish, so that OpenOrCreateEntry can try optimistic path.
  RunUntilIdle();

  // Mess up the index to say it's not there.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // Reopening with OpenOrCreateEntry should still work, but since the backend
  // chose to be optimistic based on index, the result should be a fresh empty
  // entry.
  disk_cache::EntryResult result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(result.net_error(), IsOk());
  ASSERT_FALSE(result.opened());
  entry = result.ReleaseEntry();
  EXPECT_EQ(0, entry->GetDataSize(/*index=*/1));
  entry->Close();
}
5364 
// Dooming an entry after its backend is gone must not crash during the
// close-time validation of file headers/footers.
TEST_F(DiskCacheBackendTest, SimpleDoomAfterBackendDestruction) {
  // Test for when validating file headers/footers during close on simple
  // backend fails. To get the header to be checked on close, there needs to be
  // a stream 2, since 0/1 are validated on open, and no other operation must
  // have happened to stream 2, since those will force it, too. A way of getting
  // the validation to fail is to perform a doom on the file after the backend
  // is destroyed, since that will truncated the files to mark them invalid. See
  // https://crbug.com/1317884
  const char kKey[] = "Key0";

  const int kBufSize = 256;
  auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  SetCacheType(net::SHADER_CACHE);
  SetSimpleCacheMode();

  InitCache();
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  // Zero-length write just to bring stream 2 into existence.
  EXPECT_EQ(0, WriteData(entry, /*index=*/2, /*offset=*/1, buffer.get(),
                         /*len=*/0, /*truncate=*/false));
  entry->Close();

  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  ResetCaches();

  // Doom with the backend already destroyed; close then validates stream 2.
  entry->Doom();
  entry->Close();
}
5396 
BackendValidateMigrated()5397 void DiskCacheBackendTest::BackendValidateMigrated() {
5398   // Blockfile 3.0 migration test.
5399   DisableFirstCleanup();  // started from copied dir, not cleaned dir.
5400   InitCache();
5401 
5402   // The total size comes straight from the headers, and is expected to be 1258
5403   // for either set of testdata.
5404   EXPECT_EQ(1258, CalculateSizeOfAllEntries());
5405   EXPECT_EQ(1, cache_->GetEntryCount());
5406 
5407   disk_cache::Entry* entry = nullptr;
5408   ASSERT_THAT(OpenEntry("https://example.org/data", &entry), IsOk());
5409 
5410   // Size of the actual payload.
5411   EXPECT_EQ(1234, entry->GetDataSize(1));
5412 
5413   entry->Close();
5414 }
5415 
// Migration from on-disk format 2.0, default eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrate20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  BackendValidateMigrated();
}
5420 
// Migration from on-disk format 2.1, default eviction.
TEST_F(DiskCacheBackendTest, BlockfileMigrate21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  BackendValidateMigrated();
}
5425 
// Migration from on-disk format 2.0 with the new eviction algorithm on.
TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  SetNewEviction();
  BackendValidateMigrated();
}
5431 
// Migration from on-disk format 2.1 with the new eviction algorithm on.
TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  SetNewEviction();
  BackendValidateMigrated();
}
5437 
5438 // Disabled on android since this test requires cache creator to create
5439 // blockfile caches, and we don't use them on Android anyway.
5440 #if !BUILDFLAG(IS_ANDROID)
// A zero-length blockfile index must produce a clean failure (and recovery
// with kResetOnError), not a DCHECK on mmap.
TEST_F(DiskCacheBackendTest, BlockfileEmptyIndex) {
  // Regression case for https://crbug.com/1441330 --- blockfile DCHECKing
  // on mmap error for files it uses.

  // Create a cache.
  TestBackendResultCompletionCallback cb;
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  rv.backend.reset();

  // Make sure it's done doing I/O stuff.
  disk_cache::BackendImpl::FlushForTesting();

  // Truncate the index to zero bytes.
  base::File index(cache_path_.AppendASCII("index"),
                   base::File::FLAG_OPEN | base::File::FLAG_WRITE);
  ASSERT_TRUE(index.IsValid());
  ASSERT_TRUE(index.SetLength(0));
  index.Close();

  // Open the backend again. Fails w/o error-recovery.
  rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_EQ(rv.net_error, net::ERR_FAILED);
  EXPECT_FALSE(rv.backend);

  // Now try again with the "delete and start over on error" flag people
  // normally use.
  rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kResetOnError, nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
}
5485 #endif
5486 
// Dooming an entry delivered by an iterator while DoomAllEntries is in
// flight must not misbehave. See https://crbug.com/1486958
TEST_F(DiskCacheBackendTest, SimpleDoomIter) {
  const int kEntries = 1000;

  SetSimpleCacheMode();
  // Note: this test relies on InitCache() making sure the index is ready.
  InitCache();

  // We create a whole bunch of entries so that deleting them will hopefully
  // finish after the iteration, in order to reproduce timing for the bug.
  for (int i = 0; i < kEntries; ++i) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(base::NumberToString(i), &entry), IsOk());
    entry->Close();
  }
  RunUntilIdle();  // Make sure close completes.

  auto iterator = cache_->CreateIterator();
  base::RunLoop run_loop;

  disk_cache::EntryResult result = iterator->OpenNextEntry(
      base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
        ASSERT_EQ(result.net_error(), net::OK);
        disk_cache::Entry* entry = result.ReleaseEntry();
        // Doom the iterated entry while the mass-doom below is in flight.
        entry->Doom();
        entry->Close();
        run_loop.Quit();
      }));
  ASSERT_EQ(result.net_error(), net::ERR_IO_PENDING);
  // Kick off the mass doom before the iteration callback gets to run.
  cache_->DoomAllEntries(base::DoNothing());
  run_loop.Run();
}
5519 
// Iterates the whole cache while simultaneously opening every entry by name,
// in both a healthy and a corrupted-files configuration.
// See https://crbug.com/1486958 for non-corrupting version,
// https://crbug.com/1510452 for corrupting one.
TEST_F(DiskCacheBackendTest, SimpleOpenIter) {
  constexpr int kEntries = 50;

  SetSimpleCacheMode();

  for (bool do_corrupt : {false, true}) {
    SCOPED_TRACE(do_corrupt);

    // Note: this test relies on InitCache() making sure the index is ready.
    InitCache();

    // We create a whole bunch of entries so that deleting them will hopefully
    // finish after the iteration, in order to reproduce timing for the bug.
    for (int i = 0; i < kEntries; ++i) {
      disk_cache::Entry* entry = nullptr;
      ASSERT_THAT(CreateEntry(base::NumberToString(i), &entry), IsOk());
      entry->Close();
    }
    RunUntilIdle();  // Make sure close completes.
    EXPECT_EQ(kEntries, cache_->GetEntryCount());

    // Iterate once to get the order.
    std::list<std::string> keys;
    auto iterator = cache_->CreateIterator();
    base::RunLoop run_loop;
    // Self-rescheduling callback: collects each key, then asks for the next
    // entry; captures itself by reference via the surrounding variable.
    base::RepeatingCallback<void(EntryResult)> collect_entry_key =
        base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
          if (result.net_error() == net::ERR_FAILED) {
            run_loop.Quit();
            return;  // iteration complete.
          }
          ASSERT_EQ(result.net_error(), net::OK);
          disk_cache::Entry* entry = result.ReleaseEntry();
          keys.push_back(entry->GetKey());
          entry->Close();
          result = iterator->OpenNextEntry(collect_entry_key);
          EXPECT_EQ(result.net_error(), net::ERR_IO_PENDING);
        });

    disk_cache::EntryResult result = iterator->OpenNextEntry(collect_entry_key);
    ASSERT_EQ(result.net_error(), net::ERR_IO_PENDING);
    run_loop.Run();

    // Corrupt all the files, if we're exercising that.
    if (do_corrupt) {
      for (const auto& key : keys) {
        EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
            key, cache_path_));
      }
    }

    // Open all entries with iterator...
    int opened = 0;
    int iter_opened = 0;
    bool iter_done = false;
    auto all_done = [&]() { return opened == kEntries && iter_done; };

    iterator = cache_->CreateIterator();
    base::RunLoop run_loop2;
    // Self-rescheduling iteration: counts entries seen; when corrupting, the
    // iteration is expected to yield nothing.
    base::RepeatingCallback<void(EntryResult)> handle_entry =
        base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
          ++iter_opened;
          if (result.net_error() == net::ERR_FAILED) {
            EXPECT_EQ(iter_opened - 1, do_corrupt ? 0 : kEntries);
            iter_done = true;
            if (all_done()) {
              run_loop2.Quit();
            }
            return;  // iteration complete.
          }
          EXPECT_EQ(result.net_error(), net::OK);
          result = iterator->OpenNextEntry(handle_entry);
          EXPECT_EQ(result.net_error(), net::ERR_IO_PENDING);
        });

    result = iterator->OpenNextEntry(handle_entry);
    ASSERT_EQ(result.net_error(), net::ERR_IO_PENDING);

    // ... while simultaneously opening them via name.
    auto handle_open_result =
        base::BindLambdaForTesting([&](disk_cache::EntryResult result) {
          int expected_status = do_corrupt ? net::ERR_FAILED : net::OK;
          if (result.net_error() == expected_status) {
            ++opened;
          }
          if (all_done()) {
            run_loop2.Quit();
          }
        });

    // Posts itself until every collected key has been opened, interleaving
    // with the iteration tasks above.
    base::RepeatingClosure open_one_entry = base::BindLambdaForTesting([&]() {
      std::string key = keys.front();
      keys.pop_front();
      disk_cache::EntryResult result =
          cache_->OpenEntry(key, net::DEFAULT_PRIORITY, handle_open_result);
      if (result.net_error() != net::ERR_IO_PENDING) {
        handle_open_result.Run(std::move(result));
      }

      if (!keys.empty()) {
        base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
            FROM_HERE, open_one_entry);
      }
    });
    base::SequencedTaskRunner::GetCurrentDefault()->PostTask(FROM_HERE,
                                                             open_one_entry);

    run_loop2.Run();

    // Should not have eaten any entries, if not corrupting them.
    EXPECT_EQ(do_corrupt ? 0 : kEntries, cache_->GetEntryCount());
  }
}
5635 
5636 // Make sure that if we close an entry in callback from open/create we do not
5637 // trigger dangling pointer warnings.
TEST_F(DiskCacheBackendTest,BlockFileImmediateCloseNoDangle)5638 TEST_F(DiskCacheBackendTest, BlockFileImmediateCloseNoDangle) {
5639   InitCache();
5640   base::RunLoop run_loop;
5641   EntryResult result =
5642       cache_->CreateEntry("some key", net::HIGHEST,
5643                           base::BindLambdaForTesting([&](EntryResult result) {
5644                             ASSERT_EQ(result.net_error(), net::OK);
5645                             result.ReleaseEntry()->Close();
5646                             // Make sure the close actually happens now.
5647                             disk_cache::BackendImpl::FlushForTesting();
5648                             run_loop.Quit();
5649                           }));
5650   EXPECT_EQ(result.net_error(), net::ERR_IO_PENDING);
5651   run_loop.Run();
5652 }
5653 
// Test that when a write causes a doom, it doesn't result in wrong delivery
// order of callbacks due to re-entrant operation execution.
TEST_F(DiskCacheBackendTest, SimpleWriteOrderEviction) {
  SetSimpleCacheMode();
  SetMaxSize(4096);
  InitCache();

  // Writes of [1, 2, ..., kMaxSize] are more than enough to trigger eviction,
  // as (1 + 80)*80/2 * 2 = 6480 (last * 2 since two streams are written).
  constexpr int kMaxSize = 80;

  scoped_refptr<net::IOBufferWithSize> buffer =
      CacheTestCreateAndFillBuffer(kMaxSize, /*no_nulls=*/false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_TRUE(entry);

  // Each loop iteration issues a stream-1 write followed by a stream-0 write
  // of the same size. Completion callbacks must strictly alternate in that
  // same order; `expected_next_write_stream_1` tracks whose turn it is and
  // `expected_next_write_size` the size (== expected result) of that write.
  bool expected_next_write_stream_1 = true;
  int expected_next_write_size = 1;
  int next_offset = 0;
  base::RunLoop run_loop;
  for (int size = 1; size <= kMaxSize; ++size) {
    entry->WriteData(/*index=*/1, /*offset = */ next_offset, buffer.get(),
                     /*buf_len=*/size,
                     base::BindLambdaForTesting([&](int result) {
                       // Stream 1's callback must land before the matching
                       // stream 0 one, even if a write triggered eviction.
                       EXPECT_TRUE(expected_next_write_stream_1);
                       EXPECT_EQ(result, expected_next_write_size);
                       expected_next_write_stream_1 = false;
                     }),
                     /*truncate=*/true);
    // Stream 0 writes are used here because unlike with stream 1 ones,
    // WriteDataInternal can succeed and queue response callback immediately.
    entry->WriteData(/*index=*/0, /*offset = */ next_offset, buffer.get(),
                     /*buf_len=*/size,
                     base::BindLambdaForTesting([&](int result) {
                       EXPECT_FALSE(expected_next_write_stream_1);
                       EXPECT_EQ(result, expected_next_write_size);
                       expected_next_write_stream_1 = true;
                       ++expected_next_write_size;
                       // All kMaxSize pairs completed: stop the loop.
                       if (expected_next_write_size == (kMaxSize + 1)) {
                         run_loop.Quit();
                       }
                     }),
                     /*truncate=*/true);
    next_offset += size;
  }

  entry->Close();
  run_loop.Run();
}
5705 
5706 // Test that when a write causes a doom, it doesn't result in wrong delivery
5707 // order of callbacks due to re-entrant operation execution. Variant that
5708 // uses stream 0 ops only.
TEST_F(DiskCacheBackendTest,SimpleWriteOrderEvictionStream0)5709 TEST_F(DiskCacheBackendTest, SimpleWriteOrderEvictionStream0) {
5710   SetSimpleCacheMode();
5711   SetMaxSize(4096);
5712   InitCache();
5713 
5714   // Writes of [1, 2, ..., kMaxSize] are more than enough to trigger eviction,
5715   // as (1 + 120)*120/2 = 7260.
5716   constexpr int kMaxSize = 120;
5717 
5718   scoped_refptr<net::IOBufferWithSize> buffer =
5719       CacheTestCreateAndFillBuffer(kMaxSize, /*no_nulls=*/false);
5720 
5721   disk_cache::Entry* entry = nullptr;
5722   ASSERT_THAT(CreateEntry("key", &entry), IsOk());
5723   ASSERT_TRUE(entry);
5724 
5725   int expected_next_write_size = 1;
5726   int next_offset = 0;
5727   base::RunLoop run_loop;
5728   for (int size = 1; size <= kMaxSize; ++size) {
5729     // Stream 0 writes are used here because unlike with stream 1 ones,
5730     // WriteDataInternal can succeed and queue response callback immediately.
5731     entry->WriteData(/*index=*/0, /*offset = */ next_offset, buffer.get(),
5732                      /*buf_len=*/size,
5733                      base::BindLambdaForTesting([&](int result) {
5734                        EXPECT_EQ(result, expected_next_write_size);
5735                        ++expected_next_write_size;
5736                        if (expected_next_write_size == (kMaxSize + 1)) {
5737                          run_loop.Quit();
5738                        }
5739                      }),
5740                      /*truncate=*/true);
5741     next_offset += size;
5742   }
5743 
5744   entry->Close();
5745   run_loop.Run();
5746 }
5747 
5748 // Test to make sure that if entry creation triggers eviction, a queued up
5749 // close (possible with optimistic ops) doesn't run from within creation
5750 // completion handler (which is indirectly detected as a dangling pointer).
TEST_F(DiskCacheBackendTest,SimpleNoCloseFromWithinCreate)5751 TEST_F(DiskCacheBackendTest, SimpleNoCloseFromWithinCreate) {
5752   SetSimpleCacheMode();
5753   SetMaxSize(4096);
5754   InitCache();
5755 
5756   // Make entries big enough to force their eviction.
5757   constexpr int kDataSize = 4097;
5758 
5759   auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(kDataSize);
5760   CacheTestFillBuffer(buffer->data(), kDataSize, false);
5761 
5762   for (int i = 0; i < 100; ++i) {
5763     std::string key = base::NumberToString(i);
5764     EntryResult entry_result =
5765         cache_->CreateEntry(key, net::HIGHEST, base::DoNothing());
5766     ASSERT_EQ(entry_result.net_error(), net::OK);
5767     disk_cache::Entry* entry = entry_result.ReleaseEntry();
5768     // Doing stream 0 write to avoid need for thread round-trips for it to take
5769     // effect if SimpleEntryImpl runs it.
5770     entry->WriteData(/*index=*/0, /*offset = */ 0, buffer.get(),
5771                      /*buf_len=*/kDataSize,
5772                      base::BindLambdaForTesting(
5773                          [&](int result) { EXPECT_EQ(kDataSize, result); }),
5774                      /*truncate=*/true);
5775     entry->Close();
5776   }
5777   RunUntilIdle();
5778 }
5779