// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/backend_impl.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <utility>

#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/functional/bind.h"
#include "base/functional/callback_helpers.h"
#include "base/hash/hash.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/message_loop/message_pump_type.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram_functions.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
#include "base/system/sys_info.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/base/tracing.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/file.h"
#include "net/disk_cache/cache_util.h"

using base::Time;
using base::TimeTicks;

namespace {

const char kIndexName[] = "index";

// Seems like ~240 MB corresponds to fewer than 50k entries for 99% of users.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32_t storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32_t requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

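// Worked example (added commentary, not part of the original source): with
// the base table of 64K buckets and 4-byte cache addresses, the index file
// takes
//   GetIndexSize(64 * 1024) = sizeof(disk_cache::IndexHeader)
//                             + 64 * 1024 * sizeof(disk_cache::CacheAddr)
//                           = sizeof(disk_cache::IndexHeader) + 256 KB.
// A 500 MB cache falls between k64kEntriesStore * 2 and k64kEntriesStore * 4,
// so DesiredIndexTableLen() returns 256K buckets (a 1 MB table); the 16x cap
// gives the 4 MB table mentioned above.
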
// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend,
                          base::WaitableEvent* done) {
  backend->CleanupCache();
  done->Signal();
}

class CacheThread : public base::Thread {
 public:
  CacheThread() : base::Thread("CacheThread_BlockFile") {
    CHECK(
        StartWithOptions(base::Thread::Options(base::MessagePumpType::IO, 0)));
  }

  ~CacheThread() override {
    // We don't expect to be deleted, but call Stop() in the destructor
    // because the docs say we should.
    Stop();
  }
};

static base::LazyInstance<CacheThread>::Leaky g_internal_cache_thread =
    LAZY_INSTANCE_INITIALIZER;

scoped_refptr<base::SingleThreadTaskRunner> InternalCacheThread() {
  return g_internal_cache_thread.Get().task_runner();
}

scoped_refptr<base::SingleThreadTaskRunner> FallbackToInternalIfNull(
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread) {
  return cache_thread ? cache_thread : InternalCacheThread();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImpl::BackendImpl(
    const base::FilePath& path,
    scoped_refptr<BackendCleanupTracker> cleanup_tracker,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::CacheType cache_type,
    net::NetLog* net_log)
    : Backend(cache_type),
      cleanup_tracker_(std::move(cleanup_tracker)),
      background_queue_(this, FallbackToInternalIfNull(cache_thread)),
      path_(path),
      block_files_(path),
      user_flags_(0),
      net_log_(net_log) {
  TRACE_EVENT0("disk_cache", "BackendImpl::BackendImpl");
}

BackendImpl::BackendImpl(
    const base::FilePath& path,
    uint32_t mask,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::CacheType cache_type,
    net::NetLog* net_log)
    : Backend(cache_type),
      background_queue_(this, FallbackToInternalIfNull(cache_thread)),
      path_(path),
      block_files_(path),
      mask_(mask),
      user_flags_(kMask),
      net_log_(net_log) {
  TRACE_EVENT0("disk_cache", "BackendImpl::BackendImpl");
}

BackendImpl::~BackendImpl() {
  TRACE_EVENT0("disk_cache", "BackendImpl::~BackendImpl");
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentSequence()) {
    // Unit tests may use the same sequence for everything.
    CleanupCache();
  } else {
    // Signals the end of background work.
    base::WaitableEvent done;

    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::BindOnce(&FinalCleanupCallback, base::Unretained(this),
                                  base::Unretained(&done)));
    // http://crbug.com/74623
    base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;
    done.Wait();
  }
}

void BackendImpl::Init(CompletionOnceCallback callback) {
  background_queue_.Init(std::move(callback));
}

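// Usage sketch (illustrative commentary only; the caller-side callback is
// hypothetical): a backend is constructed on the caller's sequence and then
// initialized asynchronously on the cache thread, e.g.
//
//   auto backend = std::make_unique<disk_cache::BackendImpl>(
//       path, /*cleanup_tracker=*/nullptr, /*cache_thread=*/nullptr,
//       net::DISK_CACHE, /*net_log=*/nullptr);
//   backend->SetMaxSize(20 * 1024 * 1024);
//   backend->Init(base::BindOnce(&OnBackendInitialized));
//
// Passing a null |cache_thread| makes the constructor fall back to the shared
// "CacheThread_BlockFile" thread via FallbackToInternalIfNull().
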
int BackendImpl::SyncInit() {
  TRACE_EVENT0("disk_cache", "BackendImpl::SyncInit");

#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  bool should_create_timer = false;
  if (!restarted_) {
    buffer_bytes_ = 0;
    should_create_timer = true;
  }

  init_ = true;

  if (data_->header.experiment != NO_EXPERIMENT &&
      GetCacheType() != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (GetCacheType() == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && GetCacheType() == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (GetCacheType() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (GetCacheType() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  if (!disabled_ && should_create_timer) {
    // Create a recurrent timer of 30 secs.
    DCHECK(background_queue_.BackgroundIsCurrentSequence());
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_ = std::make_unique<base::RepeatingTimer>();
    timer_->Start(FROM_HERE, base::Milliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  DCHECK(background_queue_.BackgroundIsCurrentSequence());
  TRACE_EVENT0("disk_cache", "BackendImpl::CleanupCache");

  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      // TODO(https://crbug.com/1184679): Refactor this and eliminate the
      //    WaitForPendingIOForTesting API.
      File::WaitForPendingIOForTesting(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = nullptr;
  ptr_factory_.InvalidateWeakPtrs();
}

// ------------------------------------------------------------------------

int BackendImpl::SyncOpenEntry(const std::string& key,
                               scoped_refptr<EntryImpl>* entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key,
                                 scoped_refptr<EntryImpl>* entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  scoped_refptr<EntryImpl> entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  if (disabled_)
    return net::ERR_FAILED;

  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  TRACE_EVENT0("disk_cache", "BackendImpl::SyncDoomEntriesBetween");

  DCHECK_NE(net::APP_CACHE, GetCacheType());
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  scoped_refptr<EntryImpl> node;
  auto iterator = std::make_unique<Rankings::Iterator>();
  scoped_refptr<EntryImpl> next = OpenNextEntryImpl(iterator.get());
  if (!next)
    return net::OK;

  while (next) {
    node = std::move(next);
    next = OpenNextEntryImpl(iterator.get());

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      next = nullptr;
      SyncEndEnumeration(std::move(iterator));
    }
  }

  return net::OK;
}

int BackendImpl::SyncCalculateSizeOfAllEntries() {
  TRACE_EVENT0("disk_cache", "BackendImpl::SyncCalculateSizeOfAllEntries");

  DCHECK_NE(net::APP_CACHE, GetCacheType());
  if (disabled_)
    return net::ERR_FAILED;

  return data_->header.num_bytes;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  TRACE_EVENT0("disk_cache", "BackendImpl::SyncDoomEntriesSince");

  DCHECK_NE(net::APP_CACHE, GetCacheType());
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    auto iterator = std::make_unique<Rankings::Iterator>();
    scoped_refptr<EntryImpl> entry = OpenNextEntryImpl(iterator.get());
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry = nullptr;
      SyncEndEnumeration(std::move(iterator));
      return net::OK;
    }

    entry->DoomImpl();
    entry = nullptr;
    SyncEndEnumeration(
        std::move(iterator));  // The doom invalidated the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(Rankings::Iterator* iterator,
                                   scoped_refptr<EntryImpl>* next_entry) {
  TRACE_EVENT0("disk_cache", "BackendImpl::SyncOpenNextEntry");

  *next_entry = OpenNextEntryImpl(iterator);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(
    std::unique_ptr<Rankings::Iterator> iterator) {
  iterator->Reset();
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32_t hash = base::PersistentHash(key);
  bool error;
  scoped_refptr<EntryImpl> cache_entry =
      MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL == cache_entry->entry()->Data()->state)
    UpdateRank(cache_entry.get(), GetCacheType() == net::SHADER_CACHE);
}

scoped_refptr<EntryImpl> BackendImpl::OpenEntryImpl(const std::string& key) {
  TRACE_EVENT0("disk_cache", "BackendImpl::OpenEntryImpl");

  if (disabled_)
    return nullptr;

  uint32_t hash = base::PersistentHash(key);

  bool error;
  scoped_refptr<EntryImpl> cache_entry =
      MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry = nullptr;
  }

  if (!cache_entry) {
    stats_.OnEvent(Stats::OPEN_MISS);
    return nullptr;
  }

  eviction_.OnOpenEntry(cache_entry.get());
  entry_count_++;

  stats_.OnEvent(Stats::OPEN_HIT);
  return cache_entry;
}

scoped_refptr<EntryImpl> BackendImpl::CreateEntryImpl(const std::string& key) {
  TRACE_EVENT0("disk_cache", "BackendImpl::CreateEntryImpl");

  if (disabled_ || key.empty())
    return nullptr;

  uint32_t hash = base::PersistentHash(key);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or just
    // a hash conflict.
    bool error;
    scoped_refptr<EntryImpl> old_entry =
        MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(std::move(old_entry));

    parent = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (!parent && data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return nullptr;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and clean up (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return nullptr;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return nullptr;
  }

  auto cache_entry =
      base::MakeRefCounted<EntryImpl>(this, entry_address, false);
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return nullptr;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  stats_.OnEvent(Stats::CREATE_HIT);
  FlushIndex();
  return cache_entry;
}

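// Added commentary (not part of the original source): the index is an
// open-chained hash table. data_->table[hash & mask_] holds the address of
// the first entry in the bucket and each entry's |next| field continues the
// collision chain:
//
//   table[hash & mask_] -> entry A -> entry B -> 0
//
// CreateEntryImpl() above either writes the bucket slot directly or links the
// new entry after the chain's tail (the |parent| found by MatchEntry()),
// while MatchEntry(), defined later in this file, walks the chain.
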
scoped_refptr<EntryImpl> BackendImpl::OpenNextEntryImpl(
    Rankings::Iterator* iterator) {
  if (disabled_)
    return nullptr;

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  if (!iterator->my_rankings) {
    iterator->my_rankings = &rankings_;
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      ret |= OpenFollowingEntryFromList(static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &entries[i]);
    }
    if (!ret) {
      iterator->Reset();
      return nullptr;
    }
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      if (iterator->list == i) {
        OpenFollowingEntryFromList(iterator->list, &iterator->nodes[i],
                                   &entries[i]);
      } else {
        entries[i] = GetEnumeratedEntry(iterator->nodes[i],
                                        static_cast<Rankings::List>(i));
      }
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0) {
    iterator->Reset();
    return nullptr;
  }

  scoped_refptr<EntryImpl> next_entry = entries[newest];
  iterator->list = static_cast<Rankings::List>(newest);
  return next_entry;
}

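// Added commentary (not part of the original source): enumeration merges the
// first three rankings lists by recency. Each call keeps one candidate per
// list, returns the one with the newest last-used time, and only advances the
// list that candidate came from, so entries are produced in roughly
// MRU-to-LRU order across the lists.
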
bool BackendImpl::SetMaxSize(int64_t max_bytes) {
  if (max_bytes < 0 || max_bytes > std::numeric_limits<int>::max())
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= std::numeric_limits<int32_t>::max() -
                       std::numeric_limits<int32_t>::max() / 10) {
    max_bytes = std::numeric_limits<int32_t>::max() -
                std::numeric_limits<int32_t>::max() / 10 - 1;
  }

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

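// Worked example (added commentary, not part of the original source): the
// clamp above caps max_bytes just below INT32_MAX - INT32_MAX / 10, i.e.
//   2147483647 - 214748364 - 1 = 1932735282 bytes (~1.8 GiB),
// leaving roughly 10% of the int32_t range as headroom against overshoot.
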
base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

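// Example (added commentary, not part of the original source): external files
// are named "f_" plus the file number in hex, zero-padded to six digits, so
// file number 0x2a maps to "f_00002a" inside the cache directory.
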
MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return nullptr;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  TRACE_EVENT0("disk_cache", "BackendImpl::CreateExternalFile");
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
                base::File::FLAG_CREATE | base::File::FLAG_WIN_EXCLUSIVE_WRITE;
    base::File file(name, flags);
    if (!file.IsValid()) {
      base::File::Error error = file.error_details();
      if (error != base::File::FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && GetCacheType() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  scoped_refptr<EntryImpl> cache_entry;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32_t hash = cache_entry->GetHash();
  cache_entry = nullptr;

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32_t hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  scoped_refptr<EntryImpl> parent_entry =
      MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry = nullptr;
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32_t i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}
#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  auto it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  consider_evicting_at_op_end_ = true;
}

void BackendImpl::OnSyncBackendOpComplete() {
  if (consider_evicting_at_op_end_) {
    if (data_->header.num_bytes > max_size_ && !read_only_ &&
        (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
      eviction_.TrimCache(false);
    consider_evicting_at_op_end_ = false;
  }
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  auto it = open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return nullptr;
}

int32_t BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

int64_t BackendImpl::MaxFileSize() const {
  return GetCacheType() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
}

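// Example (added commentary, not part of the original source): with a 50 MB
// cache, a single entry's data is limited to 50 MB / 8 = 6.25 MB, except for
// the PNaCl cache, where one entry may use the whole budget.
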
void BackendImpl::ModifyStorageSize(int32_t old_size, int32_t new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32_t size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// Previously this method was used to determine when to report histograms, so
// the logic is surprisingly convoluted.
bool BackendImpl::ShouldUpdateStats() {
  if (should_update_) {
    return should_update_ == 2;
  }

  should_update_++;
  int64_t last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    should_update_++;
    return true;
  }
  return false;
}

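// Added commentary (not part of the original source): should_update_ is a
// tri-state. 0 means "not decided yet", 1 means "checked and declined", and
// 2 means "stats should be updated". The first call decides based on whether
// a week has passed since Stats::LAST_REPORT; later calls replay the cached
// decision.
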
void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
        FROM_HERE,
        base::BindOnce(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  if (GetCacheType() == net::DISK_CACHE) {
    base::UmaHistogramExactLinear("DiskCache.0.Error", error * -1, 50);
  }
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32_t bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = std::numeric_limits<int32_t>::max();
}

void BackendImpl::OnWrite(int32_t bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  if (disabled_)
    return;

  stats_.OnEvent(Stats::TIMER);
  int64_t time = stats_.GetCounter(Stats::TIMER);
  int64_t current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64_t diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldUpdateStats()) {
      UpdateStats();
    }
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

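// Worked example (added commentary, not part of the original source): the
// OPEN_ENTRIES counter above moves 2% of the gap toward the live value on
// each tick, with a minimum step of 1. E.g. with current = 100 and
// num_refs_ = 200, diff = (200 - 100) / 50 = 2, so the stored sample becomes
// 102 on this tick.
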
void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32_t flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(CompletionOnceCallback callback) {
  background_queue_.FlushQueue(std::move(callback));
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(base::OnceClosure task,
                                CompletionOnceCallback callback) {
  background_queue_.RunTask(std::move(task), std::move(callback));
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

base::RepeatingTimer* BackendImpl::GetTimerForTest() {
  return timer_.get();
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

int32_t BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
  int32_t not_deleted =
      data_->header.num_entries - data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    DUMP_WILL_BE_NOTREACHED_NORETURN();
    not_deleted = 0;
  }

  return not_deleted;
}

EntryResult BackendImpl::OpenOrCreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenOrCreateEntry(key, std::move(callback));
  return EntryResult::MakeError(net::ERR_IO_PENDING);
}

EntryResult BackendImpl::OpenEntry(const std::string& key,
                                   net::RequestPriority request_priority,
                                   EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, std::move(callback));
  return EntryResult::MakeError(net::ERR_IO_PENDING);
}

EntryResult BackendImpl::CreateEntry(const std::string& key,
                                     net::RequestPriority request_priority,
                                     EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, std::move(callback));
  return EntryResult::MakeError(net::ERR_IO_PENDING);
}

net::Error BackendImpl::DoomEntry(const std::string& key,
                                  net::RequestPriority priority,
                                  CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, std::move(callback));
  return net::ERR_IO_PENDING;
}

net::Error BackendImpl::DoomAllEntries(CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(std::move(callback));
  return net::ERR_IO_PENDING;
}

net::Error BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                           const base::Time end_time,
                                           CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time,
                                       std::move(callback));
  return net::ERR_IO_PENDING;
}

net::Error BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                         CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, std::move(callback));
  return net::ERR_IO_PENDING;
}

int64_t BackendImpl::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  background_queue_.CalculateSizeOfAllEntries(BindOnce(
      [](Int64CompletionOnceCallback callback, int result) {
        std::move(callback).Run(static_cast<int64_t>(result));
      },
      std::move(callback)));
  return net::ERR_IO_PENDING;
}

class BackendImpl::IteratorImpl : public Backend::Iterator {
 public:
  explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue)
      : background_queue_(background_queue),
        iterator_(std::make_unique<Rankings::Iterator>()) {}

  ~IteratorImpl() override {
    if (background_queue_)
      background_queue_->EndEnumeration(std::move(iterator_));
  }

  EntryResult OpenNextEntry(EntryResultCallback callback) override {
    if (!background_queue_)
      return EntryResult::MakeError(net::ERR_FAILED);
    background_queue_->OpenNextEntry(iterator_.get(), std::move(callback));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }

 private:
  const base::WeakPtr<InFlightBackendIO> background_queue_;
  std::unique_ptr<Rankings::Iterator> iterator_;
};

std::unique_ptr<Backend::Iterator> BackendImpl::CreateIterator() {
  return std::make_unique<IteratorImpl>(GetBackgroundQueue());
}

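// Usage sketch (illustrative commentary only; the callback is hypothetical):
// public enumeration goes through this iterator, e.g.
//
//   std::unique_ptr<disk_cache::Backend::Iterator> it =
//       backend->CreateIterator();
//   disk_cache::EntryResult result =
//       it->OpenNextEntry(base::BindOnce(&OnNextEntry));
//
// The immediate result carries net::ERR_IO_PENDING; the callback then runs
// once per entry until the enumeration ends, at which point it receives
// net::ERR_FAILED.
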
void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::NumberToString(data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::NumberToString(num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::NumberToString(max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::NumberToString(data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);
  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  size_t size = GetIndexSize(header.table_len);
  if (!file->SetLength(size))
    return false;

  // The call to SetLength() above is supposed to have already expanded the file
  // to |size| and zero-filled it, but on some systems the actual storage may
  // not get allocated until the pages are actually touched... resulting in a
  // SIGBUS trying to search through the index if the system is out of disk
  // space. So actually write out the zeroes (for pages after the one with the
  // header), to force allocation now and fail cleanly if there is no space.
  //
  // See https://crbug.com/1097518
  const int kPageSize = 4096;
  static_assert(sizeof(disk_cache::IndexHeader) < kPageSize,
                "Code below assumes it wouldn't overwrite header by starting "
                "at kPageSize");
  auto page = std::make_unique<char[]>(kPageSize);
  memset(page.get(), 0, kPageSize);

  for (size_t offset = kPageSize; offset < size; offset += kPageSize) {
    size_t end = std::min(offset + kPageSize, size);
    if (!file->Write(page.get(), end - offset, offset))
      return false;
  }
  return true;
}

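// Worked example (added commentary, not part of the original source): for the
// base 64K-bucket table the file is sizeof(IndexHeader) + 256 KB long, so the
// loop above writes 4 KB zero pages at offsets 4096, 8192, ... plus a final
// partial page, forcing the filesystem to allocate real blocks now instead of
// faulting later while the index is scanned.
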
InitBackingStore(bool * file_created)1338 bool BackendImpl::InitBackingStore(bool* file_created) {
1339   if (!base::CreateDirectory(path_))
1340     return false;
1341 
1342   base::FilePath index_name = path_.AppendASCII(kIndexName);
1343 
1344   int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
1345               base::File::FLAG_OPEN_ALWAYS |
1346               base::File::FLAG_WIN_EXCLUSIVE_WRITE;
1347   base::File base_file(index_name, flags);
1348   if (!base_file.IsValid())
1349     return false;
1350 
1351   bool ret = true;
1352   *file_created = base_file.created();
1353 
1354   auto file = base::MakeRefCounted<disk_cache::File>(std::move(base_file));
1355   if (*file_created)
1356     ret = CreateBackingStore(file.get());
1357 
1358   file = nullptr;
1359   if (!ret)
1360     return false;
1361 
1362   index_ = base::MakeRefCounted<MappedFile>();
1363   data_ = static_cast<Index*>(index_->Init(index_name, 0));
1364   if (!data_) {
1365     LOG(ERROR) << "Unable to map Index file";
1366     return false;
1367   }
1368 
1369   if (index_->GetLength() < sizeof(Index)) {
1370     // We verify this again on CheckIndex() but it's easier to make sure now
1371     // that the header is there.
1372     LOG(ERROR) << "Corrupt Index file";
1373     return false;
1374   }
1375 
1376   return true;
1377 }
1378 
1379 // The maximum cache size will be either set explicitly by the caller, or
1380 // calculated by this code.
AdjustMaxCacheSize(int table_len)1381 void BackendImpl::AdjustMaxCacheSize(int table_len) {
1382   if (max_size_)
1383     return;
1384 
1385   // If table_len is provided, the index file exists.
1386   DCHECK(!table_len || data_->header.magic);
1387 
1388   // The user is not setting the size, let's figure it out.
1389   int64_t available = base::SysInfo::AmountOfFreeDiskSpace(path_);
1390   if (available < 0) {
1391     max_size_ = kDefaultCacheSize;
1392     return;
1393   }
1394 
1395   if (table_len)
1396     available += data_->header.num_bytes;
1397 
1398   max_size_ = PreferredCacheSize(available, GetCacheType());
1399 
1400   if (!table_len)
1401     return;
1402 
1403   // If we already have a table, adjust the size to it.
1404   max_size_ = std::min(max_size_, MaxStorageSizeForTable(table_len));
1405 }
1406 
InitStats()1407 bool BackendImpl::InitStats() {
1408   Addr address(data_->header.stats);
1409   int size = stats_.StorageSize();
1410 
1411   if (!address.is_initialized()) {
1412     FileType file_type = Addr::RequiredFileType(size);
1413     DCHECK_NE(file_type, EXTERNAL);
1414     int num_blocks = Addr::RequiredBlocks(size, file_type);
1415 
1416     if (!CreateBlock(file_type, num_blocks, &address))
1417       return false;
1418 
1419     data_->header.stats = address.value();
1420     return stats_.Init(nullptr, 0, address);
1421   }
1422 
1423   if (!address.is_block_file()) {
1424     NOTREACHED();
1425     return false;
1426   }
1427 
1428   // Load the required data.
1429   size = address.num_blocks() * address.BlockSize();
1430   MappedFile* file = File(address);
1431   if (!file)
1432     return false;
1433 
1434   auto data = std::make_unique<char[]>(size);
1435   size_t offset = address.start_block() * address.BlockSize() +
1436                   kBlockHeaderSize;
1437   if (!file->Read(data.get(), size, offset))
1438     return false;
1439 
1440   if (!stats_.Init(data.get(), size, address))
1441     return false;
1442   if (GetCacheType() == net::DISK_CACHE && ShouldUpdateStats()) {
1443     stats_.InitSizeHistogram();
1444   }
1445   return true;
1446 }
1447 
StoreStats()1448 void BackendImpl::StoreStats() {
1449   int size = stats_.StorageSize();
1450   auto data = std::make_unique<char[]>(size);
1451   Addr address;
1452   size = stats_.SerializeStats(data.get(), size, &address);
1453   DCHECK(size);
1454   if (!address.is_initialized())
1455     return;
1456 
1457   MappedFile* file = File(address);
1458   if (!file)
1459     return;
1460 
1461   size_t offset = address.start_block() * address.BlockSize() +
1462                   kBlockHeaderSize;
1463   file->Write(data.get(), size, offset);  // ignore result.
1464 }
1465 
RestartCache(bool failure)1466 void BackendImpl::RestartCache(bool failure) {
1467   TRACE_EVENT0("disk_cache", "BackendImpl::RestartCache");
1468 
1469   int64_t errors = stats_.GetCounter(Stats::FATAL_ERROR);
1470   int64_t full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
1471   int64_t partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
1472   int64_t last_report = stats_.GetCounter(Stats::LAST_REPORT);
1473 
1474   PrepareForRestart();
1475   if (failure) {
1476     DCHECK(!num_refs_);
1477     DCHECK(open_entries_.empty());
1478     CleanupDirectorySync(path_);
1479   } else {
1480     DeleteCache(path_, false);
1481   }
1482 
1483   // Don't call Init() if directed by the unit test: we are simulating a failure
1484   // trying to re-enable the cache.
1485   if (unit_test_) {
1486     init_ = true;  // Let the destructor do proper cleanup.
1487   } else if (SyncInit() == net::OK) {
1488     stats_.SetCounter(Stats::FATAL_ERROR, errors);
1489     stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
1490     stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
1491     stats_.SetCounter(Stats::LAST_REPORT, last_report);
1492   }
1493 }
1494 
PrepareForRestart()1495 void BackendImpl::PrepareForRestart() {
1496   // Reset the mask_ if it was not given by the user.
1497   if (!(user_flags_ & kMask))
1498     mask_ = 0;
1499 
1500   if (!(user_flags_ & kNewEviction))
1501     new_eviction_ = false;
1502 
1503   disabled_ = true;
1504   data_->header.crash = 0;
1505   index_->Flush();
1506   index_ = nullptr;
1507   data_ = nullptr;
1508   block_files_.CloseFiles();
1509   rankings_.Reset();
1510   init_ = false;
1511   restarted_ = true;
1512 }
1513 
NewEntry(Addr address,scoped_refptr<EntryImpl> * entry)1514 int BackendImpl::NewEntry(Addr address, scoped_refptr<EntryImpl>* entry) {
1515   auto it = open_entries_.find(address.value());
1516   if (it != open_entries_.end()) {
1517     // Easy job. This entry is already in memory.
1518     *entry = base::WrapRefCounted(it->second);
1519     return 0;
1520   }
1521 
1522   STRESS_DCHECK(block_files_.IsValid(address));
1523 
1524   if (!address.SanityCheckForEntry()) {
1525     LOG(WARNING) << "Wrong entry address.";
1526     STRESS_NOTREACHED();
1527     return ERR_INVALID_ADDRESS;
1528   }
1529 
1530   auto cache_entry = base::MakeRefCounted<EntryImpl>(this, address, read_only_);
1531   IncreaseNumRefs();
1532   *entry = nullptr;
1533 
1534   if (!cache_entry->entry()->Load())
1535     return ERR_READ_FAILURE;
1536 
1537   if (!cache_entry->SanityCheck()) {
1538     LOG(WARNING) << "Messed up entry found.";
1539     STRESS_NOTREACHED();
1540     return ERR_INVALID_ENTRY;
1541   }
1542 
1543   STRESS_DCHECK(block_files_.IsValid(
1544                     Addr(cache_entry->entry()->Data()->rankings_node)));
1545 
1546   if (!cache_entry->LoadNodeAddress())
1547     return ERR_READ_FAILURE;
1548 
1549   if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
1550     STRESS_NOTREACHED();
1551     cache_entry->SetDirtyFlag(0);
1552     // Don't remove this from the list (it is not linked properly). Instead,
1553     // break the link back to the entry because it is going away, and leave the
1554     // rankings node to be deleted if we find it through a list.
1555     rankings_.SetContents(cache_entry->rankings(), 0);
1556   } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
1557     STRESS_NOTREACHED();
1558     cache_entry->SetDirtyFlag(0);
1559     rankings_.SetContents(cache_entry->rankings(), address.value());
1560   }
1561 
1562   if (!cache_entry->DataSanityCheck()) {
1563     LOG(WARNING) << "Messed up entry found.";
1564     cache_entry->SetDirtyFlag(0);
1565     cache_entry->FixForDelete();
1566   }
1567 
1568   // Prevent overwriting the dirty flag on the destructor.
1569   cache_entry->SetDirtyFlag(GetCurrentEntryId());
1570 
1571   open_entries_[address.value()] = cache_entry.get();
1572 
1573   cache_entry->BeginLogging(net_log_, false);
1574   *entry = std::move(cache_entry);
1575   return 0;
1576 }
1577 
scoped_refptr<EntryImpl> BackendImpl::MatchEntry(const std::string& key,
                                                 uint32_t hash,
                                                 bool find_parent,
                                                 Addr entry_addr,
                                                 bool* match_error) {
  TRACE_EVENT0("disk_cache", "BackendImpl::MatchEntry");

  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  bool found = false;
  std::set<CacheAddr> visited;
  *match_error = false;

  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &cache_entry);
    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = nullptr;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = nullptr;
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = nullptr;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        *match_error = true;
        parent_entry = nullptr;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = nullptr;
    parent_entry = cache_entry;
    cache_entry = nullptr;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = nullptr;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = nullptr;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = nullptr;

  FlushIndex();

  return find_parent ? std::move(parent_entry) : std::move(cache_entry);
}

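// Advances an enumeration over |list|: opens the entry that follows
// |*from_entry| and, on success, stores it in |*next_entry| and updates
// |*from_entry| to the new position. Returns false when the end of the list
// is reached or the backend is disabled.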
bool BackendImpl::OpenFollowingEntryFromList(
    Rankings::List list,
    CacheRankingsBlock** from_entry,
    scoped_refptr<EntryImpl>* next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = rankings_.GetNext(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = nullptr;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

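// Opens the entry pointed to by the rankings node |next| on behalf of an
// enumeration. Unreadable nodes are removed from |list|, dirty entries are
// doomed, and nullptr is returned in both cases so the caller can move on.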
scoped_refptr<EntryImpl> BackendImpl::GetEnumeratedEntry(
    CacheRankingsBlock* next,
    Rankings::List list) {
  if (!next || disabled_)
    return nullptr;

  scoped_refptr<EntryImpl> entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return nullptr;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry.get());
    return nullptr;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    return nullptr;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

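// Called while creating an entry when a matching entry in a deleted state is
// found. Deleted entries are brought back to life and returned; an entry that
// is still in the normal state cannot be reused, so nullptr is returned and
// the creation counts as a miss.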
scoped_refptr<EntryImpl> BackendImpl::ResurrectEntry(
    scoped_refptr<EntryImpl> deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry = nullptr;
    stats_.OnEvent(Stats::CREATE_MISS);
    return nullptr;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry.get());
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  return deleted_entry;
}

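// Dooms an entry that failed validation. The caller is expected to have
// already removed the entry from the index table, so this only updates the
// eviction bookkeeping and the entry itself.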
void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32_t bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32_t bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
        FROM_HERE,
        base::BindOnce(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    STRESS_NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (const auto& stat : stats)
    VLOG(1) << stat.first << ": " << stat.second;
}

void BackendImpl::UpdateStats() {
  // Previously this function was used to periodically emit histograms;
  // now it just performs some regular maintenance on the cache statistics.
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int64_t total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    return;
  }

  int64_t use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK_EQ(kVersion2_0, data_->header.version);
  data_->header.version = kVersion2_1;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

void BackendImpl::UpgradeTo3_0() {
  // 3.0 uses a 64-bit size field.
  DCHECK(kVersion2_0 == data_->header.version ||
         kVersion2_1 == data_->header.version);
  data_->header.version = kVersion3_0;
  data_->header.num_bytes = data_->header.old_v2_num_bytes;
}

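// Validates the index file header (size, magic, version and table length),
// performing in-place upgrades from versions 2.0/2.1 where possible, and then
// preloads the index table. Returns false if the index must be discarded.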
bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (data_->header.magic != kIndexMagic) {
    LOG(ERROR) << "Invalid file magic";
    return false;
  }

  // 2.0 + new_eviction needs conversion to 2.1.
  if (data_->header.version == kVersion2_0 && new_eviction_) {
    UpgradeTo2_1();
  }

  // 2.0 or 2.1 can be upgraded to 3.0.
  if (data_->header.version == kVersion2_0 ||
      data_->header.version == kVersion2_1) {
    UpgradeTo3_0();
  }

  if (kCurrentVersion != data_->header.version) {
    LOG(ERROR) << "Invalid file version";
    return false;
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < std::numeric_limits<int32_t>::max() - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory.
  return index_->Preload();
}

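// Walks every bucket of the index table, opening each entry on the chain and
// verifying it. Returns the number of dirty entries found, or one of the
// ERR_* codes from blockfile/errors.h if an entry is broken or the totals
// do not match the header.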
int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < std::numeric_limits<uint32_t>::max());
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      scoped_refptr<EntryImpl> cache_entry;
      int ret = NewEntry(address, &cache_entry);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

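// Verifies that all the block-file addresses stored by |cache_entry| (the
// entry itself, its rankings node and its data streams) are valid, and that
// the rankings node hash matches.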
bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < std::size(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

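// Returns the cap for the total size of the in-memory buffers used by
// entries: 2% of physical memory, limited to 30 MB.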
// static
int BackendImpl::MaxBuffersSize() {
  // Calculate based on total memory the first time this function is called,
  // then cache the result.
  static const int max_buffers_size = ([]() {
    constexpr uint64_t kMaxMaxBuffersSize = 30 * 1024 * 1024;
    const uint64_t total_memory = base::SysInfo::AmountOfPhysicalMemory();
    if (total_memory == 0u) {
      return int{kMaxMaxBuffersSize};
    }
    const uint64_t two_percent = total_memory * 2 / 100;
    return static_cast<int>(std::min(two_percent, kMaxMaxBuffersSize));
  })();

  return max_buffers_size;
}

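// Test-only helpers: drain any work queued on the internal cache thread,
// either synchronously or by running |callback| once the queue is empty.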
void BackendImpl::FlushForTesting() {
  if (!g_internal_cache_thread.IsCreated()) {
    return;
  }

  g_internal_cache_thread.Get().FlushForTesting();
}

void BackendImpl::FlushAsynchronouslyForTesting(base::OnceClosure callback) {
  if (!g_internal_cache_thread.IsCreated()) {
    base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
        FROM_HERE, std::move(callback));
    return;
  }

  InternalCacheThread()->PostTaskAndReply(FROM_HERE, base::BindOnce([]() {}),
                                          std::move(callback));
}

}  // namespace disk_cache