xref: /aosp_15_r20/external/cronet/net/disk_cache/simple/simple_index.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_index.h"

#include <algorithm>
#include <limits>
#include <string>
#include <utility>

#include "base/check_op.h"
#include "base/files/file_util.h"
#include "base/functional/bind.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_tokenizer.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/task_runner.h"
#include "base/time/time.h"
#include "base/trace_event/memory_usage_estimator.h"
#include "build/build_config.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index_delegate.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"

#if BUILDFLAG(IS_POSIX)
#include <sys/stat.h>
#include <sys/time.h>
#endif

namespace {

// How many milliseconds to delay writing the index to disk after the last
// cache operation.
const int kWriteToDiskDelayMSecs = 20000;
const int kWriteToDiskOnBackgroundDelayMSecs = 100;

// Divides the cache space into this many parts; eviction is triggered once
// less than one part of headroom remains.
const uint32_t kEvictionMarginDivisor = 20;

const uint32_t kBytesInKb = 1024;

// Added to the size of each entry before the size is used to decide which
// entries to evict first. It is essentially an estimate of the filesystem
// overhead, but it also flattens the curve so that 1-byte and 2-byte entries
// are treated nearly the same.
static const int kEstimatedEntryOverhead = 512;
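
// Illustrative example (hypothetical sizes, not from the source): with this
// overhead a 1-byte entry is weighted as 1 + 512 = 513 and a 2-byte entry as
// 514, so for tiny entries the eviction ranking is dominated by age rather
// than by their nearly identical sizes.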

}  // namespace

namespace disk_cache {

EntryMetadata::EntryMetadata()
    : last_used_time_seconds_since_epoch_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {}

EntryMetadata::EntryMetadata(base::Time last_used_time,
                             base::StrictNumeric<uint32_t> entry_size)
    : last_used_time_seconds_since_epoch_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {
  SetEntrySize(entry_size);  // to round/pack properly.
  SetLastUsedTime(last_used_time);
}

EntryMetadata::EntryMetadata(int32_t trailer_prefetch_size,
                             base::StrictNumeric<uint32_t> entry_size)
    : trailer_prefetch_size_(0),
      entry_size_256b_chunks_(0),
      in_memory_data_(0) {
  SetEntrySize(entry_size);  // to round/pack properly.
  SetTrailerPrefetchSize(trailer_prefetch_size);
}

base::Time EntryMetadata::GetLastUsedTime() const {
  // Preserve nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    return base::Time();

  return base::Time::UnixEpoch() +
         base::Seconds(last_used_time_seconds_since_epoch_);
}

void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
  // Preserve nullity.
  if (last_used_time.is_null()) {
    last_used_time_seconds_since_epoch_ = 0;
    return;
  }

  last_used_time_seconds_since_epoch_ = base::saturated_cast<uint32_t>(
      (last_used_time - base::Time::UnixEpoch()).InSeconds());
  // Avoid accidental nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    last_used_time_seconds_since_epoch_ = 1;
}
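
// Example with assumed values for SetLastUsedTime() above: a time 100 seconds
// past the Unix epoch is stored as 100; a null base::Time() is stored as 0;
// and a non-null time that would round down to 0 is stored as 1 so it is not
// mistaken for null.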

int32_t EntryMetadata::GetTrailerPrefetchSize() const {
  return trailer_prefetch_size_;
}

void EntryMetadata::SetTrailerPrefetchSize(int32_t size) {
  if (size <= 0)
    return;
  trailer_prefetch_size_ = size;
}

uint32_t EntryMetadata::GetEntrySize() const {
  return entry_size_256b_chunks_ << 8;
}

void EntryMetadata::SetEntrySize(base::StrictNumeric<uint32_t> entry_size) {
  // This should not overflow since we limit entries to 1/8th of the cache.
  entry_size_256b_chunks_ = (static_cast<uint32_t>(entry_size) + 255) >> 8;
}
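
// Rounding example (hypothetical sizes): SetEntrySize(1) stores one 256-byte
// chunk, so GetEntrySize() returns 256; SetEntrySize(600) stores three chunks
// and reports 768. Sizes are always rounded up to a multiple of 256.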

void EntryMetadata::Serialize(net::CacheType cache_type,
                              base::Pickle* pickle) const {
  DCHECK(pickle);
  // If you modify the size of the pickle, be sure to update
  // kOnDiskSizeBytes.
  uint32_t packed_entry_info = (entry_size_256b_chunks_ << 8) | in_memory_data_;
  if (cache_type == net::APP_CACHE) {
    pickle->WriteInt64(trailer_prefetch_size_);
  } else {
    int64_t internal_last_used_time = GetLastUsedTime().ToInternalValue();
    pickle->WriteInt64(internal_last_used_time);
  }
  pickle->WriteUInt64(packed_entry_info);
}
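
// Presumably (an inference from the two 64-bit writes above, not stated in
// this file) each serialized record contributes 16 payload bytes: 8 for the
// last-used time or trailer prefetch size, plus 8 for the packed entry info.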

bool EntryMetadata::Deserialize(net::CacheType cache_type,
                                base::PickleIterator* it,
                                bool has_entry_in_memory_data,
                                bool app_cache_has_trailer_prefetch_size) {
  DCHECK(it);
  int64_t tmp_time_or_prefetch_size;
  uint64_t tmp_entry_size;
  if (!it->ReadInt64(&tmp_time_or_prefetch_size) ||
      !it->ReadUInt64(&tmp_entry_size) ||
      tmp_entry_size > std::numeric_limits<uint32_t>::max())
    return false;
  if (cache_type == net::APP_CACHE) {
    if (app_cache_has_trailer_prefetch_size) {
      int32_t trailer_prefetch_size = 0;
      base::CheckedNumeric<int32_t> numeric_size(tmp_time_or_prefetch_size);
      if (numeric_size.AssignIfValid(&trailer_prefetch_size)) {
        SetTrailerPrefetchSize(trailer_prefetch_size);
      }
    }
  } else {
    SetLastUsedTime(base::Time::FromInternalValue(tmp_time_or_prefetch_size));
  }
  if (has_entry_in_memory_data) {
    // tmp_entry_size actually packs entry_size_256b_chunks_ and
    // in_memory_data_.
    SetEntrySize(static_cast<uint32_t>(tmp_entry_size & 0xFFFFFF00));
    SetInMemoryData(static_cast<uint8_t>(tmp_entry_size & 0xFF));
  } else {
    SetEntrySize(static_cast<uint32_t>(tmp_entry_size));
    SetInMemoryData(0);
  }
  return true;
}
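
// Unpacking example (hypothetical value): tmp_entry_size == 0x312 splits into
// in_memory_data_ == 0x12 (the low 8 bits) and an entry size of 0x300 == 768
// bytes, i.e. three 256-byte chunks, mirroring the packing in Serialize().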

SimpleIndex::SimpleIndex(
    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
    scoped_refptr<BackendCleanupTracker> cleanup_tracker,
    SimpleIndexDelegate* delegate,
    net::CacheType cache_type,
    std::unique_ptr<SimpleIndexFile> index_file)
    : cleanup_tracker_(std::move(cleanup_tracker)),
      delegate_(delegate),
      cache_type_(cache_type),
      index_file_(std::move(index_file)),
      task_runner_(task_runner),
      // Creating the callback once so it is reused every time
      // write_to_disk_timer_.Start() is called.
      write_to_disk_cb_(base::BindRepeating(&SimpleIndex::WriteToDisk,
                                            AsWeakPtr(),
                                            INDEX_WRITE_REASON_IDLE)) {}

SimpleIndex::~SimpleIndex() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // Fail all callbacks waiting for the index to come up.
  for (auto& callback : to_run_when_initialized_) {
    std::move(callback).Run(net::ERR_ABORTED);
  }
}

void SimpleIndex::Initialize(base::Time cache_mtime) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

#if BUILDFLAG(IS_ANDROID)
  if (app_status_listener_getter_) {
    base::android::ApplicationStatusListener* listener =
        app_status_listener_getter_.Run();
    if (listener) {
      listener->SetCallback(base::BindRepeating(
          &SimpleIndex::OnApplicationStateChange, AsWeakPtr()));
    }
    // Not using the fallback on purpose here --- if the getter is set, we may
    // be in a process where the base::android::ApplicationStatusListener::New
    // impl is unavailable.
    // (See https://crbug.com/881572)
  } else if (base::android::IsVMInitialized()) {
    owned_app_status_listener_ =
        base::android::ApplicationStatusListener::New(base::BindRepeating(
            &SimpleIndex::OnApplicationStateChange, AsWeakPtr()));
  }
#endif

  auto load_result = std::make_unique<SimpleIndexLoadResult>();
  auto* load_result_ptr = load_result.get();
  index_file_->LoadIndexEntries(
      cache_mtime,
      base::BindOnce(&SimpleIndex::MergeInitializingSet, AsWeakPtr(),
                     std::move(load_result)),
      load_result_ptr);
}

void SimpleIndex::SetMaxSize(uint64_t max_bytes) {
  // Zero size means use the default.
  if (max_bytes) {
    max_size_ = max_bytes;
    high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor;
    low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor);
  }
}
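
// Watermark example with an assumed max_bytes of 100 MB: high_watermark_
// becomes 95 MB and low_watermark_ 90 MB, so eviction starts once usage
// exceeds 95 MB and trims the cache back to roughly 90 MB.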

void SimpleIndex::ExecuteWhenReady(net::CompletionOnceCallback task) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (initialized_)
    task_runner_->PostTask(FROM_HERE, base::BindOnce(std::move(task), net::OK));
  else
    to_run_when_initialized_.push_back(std::move(task));
}

std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
    base::Time initial_time,
    base::Time end_time) {
  DCHECK_EQ(true, initialized_);

  // The net::APP_CACHE mode does not track access times.  Assert that external
  // consumers are not relying on access time ranges.
  DCHECK(cache_type_ != net::APP_CACHE ||
         (initial_time.is_null() && end_time.is_null()));

  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();
  DCHECK(end_time >= initial_time);

  auto ret_hashes = std::make_unique<HashList>();
  for (const auto& entry : entries_set_) {
    const EntryMetadata& metadata = entry.second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < end_time)
      ret_hashes->push_back(entry.first);
  }
  return ret_hashes;
}
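
// Note: stored last-used times have coarse (second) granularity, so the
// epsilon adjustments above widen the requested range slightly, erring on the
// side of including boundary entries whose stored time was rounded.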

std::unique_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
  return GetEntriesBetween(base::Time(), base::Time());
}

int32_t SimpleIndex::GetEntryCount() const {
  // TODO(pasko): return a meaningful initial estimate before initialized.
  return entries_set_.size();
}

uint64_t SimpleIndex::GetCacheSize() const {
  DCHECK(initialized_);
  return cache_size_;
}

uint64_t SimpleIndex::GetCacheSizeBetween(base::Time initial_time,
                                          base::Time end_time) const {
  DCHECK_EQ(true, initialized_);

  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();

  DCHECK(end_time >= initial_time);
  uint64_t size = 0;
  for (const auto& entry : entries_set_) {
    const EntryMetadata& metadata = entry.second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < end_time)
      size += metadata.GetEntrySize();
  }
  return size;
}

base::Time SimpleIndex::GetLastUsedTime(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_NE(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return base::Time();
  return it->second.GetLastUsedTime();
}

void SimpleIndex::SetLastUsedTimeForTest(uint64_t entry_hash,
                                         const base::Time last_used) {
  auto it = entries_set_.find(entry_hash);
  DCHECK(it != entries_set_.end());
  it->second.SetLastUsedTime(last_used);
}

bool SimpleIndex::HasPendingWrite() const {
  return write_to_disk_timer_.IsRunning();
}

void SimpleIndex::Insert(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // At insert time we don't yet know the size of the entry. It will be
  // updated later, when the SimpleEntryImpl finishes opening or creating the
  // new entry and UpdateEntrySize is called.
  bool inserted = false;
  if (cache_type_ == net::APP_CACHE) {
    inserted =
        InsertInEntrySet(entry_hash, EntryMetadata(-1, 0u), &entries_set_);
  } else {
    inserted = InsertInEntrySet(
        entry_hash, EntryMetadata(base::Time::Now(), 0u), &entries_set_);
  }
  if (!initialized_)
    removed_entries_.erase(entry_hash);
  if (inserted)
    PostponeWritingToDisk();
}

void SimpleIndex::Remove(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  bool need_write = false;
  auto it = entries_set_.find(entry_hash);
  if (it != entries_set_.end()) {
    UpdateEntryIteratorSize(&it, 0u);
    entries_set_.erase(it);
    need_write = true;
  }

  if (!initialized_)
    removed_entries_.insert(entry_hash);

  if (need_write)
    PostponeWritingToDisk();
}

bool SimpleIndex::Has(uint64_t hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // If not initialized, always return true, forcing it to go to the disk.
  return !initialized_ || entries_set_.count(hash) > 0;
}

uint8_t SimpleIndex::GetEntryInMemoryData(uint64_t entry_hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return 0;
  return it->second.GetInMemoryData();
}

void SimpleIndex::SetEntryInMemoryData(uint64_t entry_hash, uint8_t value) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return;
  it->second.SetInMemoryData(value);
}

bool SimpleIndex::UseIfExists(uint64_t entry_hash) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Always update the last used time, even if it is during initialization.
  // It will be merged later.
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    // If not initialized, always return true, forcing it to go to the disk.
    return !initialized_;
  // We do not need to track access times in APP_CACHE mode.
  if (cache_type_ == net::APP_CACHE)
    return true;
  it->second.SetLastUsedTime(base::Time::Now());
  PostponeWritingToDisk();
  return true;
}

void SimpleIndex::StartEvictionIfNeeded() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (eviction_in_progress_ || cache_size_ <= high_watermark_)
    return;
  // Take all live key hashes from the index and sort them by time.
  eviction_in_progress_ = true;
  eviction_start_time_ = base::TimeTicks::Now();

  bool use_size_heuristic =
      (cache_type_ != net::GENERATED_BYTE_CODE_CACHE &&
       cache_type_ != net::GENERATED_WEBUI_BYTE_CODE_CACHE);

  // Flatten for sorting.
  std::vector<std::pair<uint64_t, const EntrySet::value_type*>> entries;
  entries.reserve(entries_set_.size());
  uint32_t now = (base::Time::Now() - base::Time::UnixEpoch()).InSeconds();
  for (EntrySet::const_iterator i = entries_set_.begin();
       i != entries_set_.end(); ++i) {
    uint64_t sort_value = now - i->second.RawTimeForSorting();
    // See crbug.com/736437 for context.
    //
    // Will not overflow since we're multiplying two 32-bit values and storing
    // them in a 64-bit variable.
    if (use_size_heuristic)
      sort_value *= i->second.GetEntrySize() + kEstimatedEntryOverhead;
    // Subtract so we don't need a custom comparator.
    entries.emplace_back(std::numeric_limits<uint64_t>::max() - sort_value,
                         &*i);
  }

  uint64_t evicted_so_far_size = 0;
  const uint64_t amount_to_evict = cache_size_ - low_watermark_;
  std::vector<uint64_t> entry_hashes;
  std::sort(entries.begin(), entries.end());
  for (const auto& score_metadata_pair : entries) {
    if (evicted_so_far_size >= amount_to_evict)
      break;
    evicted_so_far_size += score_metadata_pair.second->second.GetEntrySize();
    entry_hashes.push_back(score_metadata_pair.second->first);
  }

  SIMPLE_CACHE_UMA(COUNTS_1M,
                   "Eviction.EntryCount", cache_type_, entry_hashes.size());
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToSelectEntries", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);

  delegate_->DoomEntries(
      &entry_hashes, base::BindOnce(&SimpleIndex::EvictionDone, AsWeakPtr()));
}
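
// Sort-key example (hypothetical entry): an entry last used 100 seconds ago
// with a 1000-byte size gets sort_value = 100 * (1000 + 512) = 151200. The
// key stored is UINT64_MAX - 151200, so the ascending std::sort above puts
// the oldest/largest entries first, and they are the first to be doomed.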

int32_t SimpleIndex::GetTrailerPrefetchSize(uint64_t entry_hash) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return -1;
  return it->second.GetTrailerPrefetchSize();
}

void SimpleIndex::SetTrailerPrefetchSize(uint64_t entry_hash, int32_t size) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(cache_type_, net::APP_CACHE);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return;
  int32_t original_size = it->second.GetTrailerPrefetchSize();
  it->second.SetTrailerPrefetchSize(size);
  if (original_size != it->second.GetTrailerPrefetchSize())
    PostponeWritingToDisk();
}

bool SimpleIndex::UpdateEntrySize(uint64_t entry_hash,
                                  base::StrictNumeric<uint32_t> entry_size) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  auto it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return false;

  // Update the entry size.  If there was no change, then there is nothing
  // else to do here.
  if (!UpdateEntryIteratorSize(&it, entry_size))
    return true;

  PostponeWritingToDisk();
  StartEvictionIfNeeded();
  return true;
}

void SimpleIndex::EvictionDone(int result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // Ignore the result of eviction. We did our best.
  eviction_in_progress_ = false;
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToDone", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
}

// static
bool SimpleIndex::InsertInEntrySet(
    uint64_t entry_hash,
    const disk_cache::EntryMetadata& entry_metadata,
    EntrySet* entry_set) {
  DCHECK(entry_set);
  auto result = entry_set->emplace(entry_hash, entry_metadata);
  return result.second;
}

void SimpleIndex::InsertEntryForTesting(uint64_t entry_hash,
                                        const EntryMetadata& entry_metadata) {
  DCHECK(entries_set_.find(entry_hash) == entries_set_.end());
  if (InsertInEntrySet(entry_hash, entry_metadata, &entries_set_))
    cache_size_ += entry_metadata.GetEntrySize();
}

void SimpleIndex::PostponeWritingToDisk() {
  if (!initialized_)
    return;
  const int delay = app_on_background_ ? kWriteToDiskOnBackgroundDelayMSecs
                                       : kWriteToDiskDelayMSecs;
  // If the timer is already active, Start() will just Reset it, postponing it.
  write_to_disk_timer_.Start(FROM_HERE, base::Milliseconds(delay),
                             write_to_disk_cb_);
}
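
// In effect PostponeWritingToDisk() debounces index writes: each cache
// operation within the 20-second window (100 ms while backgrounded) restarts
// the timer, so the index is flushed only after activity quiets down.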

bool SimpleIndex::UpdateEntryIteratorSize(
    EntrySet::iterator* it,
    base::StrictNumeric<uint32_t> entry_size) {
  // Update the total cache size with the new entry size.
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_GE(cache_size_, (*it)->second.GetEntrySize());
  uint32_t original_size = (*it)->second.GetEntrySize();
  cache_size_ -= (*it)->second.GetEntrySize();
  (*it)->second.SetEntrySize(entry_size);
  // We use GetEntrySize to get consistent rounding.
  cache_size_ += (*it)->second.GetEntrySize();
  // Return true if the size of the entry actually changed.  Make sure to
  // compare the rounded values provided by GetEntrySize().
  return original_size != (*it)->second.GetEntrySize();
}
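
// Rounding-comparison example (hypothetical sizes): updating an entry from
// 300 to 400 bytes leaves it at two 256-byte chunks (512) either way, so this
// returns false and the caller schedules no index write for the no-op change.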

void SimpleIndex::MergeInitializingSet(
    std::unique_ptr<SimpleIndexLoadResult> load_result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  EntrySet* index_file_entries = &load_result->entries;

  for (uint64_t removed_entry : removed_entries_) {
    index_file_entries->erase(removed_entry);
  }
  removed_entries_.clear();

  for (const auto& it : entries_set_) {
    const uint64_t entry_hash = it.first;
    std::pair<EntrySet::iterator, bool> insert_result =
        index_file_entries->insert(EntrySet::value_type(entry_hash,
                                                        EntryMetadata()));
    EntrySet::iterator& possibly_inserted_entry = insert_result.first;
    possibly_inserted_entry->second = it.second;
  }

  uint64_t merged_cache_size = 0;
  for (const auto& index_file_entry : *index_file_entries) {
    merged_cache_size += index_file_entry.second.GetEntrySize();
  }

  entries_set_.swap(*index_file_entries);
  cache_size_ = merged_cache_size;
  initialized_ = true;
  init_method_ = load_result->init_method;

  // The actual IO is asynchronous, so calling WriteToDisk() shouldn't slow the
  // merge down much.
  if (load_result->flush_required)
    WriteToDisk(INDEX_WRITE_REASON_STARTUP_MERGE);

  SIMPLE_CACHE_UMA(CUSTOM_COUNTS, "IndexNumEntriesOnInit", cache_type_,
                   entries_set_.size(), 0, 100000, 50);
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "CacheSizeOnInit", cache_type_,
      static_cast<base::HistogramBase::Sample>(cache_size_ / kBytesInKb));
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "MaxCacheSizeOnInit", cache_type_,
      static_cast<base::HistogramBase::Sample>(max_size_ / kBytesInKb));

  // Run all callbacks waiting for the index to come up.
  for (auto& callback : to_run_when_initialized_) {
    task_runner_->PostTask(FROM_HERE,
                           base::BindOnce(std::move(callback), net::OK));
  }
  to_run_when_initialized_.clear();
}

#if BUILDFLAG(IS_ANDROID)
void SimpleIndex::OnApplicationStateChange(
    base::android::ApplicationState state) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // For more info about android activities, see:
  // developer.android.com/training/basics/activity-lifecycle/pausing.html
  if (state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES) {
    app_on_background_ = false;
  } else if (state ==
      base::android::APPLICATION_STATE_HAS_STOPPED_ACTIVITIES) {
    app_on_background_ = true;
    WriteToDisk(INDEX_WRITE_REASON_ANDROID_STOPPED);
  }
}
#endif

void SimpleIndex::WriteToDisk(IndexWriteToDiskReason reason) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!initialized_)
    return;

  // Cancel any pending writes since we are about to write to disk now.
  write_to_disk_timer_.AbandonAndStop();

  base::OnceClosure after_write;
  if (cleanup_tracker_) {
    // Make anyone synchronizing with our cleanup wait for the index to be
    // written back.
    after_write = base::DoNothingWithBoundArgs(cleanup_tracker_);
  }

  index_file_->WriteToDisk(cache_type_, reason, entries_set_, cache_size_,
                           std::move(after_write));
}

}  // namespace disk_cache