// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/memory/mem_entry_impl.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "base/check_op.h"
#include "base/format_macros.h"
#include "base/functional/bind.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_math.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "net/base/interval.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"

using base::Time;

namespace disk_cache {

namespace {

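// Index of the data stream used to store sparse data in parent and child
// entries.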
const int kSparseData = 1;

// The maximum size of a child of a sparse entry is 2 to the power of this
// number.
const int kMaxChildEntryBits = 12;

// Sparse entry children have a maximum size of 4 KB.
const int kMaxChildEntrySize = 1 << kMaxChildEntryBits;

// Convert global offset to child index.
int64_t ToChildIndex(int64_t offset) {
  return offset >> kMaxChildEntryBits;
}

// Convert global offset to offset in child entry.
int ToChildOffset(int64_t offset) {
  return static_cast<int>(offset & (kMaxChildEntrySize - 1));
}

// Returns a name for a child entry given the base_name of the parent and the
// child_id.  This name is only used for logging purposes.
// If the entry is called entry_name, child entries will be named something
// like Range_entry_name:YYY where YYY is the number of the particular child.
std::string GenerateChildName(const std::string& base_name, int64_t child_id) {
  return base::StringPrintf("Range_%s:%" PRId64, base_name.c_str(), child_id);
}

// Returns NetLog parameters for the creation of a MemEntryImpl. A separate
// function is needed because child entries don't store their key().
base::Value::Dict NetLogEntryCreationParams(const MemEntryImpl* entry) {
  base::Value::Dict dict;
  std::string key;
  switch (entry->type()) {
    case MemEntryImpl::EntryType::kParent:
      key = entry->key();
      break;
    case MemEntryImpl::EntryType::kChild:
      key = GenerateChildName(entry->parent()->key(), entry->child_id());
      break;
  }
  dict.Set("key", key);
  dict.Set("created", true);
  return dict;
}

}  // namespace

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const std::string& key,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   key,
                   0,        // child_id
                   nullptr,  // parent
                   net_log) {
  Open();
  // Just creating the entry (without any data) could cause the storage to
  // grow beyond capacity, but we allow such infractions.
  backend_->ModifyStorageSize(GetStorageSize());
}

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   std::string(),  // key
                   child_id,
                   parent,
                   net_log) {
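  // Register this child in the parent's child map.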
  (*parent_->children_)[child_id] = this;
}

void MemEntryImpl::Open() {
  // Only a parent entry can be opened.
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_NE(ref_count_, std::numeric_limits<uint32_t>::max());
  ++ref_count_;
  DCHECK(!doomed_);
}

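// A child entry is in use exactly when its parent entry is in use.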
bool MemEntryImpl::InUse() const {
  if (type() == EntryType::kChild)
    return parent_->InUse();

  return ref_count_ > 0;
}

int MemEntryImpl::GetStorageSize() const {
  int storage_size = static_cast<int32_t>(key_.size());
  for (const auto& i : data_)
    storage_size += i.size();
  return storage_size;
}

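// Updates the last-used (and, for writes, last-modified) timestamps and lets
// the backend refresh its LRU bookkeeping.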
void MemEntryImpl::UpdateStateOnUse(EntryModified modified_enum) {
  if (!doomed_ && backend_)
    backend_->OnEntryUpdated(this);

  last_used_ = MemBackendImpl::Now(backend_);
  if (modified_enum == ENTRY_WAS_MODIFIED)
    last_modified_ = last_used_;
}

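// Dooming unlinks the entry from the backend; the object itself is deleted
// once it is no longer referenced.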
void MemEntryImpl::Doom() {
  if (!doomed_) {
    doomed_ = true;
    if (backend_)
      backend_->OnEntryDoomed(this);
    net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  }
  if (!ref_count_)
    delete this;
}

void MemEntryImpl::Close() {
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_GT(ref_count_, 0u);
  --ref_count_;
  if (ref_count_ == 0 && !doomed_) {
    // At this point the user is clearly done writing, so make sure there isn't
    // wastage due to the exponential growth of the vector for the main data
    // stream.
    Compact();
    if (children_) {
      for (const auto& child_info : *children_) {
        if (child_info.second != this)
          child_info.second->Compact();
      }
    }
  }
  if (!ref_count_ && doomed_)
    delete this;
}

std::string MemEntryImpl::GetKey() const {
  // A child entry doesn't have a key, so this method should not be called.
  DCHECK_EQ(EntryType::kParent, type());
  return key_;
}

Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}

Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}

int32_t MemEntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;
  return data_[index].size();
}

int MemEntryImpl::ReadData(int index,
                           int offset,
                           IOBuffer* buf,
                           int buf_len,
                           CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }

  int result = InternalReadData(index, offset, buf, buf_len);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int MemEntryImpl::WriteData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback,
                            bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }

  int result = InternalWriteData(index, offset, buf, buf_len, truncate);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }

  return result;
}

int MemEntryImpl::ReadSparseData(int64_t offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_READ,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalReadSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_READ);
  return result;
}

int MemEntryImpl::WriteSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_WRITE,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalWriteSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_WRITE);
  return result;
}

RangeResult MemEntryImpl::GetAvailableRange(int64_t offset,
                                            int len,
                                            RangeResultCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_GET_RANGE,
                          net::NetLogEventPhase::BEGIN, offset, len);
  }
  RangeResult result = InternalGetAvailableRange(offset, len);
  if (net_log_.IsCapturing()) {
    net_log_.EndEvent(net::NetLogEventType::SPARSE_GET_RANGE, [&] {
      return CreateNetLogGetAvailableRangeResultParams(result);
    });
  }
  return result;
}

bool MemEntryImpl::CouldBeSparse() const {
  DCHECK_EQ(EntryType::kParent, type());
  return (children_.get() != nullptr);
}

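// In-memory entries are always immediately ready for sparse I/O.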
net::Error MemEntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  return net::OK;
}

void MemEntryImpl::SetLastUsedTimeForTest(base::Time time) {
  last_used_ = time;
}

// ------------------------------------------------------------------------

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const ::std::string& key,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : key_(key),
      child_id_(child_id),
      parent_(parent),
      last_modified_(MemBackendImpl::Now(backend)),
      last_used_(last_modified_),
      backend_(backend) {
  backend_->OnEntryInserted(this);
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::MEMORY_CACHE_ENTRY);
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL,
                      [&] { return NetLogEntryCreationParams(this); });
}

MemEntryImpl::~MemEntryImpl() {
  if (backend_)
    backend_->ModifyStorageSize(-GetStorageSize());

  if (type() == EntryType::kParent) {
    if (children_) {
      EntryMap children;
      children_->swap(children);

      for (auto& it : children) {
        // Since |this| is stored in the map, it must be guarded against
        // double dooming, which would result in double destruction.
        if (it.second != this)
          it.second->Doom();
      }
    }
  } else {
    parent_->children_->erase(child_id_);
  }
  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL);
}

int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
                                   int buf_len) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);

  if (index < 0 || index >= kNumStreams || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = data_[index].size();
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

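  // Clamp the read to the end of the stream; CheckAdd also guards against
  // integer overflow in |offset + buf_len|.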
  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  std::copy(data_[index].begin() + offset,
            data_[index].begin() + offset + buf_len, buf->data());
  return buf_len;
}

int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
                                    int buf_len, bool truncate) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);
  if (!backend_)
    return net::ERR_INSUFFICIENT_RESOURCES;

  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

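  // Reject writes that would grow the stream past the backend's per-file size
  // limit.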
  const int max_file_size = backend_->MaxFileSize();

  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    return net::ERR_FAILED;
  }

  std::vector<char>& data = data_[index];
  const int old_data_size = base::checked_cast<int>(data.size());

  // Overwrite any data that fits inside the existing file.
  if (offset < old_data_size && buf_len > 0) {
    const int bytes_to_copy = std::min(old_data_size - offset, buf_len);
    std::copy(buf->data(), buf->data() + bytes_to_copy, data.begin() + offset);
  }

  const int delta = end_offset - old_data_size;
  if (truncate && delta < 0) {
    // We permit reducing the size even if the storage size has been exceeded,
    // since it can only improve the situation. See https://crbug.com/331839344.
    backend_->ModifyStorageSize(delta);
    data.resize(end_offset);
  } else if (delta > 0) {
    backend_->ModifyStorageSize(delta);
    if (backend_->HasExceededStorageSize()) {
      backend_->ModifyStorageSize(-delta);
      return net::ERR_INSUFFICIENT_RESOURCES;
    }

    // Zero fill any hole.
    int current_size = old_data_size;
    if (current_size < offset) {
      data.resize(offset);
      current_size = offset;
    }
    // Append any data after the old end of the file.
    if (end_offset > current_size) {
      data.insert(data.end(), buf->data() + current_size - offset,
                  buf->data() + buf_len);
    }
  }

  UpdateStateOnUse(ENTRY_WAS_MODIFIED);

  return buf_len;
}

int MemEntryImpl::InternalReadSparseData(int64_t offset,
                                         IOBuffer* buf,
                                         int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // Ensure that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  // The result of std::min is guaranteed to fit into int since buf_len did.
  buf_len = std::min(static_cast<int64_t>(buf_len),
                     std::numeric_limits<int64_t>::max() - offset);

  // We will keep using this buffer and adjust the offset in this buffer.
  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // Iterate until we have read enough.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), false);

    // No child present for that offset.
    if (!child)
      break;

    // We then need to prepare the child offset and len.
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // If we are trying to read from a position where the child entry has no
    // data, we should stop.
    if (child_offset < child->child_first_pos_)
      break;
    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(net_log_,
                            net::NetLogEventType::SPARSE_READ_CHILD_DATA,
                            net::NetLogEventPhase::BEGIN,
                            child->net_log_.source(), io_buf->BytesRemaining());
    }
    int ret =
        child->ReadData(kSparseData, child_offset, io_buf.get(),
                        io_buf->BytesRemaining(), CompletionOnceCallback());
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_READ_CHILD_DATA, ret);
    }

    // If we encounter an error in one entry, return immediately.
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Increment the counter by number of bytes read in the child entry.
    io_buf->DidConsume(ret);
  }

  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  return io_buf->BytesConsumed();
}

int MemEntryImpl::InternalWriteSparseData(int64_t offset,
                                          IOBuffer* buf,
                                          int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // We can't generally do this without the backend since we need it to create
  // child entries.
  if (!backend_)
    return net::ERR_FAILED;

  // Check that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  if (offset < 0 || buf_len < 0 || !base::CheckAdd(offset, buf_len).IsValid())
    return net::ERR_INVALID_ARGUMENT;

  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // This loop walks through child entries continuously starting from |offset|
  // and writes blocks of data (of maximum size kMaxChildEntrySize) into each
  // child entry until all |buf_len| bytes are written. The write operation can
  // start in the middle of an entry.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), true);
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // Find the right amount to write: the minimum of the remaining bytes to
    // write and the remaining capacity of this child entry.
    int write_len =
        std::min(io_buf->BytesRemaining(), kMaxChildEntrySize - child_offset);

    // Keep a record of the last byte position (exclusive) in the child.
    int data_size = child->GetDataSize(kSparseData);

    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(
          net_log_, net::NetLogEventType::SPARSE_WRITE_CHILD_DATA,
          net::NetLogEventPhase::BEGIN, child->net_log_.source(), write_len);
    }

    // Always writes to the child entry. This operation may overwrite data
    // previously written.
    // TODO(hclam): if there is data in the entry and this write is not
    // continuous we may want to discard this write.
    int ret = child->WriteData(kSparseData, child_offset, io_buf.get(),
                               write_len, CompletionOnceCallback(), true);
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_WRITE_CHILD_DATA, ret);
    }
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Keep a record of the first byte position in the child if the write was
    // neither aligned nor continuous. This is to enable writing to the middle
    // of an entry and still keep track of data off the aligned edge.
    if (data_size != child_offset)
      child->child_first_pos_ = child_offset;

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }

  UpdateStateOnUse(ENTRY_WAS_MODIFIED);
  return io_buf->BytesConsumed();
}

RangeResult MemEntryImpl::InternalGetAvailableRange(int64_t offset, int len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return RangeResult(net::ERR_CACHE_OPERATION_NOT_SUPPORTED);

  if (offset < 0 || len < 0)
    return RangeResult(net::ERR_INVALID_ARGUMENT);

  // Truncate |len| to make sure that |offset + len| does not overflow.
  // This is OK since one can't write that far anyway.
  // The result of std::min is guaranteed to fit into int since |len| did.
  len = std::min(static_cast<int64_t>(len),
                 std::numeric_limits<int64_t>::max() - offset);

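  // The requested byte range, as the half-open interval
  // [offset, offset + len).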
  net::Interval<int64_t> requested(offset, offset + len);

  // Find the first relevant child, if any --- may have to skip over
  // one entry as it may be before the range (consider, for example,
  // if the request is for [2048, 10000), while [0, 1024) is a valid range
  // for the entry).
  EntryMap::const_iterator i = children_->lower_bound(ToChildIndex(offset));
  if (i != children_->cend() && !ChildInterval(i).Intersects(requested))
    ++i;
  net::Interval<int64_t> found;
  if (i != children_->cend() &&
      requested.Intersects(ChildInterval(i), &found)) {
    // Found something relevant; now just need to expand this out if next
    // children are contiguous and relevant to the request.
    while (true) {
      ++i;
      net::Interval<int64_t> relevant_in_next_child;
      if (i == children_->cend() ||
          !requested.Intersects(ChildInterval(i), &relevant_in_next_child) ||
          relevant_in_next_child.min() != found.max()) {
        break;
      }

      found.SpanningUnion(relevant_in_next_child);
    }

    return RangeResult(found.min(), found.Length());
  }

  return RangeResult(offset, 0);
}

bool MemEntryImpl::InitSparseInfo() {
  DCHECK_EQ(EntryType::kParent, type());

  if (!children_) {
    // If we already have some data in the sparse stream but we are being
    // initialized as a sparse entry, we should fail.
    if (GetDataSize(kSparseData))
      return false;
    children_ = std::make_unique<EntryMap>();

    // The parent entry stores data for the first block, so save this object to
    // index 0.
    (*children_)[0] = this;
  }
  return true;
}

MemEntryImpl* MemEntryImpl::GetChild(int64_t offset, bool create) {
  DCHECK_EQ(EntryType::kParent, type());
  int64_t index = ToChildIndex(offset);
  auto i = children_->find(index);
  if (i != children_->end())
    return i->second;
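  // A newly created child registers itself in |children_| via its
  // constructor.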
  if (create)
    return new MemEntryImpl(backend_, index, this, net_log_.net_log());
  return nullptr;
}

net::Interval<int64_t> MemEntryImpl::ChildInterval(
    MemEntryImpl::EntryMap::const_iterator i) {
  DCHECK(i != children_->cend());
  const MemEntryImpl* child = i->second;
  // The valid range in the child is [child_first_pos_, DataSize), since child
  // entry operations just use the standard disk_cache::Entry API, so DataSize
  // is not aware of any hole at the beginning.
  int64_t child_responsibility_start = (i->first) * kMaxChildEntrySize;
  return net::Interval<int64_t>(
      child_responsibility_start + child->child_first_pos_,
      child_responsibility_start + child->GetDataSize(kSparseData));
}

void MemEntryImpl::Compact() {
  // Stream 0 should already be fine since it's written out in a single
  // WriteData().
  data_[1].shrink_to_fit();
  data_[2].shrink_to_fit();
}

}  // namespace disk_cache