// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/entry_impl.h"

#include <limits>
#include <memory>

#include "base/files/file_util.h"
#include "base/hash/hash.h"
#include "base/numerics/safe_math.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/sparse_control.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/log/net_log.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"

using base::Time;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(scoped_refptr<disk_cache::EntryImpl> entry,
               net::IOBuffer* buffer,
               net::CompletionOnceCallback callback,
               net::NetLogEventType end_event_type)
      : entry_(std::move(entry)),
        callback_(std::move(callback)),
        buf_(buffer),
        end_event_type_(end_event_type) {
    entry_->IncrementIoCount();
  }

  SyncCallback(const SyncCallback&) = delete;
  SyncCallback& operator=(const SyncCallback&) = delete;

  ~SyncCallback() override = default;

  void OnFileIOComplete(int bytes_copied) override;
  void Discard();

 private:
  scoped_refptr<disk_cache::EntryImpl> entry_;
  net::CompletionOnceCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  const net::NetLogEventType end_event_type_;
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsCapturing()) {
      disk_cache::NetLogReadWriteComplete(entry_->net_log(), end_event_type_,
                                          net::NetLogEventPhase::END,
                                          bytes_copied);
    }
    buf_ = nullptr;  // Release the buffer before invoking the callback.
    std::move(callback_).Run(bytes_copied);
  }
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = nullptr;
  OnFileIOComplete(0);
}
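
// Typical SyncCallback lifecycle, as used by InternalReadData() and
// InternalWriteData() below (an illustrative sketch, not additional API;
// |entry|, |buf|, |file|, |len| and |offset| are stand-ins for locals in
// those functions): the object is heap-allocated, handed to
// File::Read()/Write(), and deletes itself from OnFileIOComplete().
//
//   auto* io_callback =
//       new SyncCallback(entry, buf, std::move(callback),
//                        net::NetLogEventType::ENTRY_READ_DATA);
//   bool completed;
//   if (!file->Read(buf->data(), len, offset, io_callback, &completed))
//     io_callback->Discard();  // IO failed; drop the user callback.
//   else if (completed)
//     io_callback->Discard();  // Finished synchronously; caller returns len.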

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend) : backend_(backend->GetWeakPtr()) {
    buffer_.reserve(kMaxBlockSize);
  }

  UserBuffer(const UserBuffer&) = delete;
  UserBuffer& operator=(const UserBuffer&) = delete;

  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| starting at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.data(); }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_ = 0;
  std::vector<char> buffer_;
  bool grow_allowed_ = true;
};
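
// Illustrative use of UserBuffer (a sketch; |io_buf| is a stand-in IOBuffer,
// and the numbers assume kMaxBlockSize is the 16KB described above). A first
// write inside the first 16KB keeps the buffer anchored at offset 0,
// zero-filling any gap before the written range; a first write past 16KB
// anchors the buffer at that offset instead:
//
//   UserBuffer buffer(backend);
//   buffer.Write(20000, io_buf, 100);  // offset_ == 20000: the buffer now
//   buffer.Start();                    // covers [20000, 20100) and holds
//   buffer.End();                      // nothing before 20000.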

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // 0-length writes that don't extend can just be ignored here, and are safe
  // even if they are before offset_, as truncates are handled elsewhere.
  if (len == 0 && offset < End())
    return;

  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}
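
// PreRead() example (a sketch of the semantics described above): with the
// buffer covering [1000, 1500) (offset_ == 1000) and eof == 500 bytes stored
// in the file, a request for 2000 bytes at offset 0 returns false and clamps
// *len to 500. The caller should do that smaller read from disk, and can come
// back for the buffered part afterwards.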

int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file, so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}
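
// GrowBuffer() growth policy, worked through (a sketch; kMaxBlockSize is the
// 16KB mentioned above): with capacity() == 64KB and required == 80KB,
// to_add becomes max(80KB - 64KB, 64KB) == 64KB, so the buffer asks the
// backend to grow to 128KB (subject to |limit|). Capacity at least doubles on
// each growth, keeping the number of reallocations logarithmic in the final
// buffer size.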

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(nullptr, Addr(0)),
      node_(nullptr, Addr(0)),
      backend_(backend->GetWeakPtr()),
      read_only_(read_only) {
  entry_.LazyInit(backend->File(address), address);
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }

  int result =
      InternalReadData(index, offset, buf, buf_len, std::move(callback));

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }

  int result = InternalWriteData(index, offset, buf, buf_len,
                                 std::move(callback), truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            std::move(callback));
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, std::move(callback));
  return result;
}

RangeResult EntryImpl::GetAvailableRangeImpl(int64_t offset, int len) {
  int result = InitSparseData();
  if (net::OK != result)
    return RangeResult(static_cast<net::Error>(result));

  return sparse_->GetAvailableRange(offset, len);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(CompletionOnceCallback callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(std::move(callback));
}

uint32_t EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address,
                            const std::string& key,
                            uint32_t hash) {
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32_t>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size() + 1, offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32_t hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                  unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32_t current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32_t new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::PersistentHash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::DISK_CACHE_ENTRY);
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL, [&] {
    return CreateNetLogParametersEntryCreationParams(this, created);
  });
}

const net::NetLogWithSource& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

  return ((key_size - key1_len) / 256 + 2);
}
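
// Worked example for the block math above (a sketch; the figures assume the
// blockfile EntryStore layout where sizeof(EntryStore) is 256 bytes and the
// key field starts 96 bytes in, making key1_len == 160): a 100-byte key fits
// in the base block, so NumBlocksForEntry(100) == 1; a 200-byte key needs
// (200 - 160) / 256 + 2 == 2 blocks; a 500-byte key needs
// (500 - 160) / 256 + 2 == 3 blocks, since each additional 256-byte block
// holds another 256 bytes of key.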

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key, key_len);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  // We store a trailing \0 on disk.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len + 1)) {
    return std::string();
  }

  // Do not attempt to read up to the expected on-disk '\0' (which would be
  // |key_len + 1| bytes total): if, due to a corrupt file, that byte isn't
  // actually '\0', reading it would mess up |key_|'s internal nul.
  key_.resize(key_len);
  if (!key_file->Read(key_.data(), key_.size(), offset)) {
    key_.clear();
  }
  DCHECK_LE(strlen(key_.data()), static_cast<size_t>(key_len));
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32_t EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index,
                        int offset,
                        IOBuffer* buf,
                        int buf_len,
                        CompletionOnceCallback callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, std::move(callback));

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len,
                              std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index,
                         int offset,
                         IOBuffer* buf,
                         int buf_len,
                         CompletionOnceCallback callback,
                         bool truncate) {
  if (callback.is_null()) {
    return WriteDataImpl(index, offset, buf, buf_len, std::move(callback),
                         truncate);
  }

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64_t offset,
                              IOBuffer* buf,
                              int buf_len,
                              CompletionOnceCallback callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, std::move(callback));

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len,
                                    std::move(callback));
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64_t offset,
                               IOBuffer* buf,
                               int buf_len,
                               CompletionOnceCallback callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, std::move(callback));

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len,
                                     std::move(callback));
  return net::ERR_IO_PENDING;
}

RangeResult EntryImpl::GetAvailableRange(int64_t offset,
                                         int len,
                                         RangeResultCallback callback) {
  if (!background_queue_.get())
    return RangeResult(net::ERR_UNEXPECTED);

  background_queue_->GetAvailableRange(this, offset, len, std::move(callback));
  return RangeResult(net::ERR_IO_PENDING);
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  auto sparse = std::make_unique<SparseControl>(const_cast<EntryImpl*>(this));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

net::Error EntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, std::move(callback));
  return net::ERR_IO_PENDING;
}

void EntryImpl::SetLastUsedTimeForTest(base::Time time) {
  SetTimes(time, time);
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLogEventType::ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index,
                                int offset,
                                IOBuffer* buf,
                                int buf_len,
                                CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback =
        new SyncCallback(base::WrapRefCounted(this), buf, std::move(callback),
                         net::NetLogEventType::ENTRY_READ_DATA);
  }

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index,
                                 int offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    int size = base::CheckAdd(offset, buf_len)
                   .ValueOrDefault(std::numeric_limits<int32_t>::max());
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback = new SyncCallback(this, buf, std::move(callback),
                                   net::NetLogEventType::ENTRY_WRITE_DATA);
  }

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !base::DeleteFile(backend_->GetFileName(address));
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = nullptr;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return nullptr;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    auto file = base::MakeRefCounted<File>(kKeyFileIndex == index);
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that end up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());

  return PrepareBuffer(index, offset, buf_len);
}
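
// Example of the policy above (an illustrative sketch): a writer that appends
// sequentially from offset 0 stays entirely in the UserBuffer until the
// buffer cannot grow any further; at that point Flush() pushes the
// accumulated range to disk and the buffer is reset for the data that
// follows. A write that lands before the buffer's Start(), by contrast,
// forces a flush first, because the buffered bytes are more recent than
// whatever the disk holds for the overlapping range.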

// We get to this function with some data already stored. If there is a
// truncation that results on data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  // This is only called when actually truncating the file, not simply when
  // truncate = true is passed to WriteData(), which could be growing the file.
  DCHECK_LT(new_size, current_size);

  if (new_size == 0) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);

        if (offset < user_buffers_[index]->Start()) {
          // Request to write before the current buffer's start, so flush it to
          // disk and re-init.
          UpdateSize(index, current_size, new_size);
          if (!Flush(index, 0))
            return false;
          return PrepareBuffer(index, offset, buf_len);
        } else {
          // Can just stick to using the memory buffer.
          return true;
        }
      }

      // Truncated to before the current buffer, so can just discard it.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}
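
// Truncation example (a sketch of the cases above): if all of the stream
// still lives in the memory buffer (nothing on disk yet), truncating within
// the buffered range just shrinks the buffer. If the data is already in an
// external file, truncating to a size at or below kMaxBlockSize imports the
// surviving prefix back into a local buffer via ImportSeparateFile(), while a
// larger new size lets the operation go directly to disk.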

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());
  user_buffers_[index]->Write(len, nullptr, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file || !file->Read(user_buffers_[index]->Data(), len, offset, nullptr,
                           nullptr)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, nullptr, nullptr))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  auto sparse = std::make_unique<SparseControl>(this);
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32_t flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32_t EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index,
                        std::unique_ptr<char[]>* buffer,
                        Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = std::make_unique<char[]>(data_len);
      memcpy(buffer->get(), user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = nullptr;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

}  // namespace disk_cache