xref: /aosp_15_r20/external/cronet/net/http/mock_http_cache.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "net/http/mock_http_cache.h"
6 
7 #include <algorithm>
8 #include <limits>
9 #include <memory>
10 #include <utility>
11 
12 #include "base/feature_list.h"
13 #include "base/functional/bind.h"
14 #include "base/functional/callback.h"
15 #include "base/functional/callback_helpers.h"
16 #include "base/location.h"
17 #include "base/task/single_thread_task_runner.h"
18 #include "net/base/features.h"
19 #include "net/base/net_errors.h"
20 #include "net/disk_cache/disk_cache_test_util.h"
21 #include "net/http/http_cache_writers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
23 
24 namespace net {
25 
26 namespace {
27 
28 // During testing, we are going to limit the size of a cache entry to this many
29 // bytes using DCHECKs in order to prevent a test from causing unbounded memory
30 // growth. In practice cache entry shouldn't come anywhere near this limit for
31 // tests that use the mock cache. If they do, that's likely a problem with the
32 // test. If a test requires using massive cache entries, they should use a real
33 // cache backend instead.
34 const int kMaxMockCacheEntrySize = 100 * 1000 * 1000;
35 
36 // We can override the test mode for a given operation by setting this global
37 // variable.
38 int g_test_mode = 0;
39 
GetTestModeForEntry(const std::string & key)40 int GetTestModeForEntry(const std::string& key) {
41   GURL url(HttpCache::GetResourceURLFromHttpCacheKey(key));
42   const MockTransaction* t = FindMockTransaction(url);
43   DCHECK(t);
44   return t->test_mode;
45 }
46 
47 }  // namespace
48 
49 //-----------------------------------------------------------------------------
50 
// A deferred completion callback together with a reference to the entry it
// belongs to. Holding the scoped_refptr keeps the entry alive until the
// stored callback is delivered (see StoreAndDeliverCallbacks()).
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  base::OnceClosure callback;
};
55 
// Creates an in-memory entry for |key|. The sync/async test mode is derived
// from the mock transaction registered for the key's URL.
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), max_file_size_(std::numeric_limits<int>::max()) {
  test_mode_ = GetTestModeForEntry(key);
}
60 
// Marks the entry as doomed; the cache drops doomed entries on next open.
void MockDiskEntry::Doom() {
  doomed_ = true;
}
64 
// Closing just drops one reference; the entry is refcounted and may outlive
// the consumer (pending callbacks hold their own references).
void MockDiskEntry::Close() {
  Release();
}
68 
std::string MockDiskEntry::GetKey() const {
  return key_;
}
72 
// The mock does not track usage times; "now" is always reported.
base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::Now();
}
76 
// The mock does not track modification times; "now" is always reported.
base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::Now();
}
80 
// Returns the size of the data stored at stream |index|.
int32_t MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32_t>(data_[index].size());
}
85 
// Reads up to |buf_len| bytes from stream |index| starting at |offset|.
// Completes synchronously when TEST_MODE_SYNC_CACHE_READ is set; otherwise
// posts |callback| (or parks it when a DEFER_READ pause is requested) and
// returns ERR_IO_PENDING.
int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_ & FAIL_READ) {
    return ERR_CACHE_READ_FAILURE;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size())) {
    return ERR_FAILED;
  }
  // Reading exactly at the end is a successful zero-byte read, not an error.
  if (static_cast<size_t>(offset) == data_[index].size()) {
    return 0;
  }

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) {
    return num;
  }

  // Pause and resume: hold the callback until ResumeDiskEntryOperation().
  if (defer_op_ == DEFER_READ) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = num;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), num);
  return ERR_IO_PENDING;
}
123 
// Delivers the callback parked by a deferred ReadData()/WriteData(), using
// the result code captured at defer time.
void MockDiskEntry::ResumeDiskEntryOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_), resume_return_code_);
  resume_return_code_ = 0;
}
129 
// Writes |buf_len| bytes to stream |index| at |offset|, truncating the
// stream to the end of the write (only truncating writes are supported).
// Mirrors ReadData()'s sync/async/defer behavior.
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_ & FAIL_WRITE) {
    // NOTE: the mock intentionally reports ERR_CACHE_READ_FAILURE here (not a
    // write error); existing cache tests assert on this exact code.
    CallbackLater(std::move(callback), ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size())) {
    return ERR_FAILED;
  }

  // Guard against runaway entry growth in tests; see kMaxMockCacheEntrySize.
  DCHECK_LT(offset + buf_len, kMaxMockCacheEntrySize);
  // Only the response-body stream (index 1) honors the max file size limit.
  if (offset + buf_len > max_file_size_ && index == 1) {
    return net::ERR_FAILED;
  }

  data_[index].resize(offset + buf_len);
  if (buf_len) {
    memcpy(&data_[index][offset], buf->data(), buf_len);
  }

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
    return buf_len;
  }

  // Pause and resume: hold the callback until ResumeDiskEntryOperation().
  if (defer_op_ == DEFER_WRITE) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = buf_len;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
173 
ReadSparseData(int64_t offset,IOBuffer * buf,int buf_len,CompletionOnceCallback callback)174 int MockDiskEntry::ReadSparseData(int64_t offset,
175                                   IOBuffer* buf,
176                                   int buf_len,
177                                   CompletionOnceCallback callback) {
178   DCHECK(!callback.is_null());
179   if (fail_sparse_requests_) {
180     return ERR_NOT_IMPLEMENTED;
181   }
182   if (!sparse_ || busy_ || cancel_) {
183     return ERR_CACHE_OPERATION_NOT_SUPPORTED;
184   }
185   if (offset < 0) {
186     return ERR_FAILED;
187   }
188 
189   if (fail_requests_ & FAIL_READ_SPARSE) {
190     return ERR_CACHE_READ_FAILURE;
191   }
192 
193   DCHECK(offset < std::numeric_limits<int32_t>::max());
194   int real_offset = static_cast<int>(offset);
195   if (!buf_len) {
196     return 0;
197   }
198 
199   int num = std::min(static_cast<int>(data_[1].size()) - real_offset, buf_len);
200   memcpy(buf->data(), &data_[1][real_offset], num);
201 
202   if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) {
203     return num;
204   }
205 
206   CallbackLater(std::move(callback), num);
207   busy_ = true;
208   delayed_ = false;
209   return ERR_IO_PENDING;
210 }
211 
// Writes |buf_len| bytes of sparse data at |offset|, growing the backing
// store as needed. The first sparse write flips the entry into sparse mode;
// this is rejected if plain data already exists in stream 1 (the two uses
// share the same buffer).
int MockDiskEntry::WriteSparseData(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_) {
    return ERR_NOT_IMPLEMENTED;
  }
  if (busy_ || cancel_) {
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  }
  if (!sparse_) {
    if (data_[1].size()) {
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    }
    sparse_ = true;
  }
  if (offset < 0) {
    return ERR_FAILED;
  }
  if (!buf_len) {
    return 0;
  }

  if (fail_requests_ & FAIL_WRITE_SPARSE) {
    // NOTE: intentionally reports a read-failure code; tests assert on it.
    return ERR_CACHE_READ_FAILURE;
  }

  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len) {
    DCHECK_LT(real_offset + buf_len, kMaxMockCacheEntrySize);
    data_[1].resize(real_offset + buf_len);
  }

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
    return buf_len;
  }

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
256 
// Scans the sparse buffer starting at |offset| for the first contiguous run
// of non-zero bytes (zero bytes are treated as "holes") and reports its
// start and length, capped at |len| bytes examined.
disk_cache::RangeResult MockDiskEntry::GetAvailableRange(
    int64_t offset,
    int len,
    RangeResultCallback callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_ || cancel_) {
    return RangeResult(ERR_CACHE_OPERATION_NOT_SUPPORTED);
  }
  if (offset < 0) {
    return RangeResult(ERR_FAILED);
  }

  if (fail_requests_ & FAIL_GET_AVAILABLE_RANGE) {
    return RangeResult(ERR_CACHE_READ_FAILURE);
  }

  RangeResult result;
  result.net_error = OK;
  result.start = offset;
  result.available_len = 0;
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset) {
    return result;
  }

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  for (; num > 0; num--, real_offset++) {
    if (!result.available_len) {
      // Still looking for the start of the first non-zero run.
      if (data_[1][real_offset]) {
        result.available_len++;
        result.start = real_offset;
      }
    } else {
      // Inside a run; stop at the first hole.
      if (!data_[1][real_offset]) {
        break;
      }
      result.available_len++;
    }
  }
  // NOTE(review): this checks the WRITE sync flag for a read-style query —
  // mirrors upstream behavior that tests rely on; do not "fix" casually.
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE) {
    return result;
  }

  CallbackLater(base::BindOnce(std::move(callback), result));
  return RangeResult(ERR_IO_PENDING);
}
304 
CouldBeSparse() const305 bool MockDiskEntry::CouldBeSparse() const {
306   if (fail_sparse_requests_) {
307     return false;
308   }
309   return sparse_;
310 }
311 
// Flags sparse IO as cancelled; cleared again by ReadyForSparseIO().
void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}
315 
// Clears a pending sparse-IO cancellation. Returns OK immediately when there
// is nothing to wait for (or in sync mode); otherwise notifies via |callback|.
net::Error MockDiskEntry::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (fail_sparse_requests_) {
    return ERR_NOT_IMPLEMENTED;
  }
  if (!cancel_) {
    return OK;
  }

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ) {
    return OK;
  }

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass).  Just notify the caller that it finished.
  CallbackLater(std::move(callback), 0);
  return ERR_IO_PENDING;
}
335 
// Not supported by the mock; hitting this indicates a test misuse.
void MockDiskEntry::SetLastUsedTimeForTest(base::Time time) {
  NOTREACHED();
}
339 
340 // If |value| is true, don't deliver any completion callbacks until called
341 // again with |value| set to false.  Caution: remember to enable callbacks
342 // again or all subsequent tests will fail.
343 // Static.
// Toggles global callback suppression. Turning it off flushes every
// callback stored while suppression was active.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value) {
    return;
  }
  ignore_callbacks_ = value;
  if (!value) {
    StoreAndDeliverCallbacks(false, nullptr, base::OnceClosure());
  }
}
353 
MockDiskEntry::~MockDiskEntry() = default;
355 
356 // Unlike the callbacks for MockHttpTransaction, we want this one to run even
357 // if the consumer called Close on the MockDiskEntry.  We achieve that by
358 // leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(base::OnceClosure callback) {
  // While callbacks are globally suppressed, park them instead of posting.
  if (ignore_callbacks_) {
    return StoreAndDeliverCallbacks(true, this, std::move(callback));
  }
  // Binding |this| retains a reference, so the closure runs even if the
  // consumer closes the entry first.
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE,
      base::BindOnce(&MockDiskEntry::RunCallback, this, std::move(callback)));
}
367 
// Convenience overload: binds |result| into the completion callback and
// defers to the closure-based overload above.
void MockDiskEntry::CallbackLater(CompletionOnceCallback callback, int result) {
  CallbackLater(base::BindOnce(std::move(callback), result));
}
371 
// Runs a posted callback, adding one extra message-loop hop for busy
// (sparse) entries so async sparse IO always takes two trips.
void MockDiskEntry::RunCallback(base::OnceClosure callback) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated.  What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots).  So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(std::move(callback));
    }
  }
  busy_ = false;
  std::move(callback).Run();
}
390 
391 // When |store| is true, stores the callback to be delivered later; otherwise
392 // delivers any callback previously stored.
393 // Static.
// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             base::OnceClosure callback) {
  // Function-local static: shared across all entries, lives for the process.
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    // The CallbackInfo's scoped_refptr keeps |entry| alive until delivery.
    CallbackInfo c = {entry, std::move(callback)};
    callback_list.push_back(std::move(c));
  } else {
    for (auto& callback_info : callback_list) {
      callback_info.entry->CallbackLater(std::move(callback_info.callback));
    }
    callback_list.clear();
  }
}
408 
409 // Statics.
bool MockDiskEntry::ignore_callbacks_ = false;  // See IgnoreCallbacks().
411 
412 //-----------------------------------------------------------------------------
413 
// In-memory cache backend; max file size defaults to "unlimited".
MockDiskCache::MockDiskCache()
    : Backend(DISK_CACHE), max_file_size_(std::numeric_limits<int>::max()) {}
416 
// Drops the cache's own reference on every stored entry.
MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}
420 
int32_t MockDiskCache::GetEntryCount() const {
  return static_cast<int32_t>(entries_.size());
}
424 
// Opens |key| if it exists, otherwise creates it. The callback is split
// because either the open or the create attempt may consume it.
disk_cache::EntryResult MockDiskCache::OpenOrCreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());

  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback),
        EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_) {
    return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);
  }

  EntryResult result;

  // First try opening the entry.
  auto split_callback = base::SplitOnceCallback(std::move(callback));
  result = OpenEntry(key, request_priority, std::move(split_callback.first));
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING) {
    return result;
  }

  // Unable to open, try creating the entry.
  result = CreateEntry(key, request_priority, std::move(split_callback.second));
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING) {
    return result;
  }

  return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);
}
459 
// Opens an existing entry, honoring the configured failure modes. Doomed
// entries are evicted on open and reported as a miss. Completes synchronously
// when the key's test mode requests TEST_MODE_SYNC_CACHE_START.
disk_cache::EntryResult MockDiskCache::OpenEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_) {
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  auto it = entries_.find(key);
  if (it == entries_.end()) {
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  // Lazily evict doomed entries: drop the cache's reference and report a miss.
  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  open_count_++;

  // The returned EntryResult owns this extra reference.
  MockDiskEntry* entry = it->second;
  entry->AddRef();

  if (soft_failures_ || soft_failures_one_instance_) {
    entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;  // One-shot flags are consumed here.
  }

  entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeOpened(entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) {
    return result;
  }

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
506 
// Creates a new entry for |key|, replacing any doomed one. Creating over a
// live entry is either a hard NOTREACHED (double_create_check_) or a normal
// failure. Supports sync completion and the DEFER_CREATE pause point.
disk_cache::EntryResult MockDiskCache::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_) {
    return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);
  }

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_) {
        NOTREACHED();
      } else {
        return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);
      }
    }
    // Replace the doomed entry: drop the cache's reference to it.
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  // Reference held by the entries_ map.
  new_entry->AddRef();
  entries_[key] = new_entry;

  // Reference owned by the returned EntryResult.
  new_entry->AddRef();

  if (soft_failures_ || soft_failures_one_instance_) {
    new_entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;  // One-shot flags are consumed here.
  }

  if (fail_sparse_requests_) {
    new_entry->set_fail_sparse_requests();
  }

  new_entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeCreated(new_entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) {
    return result;
  }

  // Pause and resume: hold the completion until ResumeCacheOperation().
  if (defer_op_ == MockDiskEntry::DEFER_CREATE) {
    defer_op_ = MockDiskEntry::DEFER_NONE;
    resume_callback_ = base::BindOnce(std::move(callback), std::move(result));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
570 
// Removes |key| from the cache (if present) and reports success either
// synchronously or via |callback| depending on the key's test mode.
net::Error MockDiskCache::DoomEntry(const std::string& key,
                                    net::RequestPriority request_priority,
                                    CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(std::move(callback), ERR_CACHE_DOOM_FAILURE));
    return ERR_IO_PENDING;
  }

  if (fail_requests_) {
    return ERR_CACHE_DOOM_FAILURE;
  }

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
    doomed_count_++;
  }

  // Dooming an absent entry still counts as success, matching real backends.
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START) {
    return OK;
  }

  CallbackLater(base::BindOnce(std::move(callback), OK));
  return ERR_IO_PENDING;
}
598 
// Not supported by the mock backend.
net::Error MockDiskCache::DoomAllEntries(CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
602 
// Not supported by the mock backend.
net::Error MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                             const base::Time end_time,
                                             CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
608 
// Not supported by the mock backend.
net::Error MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                           CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
613 
// Not supported by the mock backend.
int64_t MockDiskCache::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
618 
// Iterator stub: enumeration is not supported by the mock backend.
class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  EntryResult OpenNextEntry(EntryResultCallback callback) override {
    return EntryResult::MakeError(ERR_NOT_IMPLEMENTED);
  }
};
625 
std::unique_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return std::make_unique<NotImplementedIterator>();
}
629 
void MockDiskCache::GetStats(base::StringPairs* stats) {}  // No stats to report.
631 
// Records the key so tests can verify which hits were reported.
void MockDiskCache::OnExternalCacheHit(const std::string& key) {
  external_cache_hits_.push_back(key);
}
635 
GetEntryInMemoryData(const std::string & key)636 uint8_t MockDiskCache::GetEntryInMemoryData(const std::string& key) {
637   if (!support_in_memory_entry_data_) {
638     return 0;
639   }
640 
641   auto it = entries_.find(key);
642   if (it != entries_.end()) {
643     return it->second->in_memory_data();
644   }
645   return 0;
646 }
647 
SetEntryInMemoryData(const std::string & key,uint8_t data)648 void MockDiskCache::SetEntryInMemoryData(const std::string& key, uint8_t data) {
649   auto it = entries_.find(key);
650   if (it != entries_.end()) {
651     it->second->set_in_memory_data(data);
652   }
653 }
654 
int64_t MockDiskCache::MaxFileSize() const {
  return max_file_size_;
}
658 
ReleaseAll()659 void MockDiskCache::ReleaseAll() {
660   for (auto entry : entries_) {
661     entry.second->Release();
662   }
663   entries_.clear();
664 }
665 
// Posts |callback| to the current task runner (no extra bookkeeping, unlike
// MockDiskEntry::CallbackLater).
void MockDiskCache::CallbackLater(base::OnceClosure callback) {
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE, std::move(callback));
}
670 
IsDiskEntryDoomed(const std::string & key)671 bool MockDiskCache::IsDiskEntryDoomed(const std::string& key) {
672   auto it = entries_.find(key);
673   if (it != entries_.end()) {
674     return it->second->is_doomed();
675   }
676 
677   return false;
678 }
679 
// Delivers the completion parked by a deferred CreateEntry().
void MockDiskCache::ResumeCacheOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_));
}
684 
GetDiskEntryRef(const std::string & key)685 scoped_refptr<MockDiskEntry> MockDiskCache::GetDiskEntryRef(
686     const std::string& key) {
687   auto it = entries_.find(key);
688   if (it == entries_.end()) {
689     return nullptr;
690   }
691   return it->second;
692 }
693 
// Keys reported through OnExternalCacheHit(), in order of arrival.
const std::vector<std::string>& MockDiskCache::GetExternalCacheHits() const {
  return external_cache_hits_;
}
697 
698 //-----------------------------------------------------------------------------
699 
// Synchronously produces a fresh MockDiskCache; |callback| is never used.
disk_cache::BackendResult MockBackendFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  return disk_cache::BackendResult::Make(std::make_unique<MockDiskCache>());
}
705 
706 //-----------------------------------------------------------------------------
707 
// Default cache: mock network layer + mock disk backend.
MockHttpCache::MockHttpCache()
    : MockHttpCache(std::make_unique<MockBackendFactory>()) {}
710 
// Cache with a caller-supplied backend factory (still a mock network layer).
MockHttpCache::MockHttpCache(
    std::unique_ptr<HttpCache::BackendFactory> disk_cache_factory)
    : http_cache_(std::make_unique<MockNetworkLayer>(),
                  std::move(disk_cache_factory)) {}
715 
// Blocks on lazy backend creation and returns it, or null on failure.
disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : nullptr;
}
723 
// The backend downcast to the mock type; only valid when the cache was built
// with a MockBackendFactory.
MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}
727 
int MockHttpCache::CreateTransaction(std::unique_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}
731 
void MockHttpCache::SimulateCacheLockTimeout() {
  http_cache_.SimulateCacheLockTimeoutForTesting();
}
735 
void MockHttpCache::SimulateCacheLockTimeoutAfterHeaders() {
  http_cache_.SimulateCacheLockTimeoutAfterHeadersForTesting();
}
739 
void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
743 
// Reads and parses the serialized HttpResponseInfo from stream 0 of
// |disk_entry|. Returns false when parsing fails; a short read is reported
// via EXPECT_EQ rather than a hard failure.
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  auto buffer = base::MakeRefCounted<IOBufferWithSize>(size);
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}
758 
// Serializes |response_info| and writes it to stream 0 of |disk_entry|,
// truncating any previous contents. Returns true on a complete write.
bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  base::Pickle pickle;
  response_info->Persist(&pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  int len = static_cast<int>(pickle.size());
  // The buffer only wraps the pickle; |pickle| must outlive the write, which
  // the synchronous wait below guarantees.
  auto data = base::MakeRefCounted<WrappedIOBuffer>(pickle);

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
774 
OpenBackendEntry(const std::string & key,disk_cache::Entry ** entry)775 bool MockHttpCache::OpenBackendEntry(const std::string& key,
776                                      disk_cache::Entry** entry) {
777   TestEntryResultCompletionCallback cb;
778   disk_cache::EntryResult result =
779       backend()->OpenEntry(key, net::HIGHEST, cb.callback());
780   result = cb.GetResult(std::move(result));
781   if (result.net_error() == OK) {
782     *entry = result.ReleaseEntry();
783     return true;
784   } else {
785     return false;
786   }
787 }
788 
CreateBackendEntry(const std::string & key,disk_cache::Entry ** entry,NetLog * net_log)789 bool MockHttpCache::CreateBackendEntry(const std::string& key,
790                                        disk_cache::Entry** entry,
791                                        NetLog* net_log) {
792   TestEntryResultCompletionCallback cb;
793   disk_cache::EntryResult result =
794       backend()->CreateEntry(key, net::HIGHEST, cb.callback());
795   result = cb.GetResult(std::move(result));
796   if (result.net_error() == OK) {
797     *entry = result.ReleaseEntry();
798     return true;
799   } else {
800     return false;
801   }
802 }
803 
804 // Static.
GetTestMode(int test_mode)805 int MockHttpCache::GetTestMode(int test_mode) {
806   if (!g_test_mode) {
807     return test_mode;
808   }
809 
810   return g_test_mode;
811 }
812 
813 // Static.
// Static. Installs (or clears, with 0) the global test-mode override.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}
817 
// True when the active entry for |key| has at least one writer transaction.
bool MockHttpCache::IsWriterPresent(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry && entry->HasWriters() && !entry->writers()->IsEmpty();
}
822 
// True when the active entry for |key| has a headers-phase transaction.
bool MockHttpCache::IsHeadersTransactionPresent(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry && entry->headers_transaction();
}
827 
// Number of reader transactions on |key|'s active entry (0 if inactive).
int MockHttpCache::GetCountReaders(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry ? entry->readers().size() : 0;
}
832 
// Number of transactions queued to join |key|'s active entry (0 if inactive).
int MockHttpCache::GetCountAddToEntryQueue(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry ? entry->add_to_entry_queue().size() : 0;
}
837 
// Number of transactions past headers awaiting dispatch (0 if inactive).
int MockHttpCache::GetCountDoneHeadersQueue(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry ? entry->done_headers_queue().size() : 0;
}
842 
// Number of writer transactions on |key|'s active entry (0 if inactive or
// no writers object exists).
int MockHttpCache::GetCountWriterTransactions(const std::string& key) {
  auto entry = http_cache_.GetActiveEntry(key);
  return entry && entry->writers() ? entry->writers()->GetTransactionsCount()
                                   : 0;
}
848 
base::WeakPtr<HttpCache> MockHttpCache::GetWeakPtr() {
  return http_cache_.GetWeakPtr();
}
852 
853 //-----------------------------------------------------------------------------
854 
// Variant that reports a pending create but never invokes |callback| —
// used to test code paths that must survive a backend that goes silent.
disk_cache::EntryResult MockDiskCacheNoCB::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  return EntryResult::MakeError(ERR_IO_PENDING);
}
861 
862 //-----------------------------------------------------------------------------
863 
// Synchronously produces a MockDiskCacheNoCB backend; |callback| is unused.
disk_cache::BackendResult MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  return disk_cache::BackendResult::Make(std::make_unique<MockDiskCacheNoCB>());
}
869 
870 //-----------------------------------------------------------------------------
871 
MockBlockingBackendFactory::MockBlockingBackendFactory() = default;
MockBlockingBackendFactory::~MockBlockingBackendFactory() = default;
874 
// Completes immediately unless blocking is enabled, in which case the
// callback is stashed until FinishCreation() runs it.
disk_cache::BackendResult MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    disk_cache::BackendResultCallback callback) {
  if (!block_) {
    return MakeResult();
  }

  callback_ = std::move(callback);
  return disk_cache::BackendResult::MakeError(ERR_IO_PENDING);
}
885 
// Unblocks backend creation and, if a create was pending, delivers its
// result. Nothing may touch |this| after the callback runs.
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    // Running the callback might delete |this|.
    std::move(callback_).Run(MakeResult());
  }
}
893 
MakeResult()894 disk_cache::BackendResult MockBlockingBackendFactory::MakeResult() {
895   if (fail_) {
896     return disk_cache::BackendResult::MakeError(ERR_FAILED);
897   } else {
898     return disk_cache::BackendResult::Make(std::make_unique<MockDiskCache>());
899   }
900 }
901 
902 }  // namespace net
903