// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>
#include <memory>
#include <string>

#include "base/barrier_closure.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/functional/bind.h"
#include "base/hash/hash.h"
#include "base/memory/raw_ptr.h"
#include "base/process/process_metrics.h"
#include "base/rand_util.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/test/scoped_run_loop_timeout.h"
#include "base/test/test_file_util.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "base/timer/elapsed_timer.h"
#include "build/build_config.h"
#include "net/base/cache_type.h"
#include "net/base/completion_repeating_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/block_files.h"
#include "net/disk_cache/disk_cache.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/perf/perf_result_reporter.h"
#include "testing/platform_test.h"

using base::Time;

namespace {

const size_t kNumEntries = 10000;
const int kHeadersSize = 2000;

const int kBodySize = 72 * 1024 - 1;

// HttpCache likes this chunk size.
const int kChunkSize = 32 * 1024;

// As of 2017-01-12, this is a typical per-tab limit on HTTP connections.
const int kMaxParallelOperations = 10;

static constexpr char kMetricPrefixDiskCache[] = "DiskCache.";
static constexpr char kMetricPrefixSimpleIndex[] = "SimpleIndex.";
static constexpr char kMetricCacheEntriesWriteTimeMs[] =
    "cache_entries_write_time";
static constexpr char kMetricCacheHeadersReadTimeColdMs[] =
    "cache_headers_read_time_cold";
static constexpr char kMetricCacheHeadersReadTimeWarmMs[] =
    "cache_headers_read_time_warm";
static constexpr char kMetricCacheEntriesReadTimeColdMs[] =
    "cache_entries_read_time_cold";
static constexpr char kMetricCacheEntriesReadTimeWarmMs[] =
    "cache_entries_read_time_warm";
static constexpr char kMetricCacheKeysHashTimeMs[] = "cache_keys_hash_time";
static constexpr char kMetricFillBlocksTimeMs[] = "fill_sequential_blocks_time";
static constexpr char kMetricCreateDeleteBlocksTimeMs[] =
    "create_and_delete_random_blocks_time";
static constexpr char kMetricSimpleCacheInitTotalTimeMs[] =
    "simple_cache_initial_read_total_time";
static constexpr char kMetricSimpleCacheInitPerEntryTimeUs[] =
    "simple_cache_initial_read_per_entry_time";
static constexpr char kMetricAverageEvictionTimeMs[] = "average_eviction_time";

perf_test::PerfResultReporter SetUpDiskCacheReporter(
    const std::string& story) {
  perf_test::PerfResultReporter reporter(kMetricPrefixDiskCache, story);
  reporter.RegisterImportantMetric(kMetricCacheEntriesWriteTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheHeadersReadTimeColdMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheHeadersReadTimeWarmMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheEntriesReadTimeColdMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheEntriesReadTimeWarmMs, "ms");
  reporter.RegisterImportantMetric(kMetricCacheKeysHashTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricFillBlocksTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricCreateDeleteBlocksTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricSimpleCacheInitTotalTimeMs, "ms");
  reporter.RegisterImportantMetric(kMetricSimpleCacheInitPerEntryTimeUs, "us");
  return reporter;
}

perf_test::PerfResultReporter SetUpSimpleIndexReporter(
    const std::string& story) {
  perf_test::PerfResultReporter reporter(kMetricPrefixSimpleIndex, story);
  reporter.RegisterImportantMetric(kMetricAverageEvictionTimeMs, "ms");
  return reporter;
}

void MaybeIncreaseFdLimitTo(unsigned int max_descriptors) {
#if BUILDFLAG(IS_POSIX)
  base::IncreaseFdLimitTo(max_descriptors);
#endif
}

struct TestEntry {
  std::string key;
  int data_len;
};

enum class WhatToRead {
  HEADERS_ONLY,
  HEADERS_AND_BODY,
};

class DiskCachePerfTest : public DiskCacheTestWithCache {
 public:
  DiskCachePerfTest() { MaybeIncreaseFdLimitTo(kFdLimitForCacheTests); }

  const std::vector<TestEntry>& entries() const { return entries_; }

 protected:
  // Helper methods for constructing tests.
  bool TimeWrites(const std::string& story);
  bool TimeReads(WhatToRead what_to_read,
                 const std::string& metric,
                 const std::string& story);
  void ResetAndEvictSystemDiskCache();

  // Callbacks used within tests for intermediate operations.
  void WriteCallback(net::CompletionOnceCallback final_callback,
                     scoped_refptr<net::IOBuffer> headers_buffer,
                     scoped_refptr<net::IOBuffer> body_buffer,
                     disk_cache::Entry* cache_entry,
                     int entry_index,
                     size_t write_offset,
                     int result);

  // Complete perf tests.
  void CacheBackendPerformance(const std::string& story);

  const size_t kFdLimitForCacheTests = 8192;

  std::vector<TestEntry> entries_;
};

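// Drives up to kMaxParallelOperations concurrent entry writes. Each created
// entry gets kHeadersSize bytes written to stream 0, then its body written to
// stream 1 in kChunkSize pieces, each step chained through the previous
// step's completion callback. As each entry finishes, the next one from
// test_->entries() is started, until all kNumEntries have been written.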
class WriteHandler {
 public:
  WriteHandler(const DiskCachePerfTest* test,
               disk_cache::Backend* cache,
               net::CompletionOnceCallback final_callback)
      : test_(test),
        cache_(cache),
        final_callback_(std::move(final_callback)) {
    CacheTestFillBuffer(headers_buffer_->data(), kHeadersSize, false);
    CacheTestFillBuffer(body_buffer_->data(), kChunkSize, false);
  }

  void Run();

 protected:
  void CreateNextEntry();

  void CreateCallback(int data_len, disk_cache::EntryResult result);
  void WriteDataCallback(disk_cache::Entry* entry,
                         int next_offset,
                         int data_len,
                         int expected_result,
                         int result);

 private:
  bool CheckForErrorAndCancel(int result);

  raw_ptr<const DiskCachePerfTest> test_;
  raw_ptr<disk_cache::Backend> cache_;
  net::CompletionOnceCallback final_callback_;

  size_t next_entry_index_ = 0;
  size_t pending_operations_count_ = 0;

  int pending_result_ = net::OK;

  scoped_refptr<net::IOBuffer> headers_buffer_ =
      base::MakeRefCounted<net::IOBufferWithSize>(kHeadersSize);
  scoped_refptr<net::IOBuffer> body_buffer_ =
      base::MakeRefCounted<net::IOBufferWithSize>(kChunkSize);
};

void WriteHandler::Run() {
  for (int i = 0; i < kMaxParallelOperations; ++i) {
    ++pending_operations_count_;
    CreateNextEntry();
  }
}

void WriteHandler::CreateNextEntry() {
  ASSERT_GT(kNumEntries, next_entry_index_);
  TestEntry test_entry = test_->entries()[next_entry_index_++];
  auto callback =
      base::BindRepeating(&WriteHandler::CreateCallback, base::Unretained(this),
                          test_entry.data_len);
  disk_cache::EntryResult result =
      cache_->CreateEntry(test_entry.key, net::HIGHEST, callback);
  if (result.net_error() != net::ERR_IO_PENDING)
    callback.Run(std::move(result));
}

void WriteHandler::CreateCallback(int data_len,
                                  disk_cache::EntryResult result) {
  if (CheckForErrorAndCancel(result.net_error()))
    return;

  disk_cache::Entry* entry = result.ReleaseEntry();
  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &WriteHandler::WriteDataCallback, base::Unretained(this), entry, 0,
      data_len, kHeadersSize);
  int new_result = entry->WriteData(0, 0, headers_buffer_.get(), kHeadersSize,
                                    callback, false);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

void WriteHandler::WriteDataCallback(disk_cache::Entry* entry,
                                     int next_offset,
                                     int data_len,
                                     int expected_result,
                                     int result) {
  if (CheckForErrorAndCancel(result)) {
    entry->Close();
    return;
  }
  DCHECK_LE(next_offset, data_len);
  if (next_offset == data_len) {
    entry->Close();
    if (next_entry_index_ < kNumEntries) {
      CreateNextEntry();
    } else {
      --pending_operations_count_;
      if (pending_operations_count_ == 0)
        std::move(final_callback_).Run(net::OK);
    }
    return;
  }

  int write_size = std::min(kChunkSize, data_len - next_offset);
  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &WriteHandler::WriteDataCallback, base::Unretained(this), entry,
      next_offset + write_size, data_len, write_size);
  int new_result = entry->WriteData(1, next_offset, body_buffer_.get(),
                                    write_size, callback, true);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

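// Latches the first failure seen and unwinds this handler's pending
// operations; returns true if the current operation should abort. A positive
// |result| is a byte count from a successful I/O, so only negative net
// errors are recorded in |pending_result_|.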
bool WriteHandler::CheckForErrorAndCancel(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
  if (result != net::OK && !(result > 0))
    pending_result_ = result;
  if (pending_result_ != net::OK) {
    --pending_operations_count_;
    if (pending_operations_count_ == 0)
      std::move(final_callback_).Run(pending_result_);
    return true;
  }
  return false;
}

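// Read-side counterpart of WriteHandler: opens up to kMaxParallelOperations
// entries at a time, reads the headers from stream 0 and, when
// WhatToRead::HEADERS_AND_BODY is requested, the body from stream 1 in
// kChunkSize pieces, moving on to the next entry as each one completes.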
class ReadHandler {
 public:
  ReadHandler(const DiskCachePerfTest* test,
              WhatToRead what_to_read,
              disk_cache::Backend* cache,
              net::CompletionOnceCallback final_callback)
      : test_(test),
        what_to_read_(what_to_read),
        cache_(cache),
        final_callback_(std::move(final_callback)) {
    for (auto& read_buffer : read_buffers_) {
      read_buffer = base::MakeRefCounted<net::IOBufferWithSize>(
          std::max(kHeadersSize, kChunkSize));
    }
  }

  void Run();

 protected:
  void OpenNextEntry(int parallel_operation_index);

  void OpenCallback(int parallel_operation_index,
                    int data_len,
                    disk_cache::EntryResult result);
  void ReadDataCallback(int parallel_operation_index,
                        disk_cache::Entry* entry,
                        int next_offset,
                        int data_len,
                        int expected_result,
                        int result);

 private:
  bool CheckForErrorAndCancel(int result);

  raw_ptr<const DiskCachePerfTest> test_;
  const WhatToRead what_to_read_;

  raw_ptr<disk_cache::Backend> cache_;
  net::CompletionOnceCallback final_callback_;

  size_t next_entry_index_ = 0;
  size_t pending_operations_count_ = 0;

  int pending_result_ = net::OK;

  scoped_refptr<net::IOBuffer> read_buffers_[kMaxParallelOperations];
};

void ReadHandler::Run() {
  for (int i = 0; i < kMaxParallelOperations; ++i) {
    OpenNextEntry(pending_operations_count_);
    ++pending_operations_count_;
  }
}

void ReadHandler::OpenNextEntry(int parallel_operation_index) {
  ASSERT_GT(kNumEntries, next_entry_index_);
  TestEntry test_entry = test_->entries()[next_entry_index_++];
  auto callback =
      base::BindRepeating(&ReadHandler::OpenCallback, base::Unretained(this),
                          parallel_operation_index, test_entry.data_len);
  disk_cache::EntryResult result =
      cache_->OpenEntry(test_entry.key, net::HIGHEST, callback);
  if (result.net_error() != net::ERR_IO_PENDING)
    callback.Run(std::move(result));
}

void ReadHandler::OpenCallback(int parallel_operation_index,
                               int data_len,
                               disk_cache::EntryResult result) {
  if (CheckForErrorAndCancel(result.net_error()))
    return;

  disk_cache::Entry* entry = result.ReleaseEntry();

  EXPECT_EQ(data_len, entry->GetDataSize(1));

  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &ReadHandler::ReadDataCallback, base::Unretained(this),
      parallel_operation_index, entry, 0, data_len, kHeadersSize);
  int new_result =
      entry->ReadData(0, 0, read_buffers_[parallel_operation_index].get(),
                      kChunkSize, callback);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

void ReadHandler::ReadDataCallback(int parallel_operation_index,
                                   disk_cache::Entry* entry,
                                   int next_offset,
                                   int data_len,
                                   int expected_result,
                                   int result) {
  if (CheckForErrorAndCancel(result)) {
    entry->Close();
    return;
  }
  DCHECK_LE(next_offset, data_len);
  if (what_to_read_ == WhatToRead::HEADERS_ONLY || next_offset == data_len) {
    entry->Close();
    if (next_entry_index_ < kNumEntries) {
      OpenNextEntry(parallel_operation_index);
    } else {
      --pending_operations_count_;
      if (pending_operations_count_ == 0)
        std::move(final_callback_).Run(net::OK);
    }
    return;
  }

  int expected_read_size = std::min(kChunkSize, data_len - next_offset);
  net::CompletionRepeatingCallback callback = base::BindRepeating(
      &ReadHandler::ReadDataCallback, base::Unretained(this),
      parallel_operation_index, entry, next_offset + expected_read_size,
      data_len, expected_read_size);
  int new_result = entry->ReadData(
      1, next_offset, read_buffers_[parallel_operation_index].get(), kChunkSize,
      callback);
  if (new_result != net::ERR_IO_PENDING)
    callback.Run(new_result);
}

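// Same first-error bookkeeping as WriteHandler::CheckForErrorAndCancel above.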
bool ReadHandler::CheckForErrorAndCancel(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
  if (result != net::OK && !(result > 0))
    pending_result_ = result;
  if (pending_result_ != net::OK) {
    --pending_operations_count_;
    if (pending_operations_count_ == 0)
      std::move(final_callback_).Run(pending_result_);
    return true;
  }
  return false;
}

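// Generates kNumEntries entries with random keys and random body lengths,
// writes them all through a WriteHandler, and reports the elapsed wall time.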
bool DiskCachePerfTest::TimeWrites(const std::string& story) {
  for (size_t i = 0; i < kNumEntries; i++) {
    TestEntry entry;
    entry.key = GenerateKey(true);
    entry.data_len = base::RandInt(0, kBodySize);
    entries_.push_back(entry);
  }

  net::TestCompletionCallback cb;

  auto reporter = SetUpDiskCacheReporter(story);
  base::ElapsedTimer write_timer;

  WriteHandler write_handler(this, cache_.get(), cb.callback());
  write_handler.Run();
  auto result = cb.WaitForResult();
  reporter.AddResult(kMetricCacheEntriesWriteTimeMs,
                     write_timer.Elapsed().InMillisecondsF());
  return result == net::OK;
}

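// Reads back every entry written by TimeWrites() through a ReadHandler and
// reports the elapsed time under |metric|.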
bool DiskCachePerfTest::TimeReads(WhatToRead what_to_read,
                                  const std::string& metric,
                                  const std::string& story) {
  auto reporter = SetUpDiskCacheReporter(story);
  base::ElapsedTimer timer;

  net::TestCompletionCallback cb;
  ReadHandler read_handler(this, what_to_read, cache_.get(), cb.callback());
  read_handler.Run();
  auto result = cb.WaitForResult();
  reporter.AddResult(metric, timer.Elapsed().InMillisecondsF());
  return result == net::OK;
}

TEST_F(DiskCachePerfTest, BlockfileHashes) {
  auto reporter = SetUpDiskCacheReporter("baseline_story");
  base::ElapsedTimer timer;
  for (int i = 0; i < 300000; i++) {
    std::string key = GenerateKey(true);
    // TODO(dcheng): It's unclear whether this is enough to keep a
    // sufficiently smart optimizer from simply discarding the function call
    // once it realizes there are no side effects.
    base::PersistentHash(key);
  }
  reporter.AddResult(kMetricCacheKeysHashTimeMs,
                     timer.Elapsed().InMillisecondsF());
}

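// Drops the backend and evicts every cache file (and, where supported, the
// cache directories) from the OS page cache, then reinitializes the backend
// so the next pass measures cold-cache performance.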
void DiskCachePerfTest::ResetAndEvictSystemDiskCache() {
  base::RunLoop().RunUntilIdle();
  cache_.reset();

  // Flush all files in the cache out of system memory.
  const base::FilePath::StringType file_pattern = FILE_PATH_LITERAL("*");
  base::FileEnumerator enumerator(cache_path_, true /* recursive */,
                                  base::FileEnumerator::FILES, file_pattern);
  for (base::FilePath file_path = enumerator.Next(); !file_path.empty();
       file_path = enumerator.Next()) {
    ASSERT_TRUE(base::EvictFileFromSystemCache(file_path));
  }
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
  // And cache directories, on platforms where the eviction utility supports
  // this (currently Linux/ChromeOS and Android).
  if (simple_cache_mode_) {
    ASSERT_TRUE(
        base::EvictFileFromSystemCache(cache_path_.AppendASCII("index-dir")));
  }
  ASSERT_TRUE(base::EvictFileFromSystemCache(cache_path_));
#endif

  DisableFirstCleanup();
  InitCache();
}

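// Full write/read benchmark: writes kNumEntries entries, then times four read
// passes (headers-only cold, headers-only warm, headers-and-body cold,
// headers-and-body warm), evicting the OS page cache before each cold pass.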
void DiskCachePerfTest::CacheBackendPerformance(const std::string& story) {
  base::test::ScopedRunLoopTimeout default_timeout(
      FROM_HERE, TestTimeouts::action_max_timeout());

  LOG(ERROR) << "Using cache at: " << cache_path_.MaybeAsASCII();
  SetMaxSize(500 * 1024 * 1024);
  InitCache();
  EXPECT_TRUE(TimeWrites(story));

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  ResetAndEvictSystemDiskCache();
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY,
                        kMetricCacheHeadersReadTimeColdMs, story));
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_ONLY,
                        kMetricCacheHeadersReadTimeWarmMs, story));

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  ResetAndEvictSystemDiskCache();
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY,
                        kMetricCacheEntriesReadTimeColdMs, story));
  EXPECT_TRUE(TimeReads(WhatToRead::HEADERS_AND_BODY,
                        kMetricCacheEntriesReadTimeWarmMs, story));

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
}

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/851083): Fix this test on Fuchsia and re-enable.
#define MAYBE_CacheBackendPerformance DISABLED_CacheBackendPerformance
#else
#define MAYBE_CacheBackendPerformance CacheBackendPerformance
#endif
TEST_F(DiskCachePerfTest, MAYBE_CacheBackendPerformance) {
  CacheBackendPerformance("blockfile_cache");
}

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/851083): Fix this test on Fuchsia and re-enable.
#define MAYBE_SimpleCacheBackendPerformance \
  DISABLED_SimpleCacheBackendPerformance
#else
#define MAYBE_SimpleCacheBackendPerformance SimpleCacheBackendPerformance
#endif
TEST_F(DiskCachePerfTest, MAYBE_SimpleCacheBackendPerformance) {
  SetSimpleCacheMode();
  CacheBackendPerformance("simple_cache");
}

// Creating and deleting "entries" on a block file happens quite frequently
// (after all, almost everything is stored on block files). The operation is
// almost free when the file is empty, but can be expensive if the file gets
// fragmented, or if we have multiple files. This test measures that scenario
// by using multiple, highly fragmented files.
TEST_F(DiskCachePerfTest, BlockFilesPerformance) {
  ASSERT_TRUE(CleanupCacheDir());

  disk_cache::BlockFiles files(cache_path_);
  ASSERT_TRUE(files.Init(true));

  const int kNumBlocks = 60000;
  disk_cache::Addr address[kNumBlocks];

  auto reporter = SetUpDiskCacheReporter("blockfile_cache");
  base::ElapsedTimer sequential_timer;

  // Fill up the 32-byte block file (use three files).
  for (auto& addr : address) {
    int block_size = base::RandInt(1, 4);
    EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, block_size, &addr));
  }

  reporter.AddResult(kMetricFillBlocksTimeMs,
                     sequential_timer.Elapsed().InMillisecondsF());
  base::ElapsedTimer random_timer;

  for (int i = 0; i < 200000; i++) {
    int block_size = base::RandInt(1, 4);
    int entry = base::RandInt(0, kNumBlocks - 1);

    files.DeleteBlock(address[entry], false);
    EXPECT_TRUE(
        files.CreateBlock(disk_cache::RANKINGS, block_size, &address[entry]));
  }

  reporter.AddResult(kMetricCreateDeleteBlocksTimeMs,
                     random_timer.Elapsed().InMillisecondsF());
  base::RunLoop().RunUntilIdle();
}

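// EXPECT()s that |rv| matches |expect_rv| and then runs |c|; used below to
// fan per-entry read completions into a BarrierClosure.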
void VerifyRvAndCallClosure(base::RepeatingClosure* c, int expect_rv, int rv) {
  EXPECT_EQ(expect_rv, rv);
  c->Run();
}

TEST_F(DiskCachePerfTest, SimpleCacheInitialReadPortion) {
  // A benchmark that aims to measure how much time we take on the I/O thread
  // for initial bookkeeping before returning to the caller, and how much
  // after that (batched up some). The latter portion includes some event loop
  // overhead.
  const int kBatchSize = 100;

  SetSimpleCacheMode();

  InitCache();
  // Write out the entries, and keep their objects around.
  auto buffer1 = base::MakeRefCounted<net::IOBufferWithSize>(kHeadersSize);
  auto buffer2 = base::MakeRefCounted<net::IOBufferWithSize>(kBodySize);

  CacheTestFillBuffer(buffer1->data(), kHeadersSize, false);
  CacheTestFillBuffer(buffer2->data(), kBodySize, false);

  disk_cache::Entry* cache_entry[kBatchSize];
  for (int i = 0; i < kBatchSize; ++i) {
    TestEntryResultCompletionCallback cb_create;
    disk_cache::EntryResult result = cb_create.GetResult(cache_->CreateEntry(
        base::NumberToString(i), net::HIGHEST, cb_create.callback()));
    ASSERT_EQ(net::OK, result.net_error());
    cache_entry[i] = result.ReleaseEntry();

    net::TestCompletionCallback cb;
    int rv = cache_entry[i]->WriteData(0, 0, buffer1.get(), kHeadersSize,
                                       cb.callback(), false);
    ASSERT_EQ(kHeadersSize, cb.GetResult(rv));
    rv = cache_entry[i]->WriteData(1, 0, buffer2.get(), kBodySize,
                                   cb.callback(), false);
    ASSERT_EQ(kBodySize, cb.GetResult(rv));
  }

  // Now repeatedly read these, batching up the waiting to try to
  // account for the two portions separately. Note that we need separate
  // entries since we are trying to keep interesting work from being on the
  // delayed-done portion.
  const int kIterations = 50000;

  double elapsed_early = 0.0;
  double elapsed_late = 0.0;

  for (int i = 0; i < kIterations; ++i) {
    base::RunLoop event_loop;
    base::RepeatingClosure barrier =
        base::BarrierClosure(kBatchSize, event_loop.QuitWhenIdleClosure());
    net::CompletionRepeatingCallback cb_batch(base::BindRepeating(
        VerifyRvAndCallClosure, base::Unretained(&barrier), kHeadersSize));

    base::ElapsedTimer timer_early;
    for (auto* entry : cache_entry) {
      int rv = entry->ReadData(0, 0, buffer1.get(), kHeadersSize, cb_batch);
      if (rv != net::ERR_IO_PENDING) {
        barrier.Run();
        ASSERT_EQ(kHeadersSize, rv);
      }
    }
    elapsed_early += timer_early.Elapsed().InMillisecondsF();

    base::ElapsedTimer timer_late;
    event_loop.Run();
    elapsed_late += timer_late.Elapsed().InMillisecondsF();
  }

  // Cleanup
  for (auto* entry : cache_entry)
    entry->Close();

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
  auto reporter = SetUpDiskCacheReporter("early_portion");
  reporter.AddResult(kMetricSimpleCacheInitTotalTimeMs, elapsed_early);
  reporter.AddResult(kMetricSimpleCacheInitPerEntryTimeUs,
                     1000 * (elapsed_early / (kIterations * kBatchSize)));
  reporter = SetUpDiskCacheReporter("event_loop_portion");
  reporter.AddResult(kMetricSimpleCacheInitTotalTimeMs, elapsed_late);
  reporter.AddResult(kMetricSimpleCacheInitPerEntryTimeUs,
                     1000 * (elapsed_late / (kIterations * kBatchSize)));
}

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/1318120): Fix this test on Fuchsia and re-enable.
#define MAYBE_EvictionPerformance DISABLED_EvictionPerformance
#else
#define MAYBE_EvictionPerformance EvictionPerformance
#endif
// Measures how quickly SimpleIndex can compute which entries to evict.
TEST(SimpleIndexPerfTest, MAYBE_EvictionPerformance) {
  const int kEntries = 10000;

  class NoOpDelegate : public disk_cache::SimpleIndexDelegate {
    void DoomEntries(std::vector<uint64_t>* entry_hashes,
                     net::CompletionOnceCallback callback) override {}
  };

  NoOpDelegate delegate;
  base::Time start(base::Time::Now());

  double evict_elapsed_ms = 0;
  int iterations = 0;
  while (iterations < 61000) {
    ++iterations;
    disk_cache::SimpleIndex index(/* io_thread = */ nullptr,
                                  /* cleanup_tracker = */ nullptr, &delegate,
                                  net::DISK_CACHE,
                                  /* simple_index_file = */ nullptr);

    // Make sure it's large enough to not evict on insertion.
    index.SetMaxSize(kEntries * 2);

    for (int i = 0; i < kEntries; ++i) {
      index.InsertEntryForTesting(
          i, disk_cache::EntryMetadata(start + base::Seconds(i), 1u));
    }

    // Trigger an eviction.
    base::ElapsedTimer timer;
    index.SetMaxSize(kEntries);
    index.UpdateEntrySize(0, 1u);
    evict_elapsed_ms += timer.Elapsed().InMillisecondsF();
  }

  auto reporter = SetUpSimpleIndexReporter("baseline_story");
  reporter.AddResult(kMetricAverageEvictionTimeMs,
                     evict_elapsed_ms / iterations);
}

}  // namespace