// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Internal helper used to sequence cleanup and reuse of cache directories
// among different objects.
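//
// A rough usage sketch (illustrative only; |cache_path| and |RetryCreation|
// are hypothetical caller-side names, not part of this file):
//
//   scoped_refptr<BackendCleanupTracker> tracker =
//       BackendCleanupTracker::TryCreate(cache_path,
//                                        base::BindOnce(&RetryCreation));
//   if (!tracker) {
//     // Another backend still owns |cache_path|; RetryCreation will be
//     // posted to this sequence once that backend's tracker is destroyed.
//     return;
//   }
//   // Keep |tracker| referenced for the backend's lifetime. Releasing the
//   // last reference unregisters |cache_path| and posts any queued retry
//   // callbacks.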

#include "net/disk_cache/backend_cleanup_tracker.h"

#include <unordered_map>
#include <utility>

#include "base/files/file_path.h"
#include "base/functional/callback.h"
#include "base/lazy_instance.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/task/sequenced_task_runner.h"

namespace disk_cache {

namespace {

using TrackerMap = std::unordered_map<base::FilePath, BackendCleanupTracker*>;
struct AllBackendCleanupTrackers {
  TrackerMap map;

  // Since clients can potentially call CreateCacheBackend from multiple
  // threads, we need to lock the map keeping track of cleanup trackers
  // for these backends. Our overall strategy is to have TryCreate act as
  // an arbitrator: whatever thread grabs one gets to operate on the tracker
  // freely until it gets destroyed.
  base::Lock lock;
};

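// Leaky: the registry is intentionally never destroyed, so a tracker torn
// down very late in shutdown can still unregister itself safely.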
static base::LazyInstance<AllBackendCleanupTrackers>::Leaky g_all_trackers;

}  // namespace

// static
scoped_refptr<BackendCleanupTracker> BackendCleanupTracker::TryCreate(
    const base::FilePath& path,
    base::OnceClosure retry_closure) {
  AllBackendCleanupTrackers* all_trackers = g_all_trackers.Pointer();
  base::AutoLock lock(all_trackers->lock);

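  // Probe the registry with a placeholder entry; insert()'s bool result says
  // whether this caller was the first to claim |path|.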
  std::pair<TrackerMap::iterator, bool> insert_result =
      all_trackers->map.insert(
          std::pair<base::FilePath, BackendCleanupTracker*>(path, nullptr));
  if (insert_result.second) {
    auto tracker = base::WrapRefCounted(new BackendCleanupTracker(path));
    insert_result.first->second = tracker.get();
    return tracker;
  } else {
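    // Lost the race: another backend still owns |path|. Queue the retry to
    // run once that backend's tracker is destroyed.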
    insert_result.first->second->AddPostCleanupCallbackImpl(
        std::move(retry_closure));
    return nullptr;
  }
}

void BackendCleanupTracker::AddPostCleanupCallback(base::OnceClosure cb) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(seq_checker_);
  // Despite the sequencing requirement, we need to grab the table lock since
  // this may otherwise race against TryCreate, which can call
  // AddPostCleanupCallbackImpl on this object from another thread.
  base::AutoLock lock(g_all_trackers.Get().lock);
  AddPostCleanupCallbackImpl(std::move(cb));
}

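// Must be called with the registry lock held (true at both call sites:
// TryCreate and AddPostCleanupCallback). Pairs |cb| with the current
// sequence's task runner so the destructor can post it back to the sequence
// that registered it.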
void BackendCleanupTracker::AddPostCleanupCallbackImpl(base::OnceClosure cb) {
  post_cleanup_cbs_.emplace_back(base::SequencedTaskRunner::GetCurrentDefault(),
                                 std::move(cb));
}

BackendCleanupTracker::BackendCleanupTracker(const base::FilePath& path)
    : path_(path) {}

BackendCleanupTracker::~BackendCleanupTracker() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(seq_checker_);

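  // Unregister from the table first, so a concurrent TryCreate can no longer
  // hand out a pointer to |this|; after this block, |post_cleanup_cbs_| can
  // be accessed without the lock.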
  {
    AllBackendCleanupTrackers* all_trackers = g_all_trackers.Pointer();
    base::AutoLock lock(all_trackers->lock);
    int rv = all_trackers->map.erase(path_);
    DCHECK_EQ(1, rv);
  }

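  // Post each queued callback to the sequence that registered it. The map
  // entry is already gone, so a retried TryCreate can now claim |path_|.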
  while (!post_cleanup_cbs_.empty()) {
    post_cleanup_cbs_.back().first->PostTask(
        FROM_HERE, std::move(post_cleanup_cbs_.back().second));
    post_cleanup_cbs_.pop_back();
  }
}

}  // namespace disk_cache