// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_STARSCAN_RACEFUL_WORKLIST_H_
#define PARTITION_ALLOC_STARSCAN_RACEFUL_WORKLIST_H_

#include <algorithm>
#include <atomic>
#include <vector>

#include "partition_alloc/internal_allocator_forward.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/rand_util.h"
#include "partition_alloc/partition_alloc_check.h"

namespace partition_alloc::internal {

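// A worklist whose items can be visited concurrently by multiple threads.
// Visitation is "raceful": threads may occasionally race and visit the same
// item more than once, so the supplied visitor must tolerate duplicate
// visits. A minimal usage sketch (illustrative only; `Handle` and `Scan` are
// hypothetical):
//
//   RacefulWorklist<Handle> worklist;
//   worklist.Push(handle);
//   // On each scanning thread:
//   RacefulWorklist<Handle>::RandomizedView view(worklist);
//   view.Visit([](Handle h) { Scan(h); });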
template <typename T>
class RacefulWorklist {
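  // Wraps an item together with its visitation state. The flags are atomics
  // so that concurrent visitors can observe each other's progress.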
  struct Node {
    explicit Node(const T& value) : value(value) {}
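    // std::atomic is not copyable, so the flags are transferred manually
    // with relaxed loads. Nodes are only copied while the worklist is being
    // filled, which must not race with visitation.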
    Node(const Node& other)
        : value(other.value),
          is_being_visited(
              other.is_being_visited.load(std::memory_order_relaxed)),
          is_visited(other.is_visited.load(std::memory_order_relaxed)) {}

    T value;
    std::atomic<bool> is_being_visited{false};
    std::atomic<bool> is_visited{false};
  };
  using Underlying = std::vector<Node, internal::InternalAllocator<Node>>;

 public:
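  // A view over the worklist that starts visitation at a random offset,
  // which reduces the chance that concurrently scanning threads contend on
  // the same items.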
  class RandomizedView {
   public:
    explicit RandomizedView(RacefulWorklist& worklist)
        : worklist_(worklist), offset_(0) {
      if (worklist.data_.size() > 0) {
        offset_ = static_cast<size_t>(
            internal::base::RandGenerator(worklist.data_.size()));
      }
    }

    RandomizedView(const RandomizedView&) = delete;
    const RandomizedView& operator=(const RandomizedView&) = delete;

    template <typename Function>
    void Visit(Function f);

   private:
    RacefulWorklist& worklist_;
    size_t offset_;
  };

  RacefulWorklist() = default;

  RacefulWorklist(const RacefulWorklist&) = delete;
  RacefulWorklist& operator=(const RacefulWorklist&) = delete;

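  // Appends items. The underlying vector is not synchronized, so pushing
  // must not run concurrently with visitation.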
  void Push(const T& t) { data_.push_back(Node(t)); }

  template <typename It>
  void Push(It begin, It end) {
    std::transform(begin, end, std::back_inserter(data_),
                   [](const T& t) { return Node(t); });
  }

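  // Visits every item on the calling thread without any synchronization or
  // bookkeeping; as the name says, it must not overlap with other visitors.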
  template <typename Function>
  void VisitNonConcurrently(Function) const;

 private:
  Underlying data_;
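  // Set once a RandomizedView has seen every item; lets subsequent views
  // return early without touching the nodes.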
  std::atomic<bool> fully_visited_{false};
};

template <typename T>
template <typename Function>
void RacefulWorklist<T>::VisitNonConcurrently(Function f) const {
  for (const auto& t : data_) {
    f(t.value);
  }
}

template <typename T>
template <typename Function>
void RacefulWorklist<T>::RandomizedView::Visit(Function f) {
  auto& data = worklist_.data_;
  std::vector<typename Underlying::iterator,
              internal::InternalAllocator<typename Underlying::iterator>>
      to_revisit;

  // Quickly check whether the worklist was already fully visited, to avoid
  // iterating it at all.
  if (worklist_.fully_visited_.load(std::memory_order_acquire)) {
    return;
  }

  const auto offset_it = std::next(data.begin(), offset_);

  // First, visit items starting from the offset.
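  // An item is claimed by setting `is_being_visited` and retired by setting
  // `is_visited`. Both use relaxed ordering, so two threads may claim the
  // same item; such duplicate visits are the "raceful" part and must be
  // tolerated by `f`.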
  for (auto it = offset_it; it != data.end(); ++it) {
    if (it->is_visited.load(std::memory_order_relaxed)) {
      continue;
    }
    if (it->is_being_visited.load(std::memory_order_relaxed)) {
      to_revisit.push_back(it);
      continue;
    }
    it->is_being_visited.store(true, std::memory_order_relaxed);
    f(it->value);
    it->is_visited.store(true, std::memory_order_relaxed);
  }

  // Then, visit items before the offset.
  for (auto it = data.begin(); it != offset_it; ++it) {
    if (it->is_visited.load(std::memory_order_relaxed)) {
      continue;
    }
    if (it->is_being_visited.load(std::memory_order_relaxed)) {
      to_revisit.push_back(it);
      continue;
    }
    it->is_being_visited.store(true, std::memory_order_relaxed);
    f(it->value);
    it->is_visited.store(true, std::memory_order_relaxed);
  }

  // Finally, racefully visit items that were being visited by some other
  // thread when first encountered.
  for (auto it : to_revisit) {
    if (PA_LIKELY(it->is_visited.load(std::memory_order_relaxed))) {
      continue;
    }
    // Don't bail out here if the item is still being visited by another
    // thread: visiting it again guarantees forward progress even if that
    // thread is making slow progress.
    it->is_being_visited.store(true, std::memory_order_relaxed);
    f(it->value);
    it->is_visited.store(true, std::memory_order_relaxed);
  }

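  // Pairs with the acquire load at the top of Visit(): once set, later
  // views can return early without inspecting individual nodes.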
  worklist_.fully_visited_.store(true, std::memory_order_release);
}

}  // namespace partition_alloc::internal

#endif  // PARTITION_ALLOC_STARSCAN_RACEFUL_WORKLIST_H_