// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_SAMPLING_HEAP_PROFILER_POISSON_ALLOCATION_SAMPLER_H_
#define BASE_SAMPLING_HEAP_PROFILER_POISSON_ALLOCATION_SAMPLER_H_

#include <atomic>
#include <vector>

#include "base/allocator/dispatcher/notification_data.h"
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/memory/raw_ptr.h"
#include "base/no_destructor.h"
#include "base/sampling_heap_profiler/lock_free_address_hash_set.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

namespace heap_profiling {
class HeapProfilerControllerTest;
}

namespace base {

class SamplingHeapProfilerTest;

// This singleton class implements Poisson sampling of the incoming allocation
// stream. It hooks onto base::allocator and base::PartitionAlloc.
// The only control parameter is the sampling interval, which sets the average
// number of allocated bytes between samples. The actual intervals between
// samples are randomized using a Poisson distribution to mitigate patterns in
// the allocation stream.
// Once accumulated allocation sizes fill up the current sample interval,
// a sample is generated and sent to the observers via a |SampleAdded| call.
// When the memory that triggered a sample is freed, observers are notified
// via a |SampleRemoved| call.
//
class BASE_EXPORT PoissonAllocationSampler {
 public:
  class SamplesObserver {
   public:
    virtual ~SamplesObserver() = default;
    virtual void SampleAdded(
        void* address,
        size_t size,
        size_t total,
        base::allocator::dispatcher::AllocationSubsystem type,
        const char* context) = 0;
    virtual void SampleRemoved(void* address) = 0;
  };
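
  // A minimal SamplesObserver sketch. The class name and counter below are
  // illustrative, not part of this API; the atomic counter reflects that
  // observers must be thread-safe (see the note on |observers_| below).
  //
  //   class CountingObserver
  //       : public PoissonAllocationSampler::SamplesObserver {
  //    public:
  //     void SampleAdded(
  //         void* address, size_t size, size_t total,
  //         base::allocator::dispatcher::AllocationSubsystem type,
  //         const char* context) override {
  //       live_samples_.fetch_add(1, std::memory_order_relaxed);
  //     }
  //     void SampleRemoved(void* address) override {
  //       live_samples_.fetch_sub(1, std::memory_order_relaxed);
  //     }
  //
  //    private:
  //     std::atomic<int> live_samples_{0};
  //   };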

  // An instance of this class makes the sampler not report samples generated
  // within the object's scope for the current thread.
  // This allows observers to allocate/deallocate memory while holding a lock
  // without running into reentrancy problems.
  // The current implementation doesn't support ScopedMuteThreadSamples
  // nesting.
  class BASE_EXPORT ScopedMuteThreadSamples {
   public:
    ScopedMuteThreadSamples();
    ~ScopedMuteThreadSamples();

    ScopedMuteThreadSamples(const ScopedMuteThreadSamples&) = delete;
    ScopedMuteThreadSamples& operator=(const ScopedMuteThreadSamples&) = delete;

    static bool IsMuted();
  };
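
  // For example, an observer that itself allocates while handling a sample can
  // mute that work (a sketch; MyObserver, Sample and samples_ are illustrative
  // names, not part of this API):
  //
  //   void MyObserver::SampleAdded(
  //       void* address, size_t size, size_t total,
  //       base::allocator::dispatcher::AllocationSubsystem type,
  //       const char* context) {
  //     PoissonAllocationSampler::ScopedMuteThreadSamples mute;
  //     // Any allocation made here (e.g. growing |samples_|) will not be
  //     // reported back to the sampler on this thread.
  //     samples_.push_back(Sample{address, size});
  //   }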

  // An instance of this class makes the sampler behave deterministically to
  // ensure test results are repeatable. Does not support nesting.
  class BASE_EXPORT ScopedSuppressRandomnessForTesting {
   public:
    ScopedSuppressRandomnessForTesting();
    ~ScopedSuppressRandomnessForTesting();

    ScopedSuppressRandomnessForTesting(
        const ScopedSuppressRandomnessForTesting&) = delete;
    ScopedSuppressRandomnessForTesting& operator=(
        const ScopedSuppressRandomnessForTesting&) = delete;

    static bool IsSuppressed();
  };

  // Must be called early during process initialization. It creates and
  // reserves a TLS slot.
  static void Init();

  void AddSamplesObserver(SamplesObserver*);

  // Note: After an observer is removed, it is still possible for that observer
  // to receive a notification. This is not a problem currently, as the only
  // client of this interface is base::SamplingHeapProfiler, which is a
  // singleton.
  // If this functionality is needed in the future, one might want to put the
  // observer notification loop under a reader-writer lock.
  void RemoveSamplesObserver(SamplesObserver*);

  // Sets the mean number of bytes that will be allocated before taking a
  // sample.
  void SetSamplingInterval(size_t sampling_interval_bytes);

  // Returns the current mean sampling interval, in bytes.
  size_t SamplingInterval() const;
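
  // A typical setup sketch (the observer instance and the 128 KiB mean
  // interval are illustrative choices, not defaults of this class):
  //
  //   PoissonAllocationSampler::Init();  // Early during process startup.
  //   PoissonAllocationSampler* sampler = PoissonAllocationSampler::Get();
  //   sampler->SetSamplingInterval(128 * 1024);
  //   sampler->AddSamplesObserver(&observer);
  //   ...
  //   sampler->RemoveSamplesObserver(&observer);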

  ALWAYS_INLINE void OnAllocation(
      const base::allocator::dispatcher::AllocationNotificationData&
          allocation_data);
  ALWAYS_INLINE void OnFree(
      const base::allocator::dispatcher::FreeNotificationData& free_data);

  static PoissonAllocationSampler* Get();

  PoissonAllocationSampler(const PoissonAllocationSampler&) = delete;
  PoissonAllocationSampler& operator=(const PoissonAllocationSampler&) = delete;

  // Returns true if a ScopedMuteHookedSamplesForTesting exists. Only friends
  // can create a ScopedMuteHookedSamplesForTesting but anyone can check the
  // status of this. This can be read from any thread.
  static bool AreHookedSamplesMuted() {
    return profiling_state_.load(std::memory_order_relaxed) &
           ProfilingStateFlag::kHookedSamplesMutedForTesting;
  }

 private:
  // Flags recording the state of the profiler. This does not use enum class so
  // flags can be used in a bitmask.
  enum ProfilingStateFlag {
    // Set if profiling has ever been started in this session of Chrome. Once
    // this is set, it is never reset. This is used to optimize the common case
    // where profiling is never used.
    kWasStarted = 1 << 0,
    // Set if profiling is currently running. This flag is toggled on and off
    // as sample observers are added and removed.
    kIsRunning = 1 << 1,
    // Set if a ScopedMuteHookedSamplesForTesting object exists.
    kHookedSamplesMutedForTesting = 1 << 2,
  };
  using ProfilingStateFlagMask = int;
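
  // For example, a profiler that has been started and currently has observers
  // attached would be represented by the combined mask (illustrative):
  //
  //   ProfilingStateFlagMask mask = kWasStarted | kIsRunning;
  //   bool is_running = (mask & ProfilingStateFlag::kIsRunning) != 0;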

  // An instance of this class makes the sampler only report samples with
  // AllocationSubsystem kManualForTesting, not those from hooked allocators.
  // This allows unit tests to set test expectations based on only explicit
  // calls to RecordAlloc and RecordFree.
  //
  // The accumulated bytes on the thread that creates a
  // ScopedMuteHookedSamplesForTesting will also be reset to 0, and restored
  // when the object leaves scope. This gives tests a known state to start
  // recording samples on one thread: a full interval must pass to record a
  // sample. Other threads will still have a random number of accumulated
  // bytes.
  //
  // Only one instance may exist at a time.
  class BASE_EXPORT ScopedMuteHookedSamplesForTesting {
   public:
    ScopedMuteHookedSamplesForTesting();
    ~ScopedMuteHookedSamplesForTesting();

    ScopedMuteHookedSamplesForTesting(
        const ScopedMuteHookedSamplesForTesting&) = delete;
    ScopedMuteHookedSamplesForTesting& operator=(
        const ScopedMuteHookedSamplesForTesting&) = delete;

   private:
    intptr_t accumulated_bytes_snapshot_;
  };

  PoissonAllocationSampler();
  ~PoissonAllocationSampler() = delete;

  static size_t GetNextSampleInterval(size_t base_interval);
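
  // Sample intervals in a Poisson process are exponentially distributed. A
  // minimal sketch of how the next interval could be derived from the mean
  // `base_interval` (illustrative; the real implementation also has to handle
  // degenerate random values and the deterministic-for-testing mode):
  //
  //   double uniform = base::RandDouble();       // Uniform in [0, 1).
  //   double scale = -log(1.0 - uniform);        // Exponential with mean 1.
  //   size_t next = static_cast<size_t>(scale * base_interval);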

  // Return the set of sampled addresses. This is only valid to call after
  // Init().
  static LockFreeAddressHashSet& sampled_addresses_set();

  // Atomically adds `flag` to `profiling_state_`. DCHECK's if it was already
  // set. If `flag` is kIsRunning, also sets kWasStarted. Uses
  // std::memory_order_relaxed semantics and therefore doesn't synchronize the
  // state of any other memory with future readers. (See the comment in
  // RecordFree() for why this is safe.)
  static void SetProfilingStateFlag(ProfilingStateFlag flag);

  // Atomically removes `flag` from `profiling_state_`. DCHECK's if it was not
  // already set. Uses std::memory_order_relaxed semantics and therefore doesn't
  // synchronize the state of any other memory with future readers. (See the
  // comment in RecordFree() for why this is safe.)
  static void ResetProfilingStateFlag(ProfilingStateFlag flag);

  void DoRecordAllocation(const ProfilingStateFlagMask state,
                          void* address,
                          size_t size,
                          base::allocator::dispatcher::AllocationSubsystem type,
                          const char* context);
  void DoRecordFree(void* address);

  void BalanceAddressesHashSet();

  Lock mutex_;

  // The |observers_| list is guarded by |mutex_|; however, a copy of it is
  // made before invoking the observers (to avoid performing expensive
  // operations under the lock). As such, the SamplesObservers themselves need
  // to be thread-safe and must support being invoked racily after
  // RemoveSamplesObserver().
  std::vector<raw_ptr<SamplesObserver, VectorExperimental>> observers_
      GUARDED_BY(mutex_);

  // Fast, thread-safe access to the current profiling state.
  static std::atomic<ProfilingStateFlagMask> profiling_state_;

  friend class heap_profiling::HeapProfilerControllerTest;
  friend class NoDestructor<PoissonAllocationSampler>;
  friend class PoissonAllocationSamplerStateTest;
  friend class SamplingHeapProfilerTest;
  FRIEND_TEST_ALL_PREFIXES(PoissonAllocationSamplerTest, MuteHooksWithoutInit);
  FRIEND_TEST_ALL_PREFIXES(SamplingHeapProfilerTest, HookedAllocatorMuted);
};

ALWAYS_INLINE void PoissonAllocationSampler::OnAllocation(
    const base::allocator::dispatcher::AllocationNotificationData&
        allocation_data) {
  // The allocation hooks may be installed before the sampler is started. Check
  // if it's ever been started first to avoid extra work on the fast path,
  // because it's the most common case.
  const ProfilingStateFlagMask state =
      profiling_state_.load(std::memory_order_relaxed);
  if (LIKELY(!(state & ProfilingStateFlag::kWasStarted))) {
    return;
  }

  const auto type = allocation_data.allocation_subsystem();

  // When sampling is muted for testing, only handle manual calls to
  // RecordAlloc. (This doesn't need to be checked in RecordFree because muted
  // allocations won't be added to sampled_addresses_set(), so RecordFree
  // already skips them.)
  if (UNLIKELY((state & ProfilingStateFlag::kHookedSamplesMutedForTesting) &&
               type != base::allocator::dispatcher::AllocationSubsystem::
                           kManualForTesting)) {
    return;
  }

  // Note: ReentryGuard prevents recursion introduced by malloc and the
  // initialization of thread local storage, which happen in the allocation
  // path only (please see the docs of ReentryGuard for full details).
  allocator::dispatcher::ReentryGuard reentry_guard;

  if (UNLIKELY(!reentry_guard)) {
    return;
  }

  DoRecordAllocation(state, allocation_data.address(), allocation_data.size(),
                     type, allocation_data.type_name());
}

ALWAYS_INLINE void PoissonAllocationSampler::OnFree(
    const base::allocator::dispatcher::FreeNotificationData& free_data) {
  // The allocation hooks may be installed before the sampler is started. Check
  // if it's ever been started first to avoid extra work on the fast path,
  // because it's the most common case. Note that DoRecordFree still needs to
  // be called if the sampler was started but is now stopped, to track
  // allocations that were recorded while the sampler was still running.
  //
  // Relaxed ordering is safe here because there's only one case where
  // RecordAlloc and RecordFree MUST see the same value of `profiling_state_`.
  // Assume thread A updates `profiling_state_` from 0 to kWasStarted |
  // kIsRunning, thread B calls RecordAlloc, and thread C calls RecordFree.
  // (Something else could update `profiling_state_` to remove kIsRunning
  // before RecordAlloc or RecordFree.)
  //
  // 1. If RecordAlloc(p) sees !kWasStarted or !kIsRunning it will return
  //    immediately, so p won't be in sampled_addresses_set(). So no matter
  //    what RecordFree(p) sees it will also return immediately.
  //
  // 2. If RecordFree() is called with a pointer that was never passed to
  //    RecordAlloc(), again it will return immediately no matter what it sees.
  //
  // 3. If RecordAlloc(p) sees kIsRunning it will put p in
  //    sampled_addresses_set(). In this case RecordFree(p) MUST see
  //    kWasStarted or it will return without removing p:
  //
  //    3a. If the program got p as the return value from malloc() and passed
  //        it to free(), then RecordFree() happens-after RecordAlloc() and
  //        therefore will see the same value of `profiling_state_` as
  //        RecordAlloc() for all memory orders. (Proof: using the definitions
  //        of sequenced-after, happens-after and inter-thread happens-after
  //        from https://en.cppreference.com/w/cpp/atomic/memory_order,
  //        malloc() calls RecordAlloc() so its return is sequenced-after
  //        RecordAlloc(); free() inter-thread happens-after malloc's return
  //        because it consumes the result; RecordFree() is sequenced-after its
  //        caller, free(); therefore RecordFree() inter-thread happens-after
  //        RecordAlloc().)
  //    3b. If the program is freeing a random pointer which coincidentally was
  //        also returned from malloc(), such that free(p) does not
  //        happen-after malloc(), then there is already an unavoidable race
  //        condition. If the profiler sees malloc() before free(p), then it
  //        will add p to sampled_addresses_set() and then remove it; otherwise
  //        it will do nothing in RecordFree() and add p to
  //        sampled_addresses_set() in RecordAlloc(), recording a potential
  //        leak. Reading `profiling_state_` with relaxed ordering adds another
  //        possibility: if the profiler sees malloc() with kWasStarted and
  //        then free without kWasStarted, it will add p to
  //        sampled_addresses_set() in RecordAlloc() and then do nothing in
  //        RecordFree(). This has the same outcome as the existing race.
  const ProfilingStateFlagMask state =
      profiling_state_.load(std::memory_order_relaxed);
  if (LIKELY(!(state & ProfilingStateFlag::kWasStarted))) {
    return;
  }

  void* const address = free_data.address();

  if (UNLIKELY(address == nullptr)) {
    return;
  }
  if (LIKELY(!sampled_addresses_set().Contains(address))) {
    return;
  }
  if (UNLIKELY(ScopedMuteThreadSamples::IsMuted())) {
    return;
  }

  // Note: ReentryGuard prevents recursion introduced by malloc and the
  // initialization of thread local storage, which happen in the allocation
  // path only (please see the docs of ReentryGuard for full details).
  // Therefore, DoRecordFree doesn't need to be guarded.

  DoRecordFree(address);
}

}  // namespace base

#endif  // BASE_SAMPLING_HEAP_PROFILER_POISSON_ALLOCATION_SAMPLER_H_