// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_PARTITION_LOCK_H_
#define PARTITION_ALLOC_PARTITION_LOCK_H_

#include <atomic>
#include <type_traits>

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/debug/debugging_buildflags.h"
#include "partition_alloc/partition_alloc_base/immediate_crash.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/spinning_mutex.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"

namespace partition_alloc::internal {

class PA_LOCKABLE Lock {
 public:
  inline constexpr Lock();
  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
    LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif

    // When PartitionAlloc is malloc(), it can easily become reentrant. For
    // instance, a DCHECK() triggers in external code (such as
    // base::Lock). DCHECK() error message formatting allocates, which triggers
    // PartitionAlloc, and then we get reentrancy, and in this case infinite
    // recursion.
    //
    // To avoid that, crash quickly when the code becomes reentrant.
    base::PlatformThreadRef current_thread = base::PlatformThread::CurrentRef();
    if (!lock_.Try()) {
      // The lock wasn't free when we tried to acquire it. This can be because
      // another thread or *this* thread was holding it.
      //
      // If it's this thread holding it, then it cannot have become free in the
      // meantime, and the current value of |owning_thread_ref_| is valid, as it
      // was set by this thread. Assuming that writes to |owning_thread_ref_|
      // are atomic, then if it's us, we are trying to recursively acquire a
      // non-recursive lock.
      //
      // Note that we don't rely on a DCHECK() in base::Lock(), as it would
      // itself allocate. Meaning that without this code, a reentrancy issue
      // hangs on Linux.
      if (PA_UNLIKELY(owning_thread_ref_.load(std::memory_order_acquire) ==
                      current_thread)) {
        // Trying to acquire lock while it's held by this thread: reentrancy
        // issue.
        PA_IMMEDIATE_CRASH();
      }
      lock_.Acquire();
    }
    owning_thread_ref_.store(current_thread, std::memory_order_release);
#else
    lock_.Acquire();
#endif
  }

  void Release() PA_UNLOCK_FUNCTION() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
    LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif
    owning_thread_ref_.store(base::PlatformThreadRef(),
                             std::memory_order_release);
#endif
    lock_.Release();
  }
  void AssertAcquired() const PA_ASSERT_EXCLUSIVE_LOCK() {
    lock_.AssertAcquired();
#if BUILDFLAG(PA_DCHECK_IS_ON)
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
    LiftThreadIsolationScope lift_thread_isolation_restrictions;
#endif
    PA_DCHECK(owning_thread_ref_.load(std::memory_order_acquire) ==
              base::PlatformThread::CurrentRef());
#endif
  }

  void Reinit() PA_UNLOCK_FUNCTION() {
    lock_.AssertAcquired();
#if BUILDFLAG(PA_DCHECK_IS_ON)
    owning_thread_ref_.store(base::PlatformThreadRef(),
                             std::memory_order_release);
#endif
    lock_.Reinit();
  }

 private:
  SpinningMutex lock_;

#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Should in theory be protected by |lock_|, but we need to read it to detect
  // recursive lock acquisition (and thus, the allocator becoming reentrant).
  std::atomic<base::PlatformThreadRef> owning_thread_ref_ =
      base::PlatformThreadRef();
#endif
};

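// A minimal sketch of the reentrancy that Acquire() detects under
// PA_DCHECK_IS_ON. DoSomethingThatAllocates() is a hypothetical stand-in for
// any allocating call, e.g. DCHECK() message formatting when PartitionAlloc
// is servicing malloc():
//
//   Lock lock;
//   lock.Acquire();
//   DoSomethingThatAllocates();  // Re-enters the allocator, which tries to
//                                // take |lock| again on the same thread:
//                                // PA_IMMEDIATE_CRASH() rather than infinite
//                                // recursion or a hang.
//   lock.Release();
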
class PA_SCOPED_LOCKABLE ScopedGuard {
 public:
  explicit ScopedGuard(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Acquire();
  }
  ~ScopedGuard() PA_UNLOCK_FUNCTION() { lock_.Release(); }

 private:
  Lock& lock_;
};

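// Typical RAII usage, as a sketch (|lock| is an illustrative Lock instance,
// not part of this header):
//
//   {
//     ScopedGuard guard(lock);
//     // |lock| is held here; the state it guards may be touched safely.
//   }  // |lock| is released when |guard| goes out of scope.
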
class PA_SCOPED_LOCKABLE ScopedUnlockGuard {
 public:
  explicit ScopedUnlockGuard(Lock& lock) PA_UNLOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Release();
  }
  ~ScopedUnlockGuard() PA_EXCLUSIVE_LOCK_FUNCTION() { lock_.Acquire(); }

 private:
  Lock& lock_;
};

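// ScopedUnlockGuard is the mirror image of ScopedGuard: it temporarily drops
// a lock that is already held, then re-acquires it on scope exit. A sketch,
// assuming |lock| is held on entry:
//
//   {
//     ScopedUnlockGuard unlock(lock);
//     // |lock| is NOT held here; safe to call out to code that may itself
//     // acquire |lock|.
//   }  // |lock| is re-acquired before execution continues.
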
constexpr Lock::Lock() = default;

// We want PartitionRoot to not have a global destructor, so this should not
// have one.
static_assert(std::is_trivially_destructible_v<Lock>, "");

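// Since the constructor is constexpr and the destructor trivial, a Lock can
// live at namespace scope with neither a static initializer nor an exit-time
// destructor. A sketch (the variable name is illustrative):
//
//   namespace {
//   partition_alloc::internal::Lock g_example_lock;
//   }
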
}  // namespace partition_alloc::internal

namespace base {
namespace internal {

using PartitionLock = ::partition_alloc::internal::Lock;
using PartitionAutoLock = ::partition_alloc::internal::ScopedGuard;

}  // namespace internal
}  // namespace base

#endif  // PARTITION_ALLOC_PARTITION_LOCK_H_