//
// Copyright 2023 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ContextMutex.cpp: Classes for protecting Context access and EGLImage siblings.

#include "libANGLE/ContextMutex.h"

#include "common/system_utils.h"
#include "libANGLE/Context.h"

namespace egl
{

namespace
{
[[maybe_unused]] bool CheckThreadIdCurrent(const std::atomic<angle::ThreadId> &threadId,
                                           angle::ThreadId *currentThreadIdOut)
{
    *currentThreadIdOut = angle::GetCurrentThreadId();
    return (threadId.load(std::memory_order_relaxed) == *currentThreadIdOut);
}

[[maybe_unused]] bool TryUpdateThreadId(std::atomic<angle::ThreadId> *threadId,
                                        angle::ThreadId oldThreadId,
                                        angle::ThreadId newThreadId)
{
    const bool ok = (threadId->load(std::memory_order_relaxed) == oldThreadId);
    if (ok)
    {
        threadId->store(newThreadId, std::memory_order_relaxed);
    }
    return ok;
}
}  // namespace
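
// Note: the helpers above are referenced only from ASSERTs in the build without
// ANGLE_ENABLE_CONTEXT_MUTEX_RECURSION, which is why they are marked [[maybe_unused]].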

// ScopedContextMutexAddRefLock
void ScopedContextMutexAddRefLock::lock(ContextMutex *mutex)
{
    ASSERT(mutex != nullptr);
    ASSERT(mMutex == nullptr);
    // lock() before addRef() - using mMutex as synchronization
    mutex->lock();
    // Take the "root" mutex after the lock.
    mMutex = mutex->getRoot();
    ASSERT(mMutex->isReferenced());
    mMutex->addRef();
}
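
// Illustrative usage sketch (an assumption, not spelled out in this file): the class is intended
// as a RAII guard that keeps the locked "root" mutex referenced for the lifetime of a scope:
//
//     ScopedContextMutexAddRefLock scopedLock(&contextMutex);  // hypothetical ctor calling lock()
//     // ... access state shared through the mutex ...
//     // the destructor presumably calls release(UnlockBehaviour::kUnlock) on the add-ref'ed root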

// ContextMutex
ContextMutex::ContextMutex(ContextMutex *root)
    : mRoot(this), mOwnerThreadId(angle::InvalidThreadId()), mLockLevel(0), mRefCount(0), mRank(0)
{
    if (root != nullptr)
    {
        setNewRoot(root);
    }
}

ContextMutex::~ContextMutex()
{
    ASSERT(mLockLevel == 0);
    ASSERT(mRefCount == 0);
    ASSERT(mLeaves.empty());

    ContextMutex *const root = getRoot();
    if (this == root)
    {
        ASSERT(mOldRoots.empty());
    }
    else
    {
        for (ContextMutex *oldRoot : mOldRoots)
        {
            ASSERT(oldRoot->getRoot() == root);
            ASSERT(oldRoot->mLeaves.empty());
            oldRoot->release();
        }
        root->removeLeaf(this);
        root->release();
    }
}

void ContextMutex::Merge(ContextMutex *lockedMutex, ContextMutex *otherMutex)
{
    ASSERT(lockedMutex != nullptr);
    ASSERT(otherMutex != nullptr);

    // Since lockedMutex is locked, its "root" pointer is stable.
    ContextMutex *lockedRoot      = lockedMutex->getRoot();
    ContextMutex *otherLockedRoot = nullptr;

    // Merging updates the structure of both mutexes, therefore both mutexes must be locked before
    // continuing. The first mutex is already locked; the other mutex still needs to be locked.
    // Because another thread may perform a merge with the same mutexes in reverse order, we can't
    // simply lock otherMutex - that may cause a deadlock. Additionally, otherMutex may have the
    // same "root" (it is the same mutex or was already merged); in that case merging is not only
    // unnecessary, but locking otherMutex is guaranteed to deadlock.

    for (;;)
    {
        // First, check whether the "root" of otherMutex is the same as the "root" of lockedMutex.
        // lockedRoot is stable by definition and it is safe to compare with an "unstable root".
        ContextMutex *otherRoot = otherMutex->getRoot();
        if (otherRoot == lockedRoot)
        {
            // Do nothing if the two mutexes are the same/merged.
            return;
        }
        // Second, try to lock the otherMutex "root" (can't use lock()/lockImpl(), see the comment
        // above).
        if (otherRoot->tryLockImpl())
        {
            otherLockedRoot = otherRoot->getRoot();
            // The otherMutex "root" can't become the lockedMutex "root". For that to happen,
            // lockedMutex would have to be locked from some other thread first, which is
            // impossible, since it is already locked by this thread.
            ASSERT(otherLockedRoot != lockedRoot);
            // Lock is successful. Both mutexes are locked - can proceed with the merge...
            break;
        }
        // Lock was unsuccessful - unlock and retry...
        // May use "unlockImpl()" because lockedRoot is a "stable root" mutex.
        // Note: the lock will be preserved in case of a recursive lock.
        lockedRoot->unlockImpl();
        // Sleep a random amount to allow one of the threads to acquire the lock next time...
        std::this_thread::sleep_for(std::chrono::microseconds(rand() % 91 + 10));
        // Because lockedMutex was unlocked, its "root" might have changed. The lines below
        // reacquire the lock and update the lockedRoot pointer.
        lockedMutex->lock();
        lockedRoot = lockedMutex->getRoot();
    }

    // Decide the new "root". See the mRank comment for more details...

    ContextMutex *oldRoot = otherLockedRoot;
    ContextMutex *newRoot = lockedRoot;

    if (oldRoot->mRank > newRoot->mRank)
    {
        std::swap(oldRoot, newRoot);
    }
    else if (oldRoot->mRank == newRoot->mRank)
    {
        ++newRoot->mRank;
    }

    ASSERT(newRoot->isReferenced());

    // Update the structure
    for (ContextMutex *const leaf : oldRoot->mLeaves)
    {
        ASSERT(leaf->getRoot() == oldRoot);
        leaf->setNewRoot(newRoot);
    }
    oldRoot->mLeaves.clear();
    oldRoot->setNewRoot(newRoot);

    // Leave only the "merged" mutex locked. "oldRoot" is already merged, so "unlockImpl()" must
    // be used.
    oldRoot->unlockImpl();

    // A merge under a recursive lock is unexpected. Handle such cases anyway to be safe.
    while (oldRoot->mLockLevel > 0)
    {
        newRoot->lockImpl();
        oldRoot->unlockImpl();
    }
}
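
// Note: the mRoot/mLeaves/mRank bookkeeping above behaves like a union-find structure with
// union-by-rank. For example (illustrative only): merging a standalone mutex A (rank 0) into a
// group whose root B already has rank 1 keeps B as the root - A's mRoot is pointed at B and A is
// recorded in B's mLeaves; merging two rank-0 roots instead promotes the surviving root to rank 1.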

void ContextMutex::setNewRoot(ContextMutex *newRoot)
{
    ContextMutex *const oldRoot = getRoot();

    ASSERT(newRoot != oldRoot);
    mRoot.store(newRoot, std::memory_order_relaxed);
    newRoot->addRef();

    newRoot->addLeaf(this);

    if (oldRoot != this)
    {
        mOldRoots.emplace_back(oldRoot);
    }
}
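
// Note: the previous root is not released here; it is recorded in mOldRoots and released in the
// destructor, presumably so that stale mRoot pointers observed by threads that have not yet
// acquired the lock still refer to a live object.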

void ContextMutex::addLeaf(ContextMutex *leaf)
{
    ASSERT(this == getRoot());
    ASSERT(leaf->getRoot() == this);
    ASSERT(leaf->mLeaves.empty());
    ASSERT(mLeaves.count(leaf) == 0);
    mLeaves.emplace(leaf);
}

void ContextMutex::removeLeaf(ContextMutex *leaf)
{
    ASSERT(this == getRoot());
    ASSERT(leaf->getRoot() == this);
    ASSERT(leaf->mLeaves.empty());
    ASSERT(mLeaves.count(leaf) == 1);
    mLeaves.erase(leaf);
}

void ContextMutex::release(UnlockBehaviour unlockBehaviour)
{
    ASSERT(isReferenced());
    const bool needDelete = (--mRefCount == 0);
    if (unlockBehaviour == UnlockBehaviour::kUnlock)
    {
        ASSERT(this == getRoot());
        unlockImpl();
    }
    if (needDelete)
    {
        delete this;
    }
}

bool ContextMutex::try_lock()
{
    return getRoot()->tryLockImpl();
}

void ContextMutex::lock()
{
    getRoot()->lockImpl();
}

void ContextMutex::unlock()
{
    ContextMutex *const root = getRoot();
    // "root" is currently locked so "root->getRoot()" will return a stable result.
    ASSERT(root == root->getRoot());
    root->unlockImpl();
}
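
// Note: try_lock()/lock()/unlock() all forward to the current "root" mutex, so after a Merge()
// locking any member of the merged group serializes against every other member. With
// ANGLE_ENABLE_CONTEXT_MUTEX_RECURSION defined (below), the owner thread and a lock level are
// tracked so that the same thread may re-lock; otherwise the owner thread id is only checked in
// debug-only ASSERTs.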

#if defined(ANGLE_ENABLE_CONTEXT_MUTEX_RECURSION)
bool ContextMutex::tryLockImpl()
{
    const angle::ThreadId threadId = angle::GetCurrentThreadId();
    if (ANGLE_UNLIKELY(!mMutex.try_lock()))
    {
        if (ANGLE_UNLIKELY(mOwnerThreadId.load(std::memory_order_relaxed) == threadId))
        {
            ASSERT(this == getRoot());
            ASSERT(mLockLevel > 0);
            ++mLockLevel;
            return true;
        }
        return false;
    }
    ASSERT(mOwnerThreadId.load(std::memory_order_relaxed) == angle::InvalidThreadId());
    ASSERT(mLockLevel == 0);
    ContextMutex *const root = getRoot();
    if (ANGLE_UNLIKELY(this != root))
    {
        // Unlock, so only the "stable root" mutex remains locked
        mMutex.unlock();
        return root->tryLockImpl();
    }
    mOwnerThreadId.store(threadId, std::memory_order_relaxed);
    mLockLevel = 1;
    return true;
}

void ContextMutex::lockImpl()
{
    const angle::ThreadId threadId = angle::GetCurrentThreadId();
    if (ANGLE_UNLIKELY(!mMutex.try_lock()))
    {
        if (ANGLE_UNLIKELY(mOwnerThreadId.load(std::memory_order_relaxed) == threadId))
        {
            ASSERT(this == getRoot());
            ASSERT(mLockLevel > 0);
            ++mLockLevel;
            return;
        }
        mMutex.lock();
    }
    ASSERT(mOwnerThreadId.load(std::memory_order_relaxed) == angle::InvalidThreadId());
    ASSERT(mLockLevel == 0);
    ContextMutex *const root = getRoot();
    if (ANGLE_UNLIKELY(this != root))
    {
        // Unlock, so only the "stable root" mutex remains locked
        mMutex.unlock();
        root->lockImpl();
    }
    else
    {
        mOwnerThreadId.store(threadId, std::memory_order_relaxed);
        mLockLevel = 1;
    }
}

void ContextMutex::unlockImpl()
{
    ASSERT(mOwnerThreadId.load(std::memory_order_relaxed) == angle::GetCurrentThreadId());
    ASSERT(mLockLevel > 0);
    if (ANGLE_LIKELY(--mLockLevel == 0))
    {
        mOwnerThreadId.store(angle::InvalidThreadId(), std::memory_order_relaxed);
        mMutex.unlock();
    }
}
#else
bool ContextMutex::tryLockImpl()
{
    angle::ThreadId currentThreadId;
    ASSERT(!CheckThreadIdCurrent(mOwnerThreadId, &currentThreadId));
    if (mMutex.try_lock())
    {
        ContextMutex *const root = getRoot();
        if (ANGLE_UNLIKELY(this != root))
        {
            // Unlock, so only the "stable root" mutex remains locked
            mMutex.unlock();
            return root->tryLockImpl();
        }
        ASSERT(TryUpdateThreadId(&mOwnerThreadId, angle::InvalidThreadId(), currentThreadId));
        return true;
    }
    return false;
}

void ContextMutex::lockImpl()
{
    angle::ThreadId currentThreadId;
    ASSERT(!CheckThreadIdCurrent(mOwnerThreadId, &currentThreadId));
    mMutex.lock();
    ContextMutex *const root = getRoot();
    if (ANGLE_UNLIKELY(this != root))
    {
        // Unlock, so only the "stable root" mutex remains locked
        mMutex.unlock();
        root->lockImpl();
    }
    else
    {
        ASSERT(TryUpdateThreadId(&mOwnerThreadId, angle::InvalidThreadId(), currentThreadId));
    }
}

void ContextMutex::unlockImpl()
{
    ASSERT(
        TryUpdateThreadId(&mOwnerThreadId, angle::GetCurrentThreadId(), angle::InvalidThreadId()));
    mMutex.unlock();
}
#endif

}  // namespace egl