Lines matching the identifier 'lock' in kernel/locking/rtmutex.c (full-identifier search)

27 #include <trace/events/lock.h>
36 struct rt_mutex *lock, in __ww_mutex_add_waiter() argument
43 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, in __ww_mutex_check_waiters() argument
49 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, in ww_mutex_lock_acquired() argument
54 static inline int __ww_mutex_check_kill(struct rt_mutex *lock, in __ww_mutex_check_kill() argument
68 * lock->owner state tracking:
70 * lock->owner holds the task_struct pointer of the owner. Bit 0
71 * is used to keep track of the "lock has waiters" state.
74 * NULL 0 lock is free (fast acquire possible)
75 * NULL 1 lock is free and has waiters and the top waiter
76 * is going to take the lock*
77 * taskpointer 0 lock is held (fast release possible)
78 * taskpointer 1 lock is held and has waiters**
81 * possible when bit 0 of lock->owner is 0.
83 * (*) It also can be a transitional state when grabbing the lock
84 * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
85 * we need to set the bit0 before looking at the lock, and the owner may be
89 * waiters. This can happen when grabbing the lock in the slow path.
90 * To prevent a cmpxchg of the owner releasing the lock, we need to
91 * set this bit before looking at the lock.
95 rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_owner_encode() argument
99 if (rt_mutex_has_waiters(lock)) in rt_mutex_owner_encode()
106 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
109 * lock->wait_lock is held but explicit acquire semantics are needed in rt_mutex_set_owner()
110 * for a new lock owner so WRITE_ONCE is insufficient. in rt_mutex_set_owner()
112 xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); in rt_mutex_set_owner()
115 static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock) in rt_mutex_clear_owner() argument
117 /* lock->wait_lock is held so the unlock provides release semantics. */ in rt_mutex_clear_owner()
118 WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); in rt_mutex_clear_owner()
121 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) in clear_rt_mutex_waiters() argument
123 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
124 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
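
To make the owner-word encoding above concrete, here is a minimal user-space sketch, assuming only that task pointers are at least 2-byte aligned so bit 0 is free. struct task, struct fake_lock and the lock_* helpers are invented for illustration; the kernel's own helpers additionally fold the has-waiters check into rt_mutex_owner_encode() and use xchg_acquire()/WRITE_ONCE() for the stores, as the listing shows.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL

struct task { const char *name; };
struct fake_lock { uintptr_t owner; };  /* task pointer | waiters bit */

static struct task *lock_owner(struct fake_lock *l)
{
    return (struct task *)(l->owner & ~HAS_WAITERS);
}

static bool lock_has_waiters(struct fake_lock *l)
{
    return l->owner & HAS_WAITERS;
}

static void lock_set_owner(struct fake_lock *l, struct task *t, bool waiters)
{
    l->owner = (uintptr_t)t | (waiters ? HAS_WAITERS : 0);
}

int main(void)
{
    static struct task a = { "A" };     /* task structs are pointer-aligned */
    struct fake_lock l = { 0 };

    lock_set_owner(&l, &a, true);       /* "taskpointer 1": held, has waiters */
    assert(lock_owner(&l) == &a && lock_has_waiters(&l));

    lock_set_owner(&l, NULL, false);    /* "NULL 0": free, fast acquire possible */
    assert(!lock_owner(&l) && !lock_has_waiters(&l));
    printf("owner encoding ok\n");
    return 0;
}
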
128 fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock) in fixup_rt_mutex_waiters() argument
130 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
132 if (rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
137 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
143 * lock(l->lock) in fixup_rt_mutex_waiters()
147 * unlock(l->lock) in fixup_rt_mutex_waiters()
151 * lock(l->lock) in fixup_rt_mutex_waiters()
155 * unlock(l->lock) in fixup_rt_mutex_waiters()
158 * lock(l->lock) in fixup_rt_mutex_waiters()
161 * unlock(l->lock) in fixup_rt_mutex_waiters()
162 * lock(l->lock) in fixup_rt_mutex_waiters()
166 * unlock(l->lock) in fixup_rt_mutex_waiters()
167 * lock(l->lock) in fixup_rt_mutex_waiters()
174 * lock(l->lock) in fixup_rt_mutex_waiters()
187 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
201 * in case that the lock acquisition failed it might in fixup_rt_mutex_waiters()
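
fixup_rt_mutex_waiters() appears above only in fragments (the long race diagram in between contains few matching lines). Its net effect is: if no waiter is queued on the lock any more, clear a stale RT_MUTEX_HAS_WAITERS bit, using acquire ordering when the caller has just acquired the lock. A user-space sketch with C11 atomics; struct demo_lock and the plain nr_queued counter are invented stand-ins for the kernel's lock and waiter rbtree.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define HAS_WAITERS 1UL

struct demo_lock {
    _Atomic(unsigned long) owner;   /* task pointer value | HAS_WAITERS */
    int nr_queued;                  /* stands in for the rbtree of waiters */
};

static void demo_fixup_waiters(struct demo_lock *l, bool acquired_lock)
{
    unsigned long owner;

    if (l->nr_queued)               /* real waiters: the bit must stay set */
        return;

    owner = atomic_load_explicit(&l->owner, memory_order_relaxed);
    if (!(owner & HAS_WAITERS))     /* nothing stale to clear */
        return;

    if (acquired_lock)              /* kernel: xchg_acquire() */
        atomic_exchange_explicit(&l->owner, owner & ~HAS_WAITERS,
                                 memory_order_acquire);
    else                            /* kernel: WRITE_ONCE() */
        atomic_store_explicit(&l->owner, owner & ~HAS_WAITERS,
                              memory_order_relaxed);
}

int main(void)
{
    struct demo_lock l = { .owner = HAS_WAITERS, .nr_queued = 0 };

    demo_fixup_waiters(&l, true);   /* a failed trylock left a stale bit behind */
    printf("owner word: %lu\n", atomic_load(&l.owner));     /* 0 */
    return 0;
}
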
216 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
220 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
223 static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) in rt_mutex_try_acquire() argument
225 return rt_mutex_cmpxchg_acquire(lock, NULL, current); in rt_mutex_try_acquire()
228 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
232 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
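
The two helpers above are the whole cmpxchg fast path: acquire by moving the owner from NULL to current with acquire ordering, release by moving it back with release ordering. A user-space sketch using C11 atomics in place of try_cmpxchg_acquire()/try_cmpxchg_release(); struct fast_lock and the demo tasks are invented for illustration only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; };
struct fast_lock { _Atomic(struct task *) owner; };

/* Fast acquire: succeed only if the lock is free (owner == NULL). */
static bool fast_trylock(struct fast_lock *l, struct task *me)
{
    struct task *expected = NULL;

    return atomic_compare_exchange_strong_explicit(&l->owner, &expected, me,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}

/* Fast release: succeed only if we still own it and nothing else changed. */
static bool fast_unlock(struct fast_lock *l, struct task *me)
{
    struct task *expected = me;

    return atomic_compare_exchange_strong_explicit(&l->owner, &expected, NULL,
                                                   memory_order_release,
                                                   memory_order_relaxed);
}

int main(void)
{
    struct task a = { "A" }, b = { "B" };
    struct fast_lock l = { NULL };

    printf("A trylock: %d\n", fast_trylock(&l, &a));    /* 1 */
    printf("B trylock: %d\n", fast_trylock(&l, &b));    /* 0: contended */
    printf("A unlock:  %d\n", fast_unlock(&l, &a));     /* 1 */
    return 0;
}
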
237 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
240 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
242 unsigned long *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
261 * 2) Drop lock->wait_lock
262 * 3) Try to unlock the lock with cmpxchg
264 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
266 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
268 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe()
270 clear_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
271 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
277 * lock(wait_lock); in unlock_rt_mutex_safe()
279 * mark_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
280 * acquire(lock); in unlock_rt_mutex_safe()
284 * lock(wait_lock); in unlock_rt_mutex_safe()
285 * mark_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
290 * lock(wait_lock); in unlock_rt_mutex_safe()
293 * lock(wait_lock); in unlock_rt_mutex_safe()
294 * acquire(lock); in unlock_rt_mutex_safe()
296 return rt_mutex_cmpxchg_release(lock, owner, NULL); in unlock_rt_mutex_safe()
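
unlock_rt_mutex_safe() implements the sequence spelled out in the comment: clear the waiters bit while wait_lock is still held, drop wait_lock, then try to cmpxchg the owner to NULL with release ordering; a failure means a new waiter set the bit in the meantime and the caller (rt_mutex_slowunlock(), later in this listing) retries under wait_lock. A hedged user-space sketch, with a pthread mutex standing in for the raw wait_lock and all names invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL

struct demo_lock {
    pthread_mutex_t wait_lock;
    _Atomic uintptr_t owner;        /* task "pointer" | HAS_WAITERS */
};

/* Called with l->wait_lock held; always returns with it released. */
static bool demo_unlock_safe(struct demo_lock *l)
{
    uintptr_t owner = atomic_load(&l->owner) & ~HAS_WAITERS;

    atomic_store(&l->owner, owner);     /* clear_rt_mutex_waiters() */
    pthread_mutex_unlock(&l->wait_lock);

    /* Fails if a new waiter set HAS_WAITERS after we dropped wait_lock. */
    return atomic_compare_exchange_strong_explicit(&l->owner, &owner, 0,
                                                   memory_order_release,
                                                   memory_order_relaxed);
}

int main(void)
{
    static int task;
    struct demo_lock l = {
        .wait_lock = PTHREAD_MUTEX_INITIALIZER,
        .owner = (uintptr_t)&task,      /* held, no waiters queued */
    };

    pthread_mutex_lock(&l.wait_lock);
    printf("safe unlock: %d\n", demo_unlock_safe(&l));  /* 1 */
    return 0;
}
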
300 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
308 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
310 static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) in rt_mutex_try_acquire() argument
319 return rt_mutex_slowtrylock(lock); in rt_mutex_try_acquire()
322 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
329 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
331 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
332 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
336 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
338 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
340 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
342 lock->owner = NULL; in unlock_rt_mutex_safe()
343 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
364 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_update_prio()
377 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_clone_prio()
479 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
481 lockdep_assert_held(&lock->wait_lock); in rt_mutex_enqueue()
483 rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
487 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
489 lockdep_assert_held(&lock->wait_lock); in rt_mutex_dequeue()
494 rb_erase_cached(&waiter->tree.entry, &lock->waiters); in rt_mutex_dequeue()
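
rt_mutex_enqueue()/rt_mutex_dequeue() keep the waiters in a priority-sorted rbtree whose leftmost (highest-priority) node is cached, so rt_mutex_top_waiter() is O(1). As a stand-in for rb_add_cached()/rb_erase_cached(), this sketch keeps a sorted singly linked list whose head plays the role of the cached leftmost node; all names here are invented.

#include <stdio.h>

struct demo_waiter {
    int prio;                       /* lower value == higher priority */
    struct demo_waiter *next;
};

struct demo_lock { struct demo_waiter *waiters; };  /* sorted, head = top */

static void demo_enqueue(struct demo_lock *l, struct demo_waiter *w)
{
    struct demo_waiter **pos = &l->waiters;

    while (*pos && (*pos)->prio <= w->prio)     /* FIFO among equal prio */
        pos = &(*pos)->next;
    w->next = *pos;
    *pos = w;
}

static struct demo_waiter *demo_top_waiter(struct demo_lock *l)
{
    return l->waiters;              /* rb_first_cached() equivalent */
}

int main(void)
{
    struct demo_waiter a = { 20 }, b = { 5 }, c = { 10 };
    struct demo_lock l = { NULL };

    demo_enqueue(&l, &a);
    demo_enqueue(&l, &b);
    demo_enqueue(&l, &c);
    printf("top waiter prio: %d\n", demo_top_waiter(&l)->prio);     /* 5 */
    return 0;
}
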
526 static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock, in rt_mutex_adjust_prio() argument
531 lockdep_assert_held(&lock->wait_lock); in rt_mutex_adjust_prio()
532 lockdep_assert(rt_mutex_owner(lock) == p); in rt_mutex_adjust_prio()
601 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
616 * comparison to detect lock chain changes.
650 * [1] lock(task->pi_lock); [R] acquire [P1]
653 * [4] lock = waiter->lock; [P1]
654 * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L]
659 * [7] requeue_lock_waiter(lock, waiter); [P1] + [L]
663 * [10] task = owner(lock); [L]
665 * lock(task->pi_lock); [L] acquire [P2]
666 * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L]
669 * unlock(lock->wait_lock); release [L]
672 * Where P1 is the blocking task and P2 is the lock owner; going up one step
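
Steps [4]-[6] describe the one inverted lock ordering in the chain walk: task->pi_lock is already held when lock->wait_lock is needed, which is the reverse of the usual nesting, so only a trylock is deadlock-free and a failure means "drop pi_lock and retry". A user-space sketch of just that retry pattern, with pthread spinlocks standing in for the kernel's raw spinlocks and sched_yield() for cpu_relax(); the surrounding requeue/boost work ([7]-[11]) is elided and the struct layout is invented.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct demo_task { pthread_spinlock_t pi_lock; };
struct demo_lock { pthread_spinlock_t wait_lock; };

static void walk_step(struct demo_task *task, struct demo_lock *lock)
{
    for (;;) {
        pthread_spin_lock(&task->pi_lock);
        /* [5] inverted order: only a trylock is deadlock-safe here */
        if (pthread_spin_trylock(&lock->wait_lock) == 0)
            break;
        /* [6] back off: drop pi_lock and retry the pair */
        pthread_spin_unlock(&task->pi_lock);
        sched_yield();              /* kernel: cpu_relax() */
    }

    /* [7]..[11] would requeue the waiter and boost the owner here. */

    pthread_spin_unlock(&lock->wait_lock);
    pthread_spin_unlock(&task->pi_lock);
}

int main(void)
{
    struct demo_task t;
    struct demo_lock l;

    pthread_spin_init(&t.pi_lock, PTHREAD_PROCESS_PRIVATE);
    pthread_spin_init(&l.wait_lock, PTHREAD_PROCESS_PRIVATE);
    walk_step(&t, &l);
    printf("one chain-walk step completed\n");
    return 0;
}
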
687 struct rt_mutex_base *lock; in rt_mutex_adjust_prio_chain() local
701 * We limit the lock chain length for each invocation. in rt_mutex_adjust_prio_chain()
712 printk(KERN_WARNING "Maximum lock depth %d reached " in rt_mutex_adjust_prio_chain()
752 * the previous owner of the lock might have released the lock. in rt_mutex_adjust_prio_chain()
759 * the task might have moved on in the lock chain or even left in rt_mutex_adjust_prio_chain()
760 * the chain completely and blocks now on an unrelated lock or in rt_mutex_adjust_prio_chain()
763 * We stored the lock on which @task was blocked in @next_lock, in rt_mutex_adjust_prio_chain()
766 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
770 * There could be 'spurious' loops in the lock graph due to ww_mutex, in rt_mutex_adjust_prio_chain()
831 * [4] Get the next lock; per holding task->pi_lock we can't unblock in rt_mutex_adjust_prio_chain()
832 * and guarantee @lock's existence. in rt_mutex_adjust_prio_chain()
834 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
837 * which is the reverse lock order versus the other rtmutex in rt_mutex_adjust_prio_chain()
840 * Per the above, holding task->pi_lock guarantees lock exists, so in rt_mutex_adjust_prio_chain()
841 * inverting this lock order is infeasible from a life-time in rt_mutex_adjust_prio_chain()
844 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
852 * lock->wait_lock. in rt_mutex_adjust_prio_chain()
854 * Deadlock detection. If the lock is the same as the original in rt_mutex_adjust_prio_chain()
855 * lock which caused us to walk the lock chain or if the in rt_mutex_adjust_prio_chain()
856 * current lock is owned by the task which initiated the chain in rt_mutex_adjust_prio_chain()
859 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
874 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
879 * If we just follow the lock chain for deadlock detection, no in rt_mutex_adjust_prio_chain()
892 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
893 * If there is no owner of the lock, end of chain. in rt_mutex_adjust_prio_chain()
895 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
896 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
900 /* [10] Grab the next task, i.e. owner of @lock */ in rt_mutex_adjust_prio_chain()
901 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
914 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
918 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
928 * operation on @lock. We need it for the boost/deboost in rt_mutex_adjust_prio_chain()
931 prerequeue_top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
933 /* [7] Requeue the waiter in the lock waiter tree. */ in rt_mutex_adjust_prio_chain()
934 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
949 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
955 * Since we hold lock->wait_lock, task cannot unblock, even if we in rt_mutex_adjust_prio_chain()
962 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
964 * We must abort the chain walk if there is no lock owner even in rt_mutex_adjust_prio_chain()
965 * in the deadlock detection case, as we have nothing to in rt_mutex_adjust_prio_chain()
968 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
972 * to get the lock. in rt_mutex_adjust_prio_chain()
974 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
977 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
982 * [10] Grab the next task, i.e. the owner of @lock in rt_mutex_adjust_prio_chain()
984 * Per holding lock->wait_lock and checking for !owner above, there in rt_mutex_adjust_prio_chain()
987 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
991 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
994 * waiter on the lock. Replace the previous top waiter in rt_mutex_adjust_prio_chain()
1001 rt_mutex_adjust_prio(lock, task); in rt_mutex_adjust_prio_chain()
1005 * The waiter was the top waiter on the lock, but is in rt_mutex_adjust_prio_chain()
1015 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
1018 rt_mutex_adjust_prio(lock, task); in rt_mutex_adjust_prio_chain()
1028 * and lock->wait_lock. The actual decisions are made after we in rt_mutex_adjust_prio_chain()
1031 * Check whether the task which owns the current lock is pi in rt_mutex_adjust_prio_chain()
1032 * blocked itself. If yes we store a pointer to the lock for in rt_mutex_adjust_prio_chain()
1033 * the lock chain change detection above. After we dropped in rt_mutex_adjust_prio_chain()
1038 * Store the top waiter of @lock for the end of chain walk in rt_mutex_adjust_prio_chain()
1041 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
1045 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
1051 * We reached the end of the lock chain. Stop right here. No in rt_mutex_adjust_prio_chain()
1058 * If the current waiter is not the top waiter on the lock, in rt_mutex_adjust_prio_chain()
1078 * Must be called with lock->wait_lock held and interrupts disabled
1080 * @lock: The lock to be acquired.
1081 * @task: The task which wants to acquire the lock
1082 * @waiter: The waiter that is queued to the lock's wait tree if the
1086 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
1089 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
1092 * Before testing whether we can acquire @lock, we set the in try_to_take_rt_mutex()
1093 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all in try_to_take_rt_mutex()
1094 * other tasks which try to modify @lock into the slow path in try_to_take_rt_mutex()
1095 * and they serialize on @lock->wait_lock. in try_to_take_rt_mutex()
1100 * - There is a lock owner. The caller must fixup the in try_to_take_rt_mutex()
1101 * transient state if it does a trylock or leaves the lock in try_to_take_rt_mutex()
1104 * - @task acquires the lock and there are no other in try_to_take_rt_mutex()
1108 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
1111 * If @lock has an owner, give up. in try_to_take_rt_mutex()
1113 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
1118 * into @lock waiter tree. If @waiter == NULL then this is a in try_to_take_rt_mutex()
1122 struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); in try_to_take_rt_mutex()
1125 * If waiter is the highest priority waiter of @lock, in try_to_take_rt_mutex()
1130 * We can acquire the lock. Remove the waiter from the in try_to_take_rt_mutex()
1131 * lock waiters tree. in try_to_take_rt_mutex()
1133 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
1139 * If the lock has waiters already we check whether @task is in try_to_take_rt_mutex()
1140 * eligible to take over the lock. in try_to_take_rt_mutex()
1143 * the lock. @task->pi_blocked_on is NULL, so it does in try_to_take_rt_mutex()
1146 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
1149 rt_mutex_top_waiter(lock))) in try_to_take_rt_mutex()
1154 * don't have to change anything in the lock in try_to_take_rt_mutex()
1159 * No waiters. Take the lock without the in try_to_take_rt_mutex()
1177 * Finish the lock acquisition. @task is the new owner. If in try_to_take_rt_mutex()
1181 if (rt_mutex_has_waiters(lock)) in try_to_take_rt_mutex()
1182 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
1190 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
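
Condensing the rules above: try_to_take_rt_mutex() first sets the waiters bit so every other path is forced through wait_lock, gives up if the lock has an owner, and otherwise lets @task have the lock only if it is the top waiter or outranks the current top waiter. A simplified user-space model of just that decision (the real function also dequeues the waiter and maintains the owner's pi-waiters tree); all types and the task_is_top_waiter flag are invented.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_task { int prio; };     /* lower value == higher priority */

struct demo_lock {
    struct demo_task *owner;
    bool has_waiters_bit;
    struct demo_task *top_waiter;   /* NULL if the wait tree is empty */
};

static bool demo_try_to_take(struct demo_lock *l, struct demo_task *task,
                             bool task_is_top_waiter)
{
    l->has_waiters_bit = true;      /* mark_rt_mutex_waiters() */

    if (l->owner)                   /* someone holds it: give up */
        return false;

    if (l->top_waiter && !task_is_top_waiter &&
        task->prio >= l->top_waiter->prio)      /* cannot outrank top waiter */
        return false;

    l->owner = task;                /* rt_mutex_set_owner() */
    return true;
}

int main(void)
{
    struct demo_task low = { 20 }, high = { 5 };
    struct demo_lock l = { .owner = NULL, .top_waiter = &high };

    printf("low steals: %d\n", demo_try_to_take(&l, &low, false));  /* 0 */
    printf("high takes: %d\n", demo_try_to_take(&l, &high, true));  /* 1 */
    return 0;
}
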
1196 * Task blocks on lock.
1200 * This must be called with lock->wait_lock held and interrupts disabled
1202 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, in task_blocks_on_rt_mutex() argument
1209 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
1214 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
1233 waiter->lock = lock; in task_blocks_on_rt_mutex()
1237 /* Get the top priority waiter on the lock */ in task_blocks_on_rt_mutex()
1238 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
1239 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
1240 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
1250 rtm = container_of(lock, struct rt_mutex, rtmutex); in task_blocks_on_rt_mutex()
1254 rt_mutex_dequeue(lock, waiter); in task_blocks_on_rt_mutex()
1265 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
1269 rt_mutex_adjust_prio(lock, owner); in task_blocks_on_rt_mutex()
1276 /* Store the lock on which owner is blocked or NULL */ in task_blocks_on_rt_mutex()
1289 * The owner can't disappear while holding a lock, in task_blocks_on_rt_mutex()
1295 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in task_blocks_on_rt_mutex()
1297 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
1300 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1309 * Called with lock->wait_lock held and interrupts disabled.
1312 struct rt_mutex_base *lock) in mark_wakeup_next_waiter() argument
1316 lockdep_assert_held(&lock->wait_lock); in mark_wakeup_next_waiter()
1320 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1330 rt_mutex_adjust_prio(lock, current); in mark_wakeup_next_waiter()
1334 * queued on the lock until it gets the lock, this lock in mark_wakeup_next_waiter()
1338 * the top waiter can steal this lock. in mark_wakeup_next_waiter()
1340 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
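
The assignment above is the hand-off state: the owner field holds only RT_MUTEX_HAS_WAITERS, i.e. no owner but the waiters bit set, so a fast-path cmpxchg that expects NULL keeps failing and every contender, including the just-woken top waiter, has to go through the slow path under wait_lock. A small sketch of that effect, with an invented atomic owner word.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL

static _Atomic uintptr_t demo_owner;

static bool demo_fastpath_acquire(uintptr_t me)
{
    uintptr_t expected = 0;         /* fast path only works on a free lock */

    return atomic_compare_exchange_strong(&demo_owner, &expected, me);
}

int main(void)
{
    static int task;                /* stand-in for a task struct */

    /* mark_wakeup_next_waiter(): no owner, but the waiters bit stays set. */
    atomic_store(&demo_owner, HAS_WAITERS);

    printf("fast path while hand-off pending: %d\n",
           demo_fastpath_acquire((uintptr_t)&task));    /* 0: forced slow */
    return 0;
}
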
1357 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) in __rt_mutex_slowtrylock() argument
1359 int ret = try_to_take_rt_mutex(lock, current, NULL); in __rt_mutex_slowtrylock()
1362 * try_to_take_rt_mutex() sets the lock waiters bit in __rt_mutex_slowtrylock()
1365 fixup_rt_mutex_waiters(lock, true); in __rt_mutex_slowtrylock()
1371 * Slow path try-lock function:
1373 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) in rt_mutex_slowtrylock() argument
1379 * If the lock already has an owner we fail to get the lock. in rt_mutex_slowtrylock()
1380 * This can be done without taking the @lock->wait_lock as in rt_mutex_slowtrylock()
1383 if (rt_mutex_owner(lock)) in rt_mutex_slowtrylock()
1387 * The mutex has currently no owner. Lock the wait lock and try to in rt_mutex_slowtrylock()
1388 * acquire the lock. We use irqsave here to support early boot calls. in rt_mutex_slowtrylock()
1390 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1392 ret = __rt_mutex_slowtrylock(lock); in rt_mutex_slowtrylock()
1394 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1399 static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) in __rt_mutex_trylock() argument
1401 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_trylock()
1404 return rt_mutex_slowtrylock(lock); in __rt_mutex_trylock()
1410 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) in rt_mutex_slowunlock() argument
1416 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1418 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
1425 * foo->lock->owner = NULL; in rt_mutex_slowunlock()
1426 * rtmutex_lock(foo->lock); <- fast path in rt_mutex_slowunlock()
1428 * rtmutex_unlock(foo->lock); <- fast path in rt_mutex_slowunlock()
1431 * raw_spin_unlock(foo->lock->wait_lock); in rt_mutex_slowunlock()
1436 * lock->wait_lock. So we do the following sequence: in rt_mutex_slowunlock()
1438 * owner = rt_mutex_owner(lock); in rt_mutex_slowunlock()
1439 * clear_rt_mutex_waiters(lock); in rt_mutex_slowunlock()
1440 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1441 * if (cmpxchg(&lock->owner, owner, 0) == owner) in rt_mutex_slowunlock()
1446 * lock->owner is serialized by lock->wait_lock: in rt_mutex_slowunlock()
1448 * lock->owner = NULL; in rt_mutex_slowunlock()
1449 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1451 while (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
1452 /* Drops lock->wait_lock ! */ in rt_mutex_slowunlock()
1453 if (unlock_rt_mutex_safe(lock, flags) == true) in rt_mutex_slowunlock()
1456 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1465 mark_wakeup_next_waiter(&wqh, lock); in rt_mutex_slowunlock()
1466 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1471 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) in __rt_mutex_unlock() argument
1473 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) in __rt_mutex_unlock()
1476 rt_mutex_slowunlock(lock); in __rt_mutex_unlock()
1480 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
1489 if (owner != rt_mutex_owner(lock)) in rtmutex_spin_on_owner()
1493 * the lock owner still matches @owner. If that fails, in rtmutex_spin_on_owner()
1500 * - the lock owner has been scheduled out in rtmutex_spin_on_owner()
1507 !rt_mutex_waiter_is_top_waiter(lock, waiter)) { in rtmutex_spin_on_owner()
1517 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
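
The two definitions above boil down to one predicate: keep spinning only while the same owner still holds the lock, that owner is running on a CPU, we have not been asked to reschedule, and we are still the top waiter (the second definition is the stub for configurations without owner spinning and always says "stop"). A sketch of that predicate with invented helper fields rather than the kernel's vcpu_is_preempted()/need_resched() checks.

#include <stdbool.h>
#include <stdio.h>

struct spin_state {
    const void *owner;              /* snapshot taken before spinning */
    bool owner_on_cpu;
    bool need_resched;
    bool still_top_waiter;
};

static bool keep_spinning(const struct spin_state *s, const void *current_owner)
{
    if (current_owner != s->owner)  /* owner changed: stop and re-evaluate */
        return false;
    if (s->need_resched)            /* we should give up the CPU */
        return false;
    if (!s->owner_on_cpu)           /* owner scheduled out: blocking is cheaper */
        return false;
    return s->still_top_waiter;     /* someone outranks us: stop spinning */
}

int main(void)
{
    struct spin_state s = {
        .owner = (const void *)0x1, .owner_on_cpu = true,
        .need_resched = false, .still_top_waiter = true,
    };

    printf("spin: %d\n", keep_spinning(&s, (const void *)0x1));     /* 1 */
    s.owner_on_cpu = false;
    printf("spin: %d\n", keep_spinning(&s, (const void *)0x1));     /* 0 */
    return 0;
}
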
1533 * Remove a waiter from a lock and give up
1535 * Must be called with lock->wait_lock held and interrupts disabled. It must
1538 static void __sched remove_waiter(struct rt_mutex_base *lock, in remove_waiter() argument
1541 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1542 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
1545 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1548 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1554 * waiter of the lock and there is an owner to update. in remove_waiter()
1563 if (rt_mutex_has_waiters(lock)) in remove_waiter()
1564 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1566 rt_mutex_adjust_prio(lock, owner); in remove_waiter()
1568 /* Store the lock on which owner is blocked or NULL */ in remove_waiter()
1583 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1585 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1588 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1593 * @lock: the rt_mutex to take
1599 * @wake_q: wake_q of tasks to wake when we drop the lock->wait_lock
1601 * Must be called with lock->wait_lock held and interrupts disabled
1603 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, in rt_mutex_slowlock_block() argument
1609 __releases(&lock->wait_lock) __acquires(&lock->wait_lock) in rt_mutex_slowlock_block()
1611 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in rt_mutex_slowlock_block()
1616 /* Try to acquire the lock: */ in rt_mutex_slowlock_block()
1617 if (try_to_take_rt_mutex(lock, current, waiter)) in rt_mutex_slowlock_block()
1635 if (waiter == rt_mutex_top_waiter(lock)) in rt_mutex_slowlock_block()
1636 owner = rt_mutex_owner(lock); in rt_mutex_slowlock_block()
1639 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in rt_mutex_slowlock_block()
1641 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) in rt_mutex_slowlock_block()
1644 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
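
rt_mutex_slowlock_block() is the wait loop proper: with wait_lock held, try to take the lock; if a signal or timeout ended the wait, bail out; otherwise drop wait_lock, block (or spin on the owner as above), and re-take wait_lock before the next attempt. A user-space skeleton of that loop, with a pthread mutex for wait_lock, a boolean for "lock is free", and sched_yield() in place of schedule(); all names are invented.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_rtmutex {
    pthread_mutex_t wait_lock;      /* kernel: raw spinlock, IRQs off */
    bool available;
};

/* Called with m->wait_lock held; returns with it held again. */
static int demo_slowlock_block(struct demo_rtmutex *m, bool *interrupted)
{
    for (;;) {
        if (m->available) {         /* try_to_take_rt_mutex() */
            m->available = false;
            return 0;
        }
        if (*interrupted)           /* signal or timeout ended the wait */
            return -1;

        pthread_mutex_unlock(&m->wait_lock);
        sched_yield();              /* kernel: schedule() or spin on owner */
        pthread_mutex_lock(&m->wait_lock);
    }
}

int main(void)
{
    struct demo_rtmutex m = {
        .wait_lock = PTHREAD_MUTEX_INITIALIZER,
        .available = true,
    };
    bool interrupted = false;

    pthread_mutex_lock(&m.wait_lock);
    printf("slowlock_block: %d\n", demo_slowlock_block(&m, &interrupted));
    pthread_mutex_unlock(&m.wait_lock);
    return 0;
}
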
1653 struct rt_mutex_base *lock, in rt_mutex_handle_deadlock() argument
1666 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_handle_deadlock()
1677 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
1678 * @lock: The rtmutex to block on in __rt_mutex_slowlock() argument
1685 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, in __rt_mutex_slowlock() argument
1692 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in __rt_mutex_slowlock()
1696 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_slowlock()
1698 /* Try to acquire the lock again: */ in __rt_mutex_slowlock()
1699 if (try_to_take_rt_mutex(lock, current, NULL)) { in __rt_mutex_slowlock()
1709 trace_contention_begin(lock, LCB_F_RT); in __rt_mutex_slowlock()
1711 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q); in __rt_mutex_slowlock()
1713 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q); in __rt_mutex_slowlock()
1716 /* acquired the lock */ in __rt_mutex_slowlock()
1724 remove_waiter(lock, waiter); in __rt_mutex_slowlock()
1725 rt_mutex_handle_deadlock(ret, chwalk, lock, waiter); in __rt_mutex_slowlock()
1732 fixup_rt_mutex_waiters(lock, true); in __rt_mutex_slowlock()
1734 trace_contention_end(lock, ret); in __rt_mutex_slowlock()
1739 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, in __rt_mutex_slowlock_locked() argument
1750 ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, in __rt_mutex_slowlock_locked()
1759 * @lock: The rtmutex to block on in rt_mutex_slowlock() argument
1763 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, in rt_mutex_slowlock() argument
1784 * rtmutex with lock->wait_lock held. But we cannot unconditionally in rt_mutex_slowlock()
1788 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1789 ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q); in rt_mutex_slowlock()
1790 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in rt_mutex_slowlock()
1796 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, in __rt_mutex_lock() argument
1801 if (likely(rt_mutex_try_acquire(lock))) in __rt_mutex_lock()
1804 return rt_mutex_slowlock(lock, NULL, state); in __rt_mutex_lock()
1814 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
1815 * @lock: The underlying RT mutex
1818 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock, in rtlock_slowlock_locked() argument
1820 __releases(&lock->wait_lock) __acquires(&lock->wait_lock) in rtlock_slowlock_locked()
1825 lockdep_assert_held(&lock->wait_lock); in rtlock_slowlock_locked()
1827 if (try_to_take_rt_mutex(lock, current, NULL)) in rtlock_slowlock_locked()
1835 trace_contention_begin(lock, LCB_F_RT); in rtlock_slowlock_locked()
1837 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK, wake_q); in rtlock_slowlock_locked()
1840 /* Try to acquire the lock again */ in rtlock_slowlock_locked()
1841 if (try_to_take_rt_mutex(lock, current, &waiter)) in rtlock_slowlock_locked()
1844 if (&waiter == rt_mutex_top_waiter(lock)) in rtlock_slowlock_locked()
1845 owner = rt_mutex_owner(lock); in rtlock_slowlock_locked()
1848 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in rtlock_slowlock_locked()
1850 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) in rtlock_slowlock_locked()
1853 raw_spin_lock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1864 fixup_rt_mutex_waiters(lock, true); in rtlock_slowlock_locked()
1867 trace_contention_end(lock, 0); in rtlock_slowlock_locked()
1870 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) in rtlock_slowlock() argument
1875 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rtlock_slowlock()
1876 rtlock_slowlock_locked(lock, &wake_q); in rtlock_slowlock()
1877 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in rtlock_slowlock()