Lines matching full:locked
92 try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) { in pv_hybrid_queued_unfair_trylock()
124 return !READ_ONCE(lock->locked) && in trylock_clear_pending()
142 * Try to clear pending bit & set locked bit in trylock_clear_pending()
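The trylock_clear_pending() matches above describe one step: when the owner has cleared the locked byte but our pending bit is still set, a single cmpxchg-acquire turns "pending, unlocked" into "no pending, locked". Below is a minimal userspace sketch of that idea using C11 atomics instead of the kernel's try_cmpxchg_acquire(); struct pv_lock, LOCKED and PENDING are illustrative stand-ins (a simplified lock word with no tail field), not the kernel's definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCKED  0x01u    /* locked byte set in the lock word  */
#define PENDING 0x100u   /* pending bit set in the lock word  */

/* Simplified lock word: just locked + pending, no tail field. */
struct pv_lock { _Atomic uint32_t val; };

/*
 * Pending waiter: if the owner has released the lock (locked byte clear)
 * while our pending bit is still set, atomically replace "pending, unlocked"
 * with "no pending, locked" to take the lock.
 */
static bool trylock_clear_pending(struct pv_lock *l)
{
    uint32_t old = PENDING;

    return !(atomic_load_explicit(&l->val, memory_order_relaxed) & LOCKED) &&
           atomic_compare_exchange_strong_explicit(&l->val, &old, LOCKED,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}

int main(void)
{
    struct pv_lock l;

    atomic_init(&l.val, PENDING);   /* owner released; our pending bit is set */
    return trylock_clear_pending(&l) ? 0 : 1;
}
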
286 * Wait for node->locked to become true, halt the vcpu after a short spin.
299 if (READ_ONCE(node->locked)) in pv_wait_node()
309 * Order pn->state vs pn->locked thusly: in pv_wait_node()
311 * [S] pn->state = VCPU_HALTED [S] next->locked = 1 in pv_wait_node()
313 * [L] pn->locked [RmW] pn->state = VCPU_HASHED in pv_wait_node()
319 if (!READ_ONCE(node->locked)) { in pv_wait_node()
333 * If the locked flag is still not set after wakeup, it is a in pv_wait_node()
340 !READ_ONCE(node->locked)); in pv_wait_node()
344 * By now our node->locked should be 1 and our caller will not actually in pv_wait_node()
351 * Called after setting next->locked = 1 when we're the lock owner.
364 * observe its next->locked value and advance itself. in pv_kick_node()
368 * The write to next->locked in arch_mcs_spin_unlock_contended() in pv_kick_node()
387 WRITE_ONCE(lock->locked, _Q_SLOW_VAL); in pv_kick_node()
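Taken together, the pv_wait_node() and pv_kick_node() matches describe a classic sleep/wakeup handshake: the waiter publishes VCPU_HALTED before it re-checks node->locked, and the lock owner sets next->locked before it inspects (and claims) the waiter's state, so at least one side is guaranteed to observe the other and no wakeup is lost. The sketch below is a simplified userspace rendering of that ordering; struct pv_node, wait_side(), kick_side() and the pv_wait()/pv_kick() stubs are illustrative names, and the real pv_kick_node() marks the lock with _Q_SLOW_VAL rather than kicking right away.

#include <stdatomic.h>
#include <stdio.h>

enum vcpu_state { VCPU_RUNNING, VCPU_HALTED, VCPU_HASHED };

struct pv_node {
    _Atomic int locked;   /* MCS "you may proceed" flag          */
    _Atomic int state;    /* enum vcpu_state of the waiting vCPU */
};

/* Stand-ins for the halt/kick hypercalls; the real ones block and wake a vCPU. */
static void pv_wait(struct pv_node *n) { (void)n; puts("halt vCPU"); }
static void pv_kick(struct pv_node *n) { (void)n; puts("kick vCPU"); }

/* Waiter: publish HALTED *before* re-checking locked, then halt if still unset. */
static void wait_side(struct pv_node *n)
{
    atomic_store(&n->state, VCPU_HALTED);   /* [S] pn->state = VCPU_HALTED */
    if (!atomic_load(&n->locked))           /* [L] pn->locked              */
        pv_wait(n);
    atomic_store(&n->state, VCPU_RUNNING);
}

/* Waker: set locked *before* inspecting (and claiming) the waiter's state. */
static void kick_side(struct pv_node *n)
{
    int halted = VCPU_HALTED;

    atomic_store(&n->locked, 1);                    /* [S] next->locked = 1 */
    if (atomic_compare_exchange_strong(&n->state,   /* [RmW] pn->state      */
                                       &halted, VCPU_HASHED))
        pv_kick(n);   /* simplified: the kernel defers the kick to unlock time */
}

int main(void)
{
    struct pv_node n;

    atomic_init(&n.locked, 0);
    atomic_init(&n.state, VCPU_RUNNING);
    kick_side(&n);   /* waker runs first here, so the waiter never halts */
    wait_side(&n);
    return 0;
}

The kernel gets the same guarantee from smp_store_mb() on the waiter side; in this sketch the default sequentially consistent atomics provide the store-to-load ordering that both columns of the comment rely on.
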
392 * Wait for l->locked to become clear and acquire the lock;
446 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL in pv_wait_head_or_lock()
448 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash> in pv_wait_head_or_lock()
452 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) { in pv_wait_head_or_lock()
458 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); in pv_wait_head_or_lock()
466 pv_wait(&lock->locked, _Q_SLOW_VAL); in pv_wait_head_or_lock()
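The pv_wait_head_or_lock() matches show the queue head announcing the slow path before it halts: it xchg()s _Q_SLOW_VAL into the locked byte so the eventual unlocker knows it must kick, and if the xchg() returns 0 the lock was actually free, so the head takes it and rewrites _Q_LOCKED_VAL instead of waiting. Below is a rough userspace rendering of that step; mark_slow_or_lock(), pv_wait_byte() and struct pv_lock are illustrative, not the kernel's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_VAL 1u
#define Q_SLOW_VAL   3u

struct pv_lock { _Atomic uint8_t locked; };

/* Stand-in for the halt hypercall: block while *ptr still equals val. */
static void pv_wait_byte(_Atomic uint8_t *ptr, uint8_t val)
{
    (void)ptr; (void)val;
    puts("halt vCPU until the unlocker kicks us");
}

/*
 * Queue head about to halt: swap _Q_SLOW_VAL into the locked byte so the
 * unlocker knows it must take the slow path and kick us.  If the old value
 * was 0 the lock was actually free, so we just took it; restore the normal
 * locked value and report success instead of waiting.
 */
static bool mark_slow_or_lock(struct pv_lock *l)
{
    if (atomic_exchange(&l->locked, Q_SLOW_VAL) == 0) {
        atomic_store(&l->locked, Q_LOCKED_VAL);
        return true;                  /* lock acquired, no halt needed */
    }

    pv_wait_byte(&l->locked, Q_SLOW_VAL);
    return false;                     /* woken up; caller retries for the lock */
}

int main(void)
{
    struct pv_lock l;

    atomic_init(&l.locked, 0);        /* lock happens to be free */
    return mark_slow_or_lock(&l) ? 0 : 1;
}
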
499 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked) in __pv_queued_spin_unlock_slowpath() argument
503 if (unlikely(locked != _Q_SLOW_VAL)) { in __pv_queued_spin_unlock_slowpath()
529 smp_store_release(&lock->locked, 0); in __pv_queued_spin_unlock_slowpath()
545 u8 locked = _Q_LOCKED_VAL; in __pv_queued_spin_unlock() local
552 if (try_cmpxchg_release(&lock->locked, &locked, 0)) in __pv_queued_spin_unlock()
555 __pv_queued_spin_unlock_slowpath(lock, locked); in __pv_queued_spin_unlock()
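Finally, the __pv_queued_spin_unlock() matches show how the unlock is split: a single cmpxchg-release of the locked byte from _Q_LOCKED_VAL to 0 handles the common, uncontended case, and any other value (i.e. the _Q_SLOW_VAL left by a halted queue head) falls through to the slow path, which drops the byte with a store-release and then kicks the halted vCPU. The sketch below is a hedged, simplified userspace version of that split; pv_unlock(), unlock_slowpath() and kick_queue_head() are illustrative stand-ins for the kernel's __pv_queued_spin_unlock*() and its hash-lookup/kick machinery.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_VAL 1u
#define Q_SLOW_VAL   3u

struct pv_lock { _Atomic uint8_t locked; };

/* Stand-in for "look up the halted queue head in the hash and kick its vCPU". */
static void kick_queue_head(struct pv_lock *l)
{
    (void)l;
    puts("slow path: kick the halted queue head");
}

static void unlock_slowpath(struct pv_lock *l, uint8_t locked)
{
    if (locked != Q_SLOW_VAL)         /* unexpected value: nothing sensible to do */
        return;

    /* Release the lock first, then wake the waiter. */
    atomic_store_explicit(&l->locked, 0, memory_order_release);
    kick_queue_head(l);
}

static void pv_unlock(struct pv_lock *l)
{
    uint8_t locked = Q_LOCKED_VAL;

    /* Fast path: no one halted, drop the byte from LOCKED straight to 0. */
    if (atomic_compare_exchange_strong_explicit(&l->locked, &locked, 0,
                                                memory_order_release,
                                                memory_order_relaxed))
        return;

    /* Byte held _Q_SLOW_VAL: a queue head halted, take the slow path. */
    unlock_slowpath(l, locked);
}

int main(void)
{
    struct pv_lock l;

    atomic_init(&l.locked, Q_SLOW_VAL);   /* a waiter marked the slow path */
    pv_unlock(&l);
    return 0;
}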