Lines matching refs:owner

Cross-reference hits for the identifier owner in the kernel mutex implementation (the functions shown all live in kernel/locking/mutex.c). Each entry gives the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks a line that declares owner as a function argument or local variable.

45 atomic_long_set(&lock->owner, 0); in __mutex_init()
78 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS); in __mutex_owner()
81 static inline struct task_struct *__owner_task(unsigned long owner) in __owner_task() argument
83 return (struct task_struct *)(owner & ~MUTEX_FLAGS); in __owner_task()
92 static inline unsigned long __owner_flags(unsigned long owner) in __owner_flags() argument
94 return owner & MUTEX_FLAGS; in __owner_flags()
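The cluster above (lines 45-94) shows the core encoding: lock->owner is a single atomic word holding the owning task_struct pointer with state flags packed into its low bits, which are free because task_struct is aligned to more than 8 bytes. An owner word of 0 therefore means "unlocked, no flags", which is why __mutex_init() simply stores 0. Below is a minimal userspace sketch of the same tagged-pointer scheme using C11 atomics; the flag values mirror mainline mutex.c, and the later sketches build on these definitions:

#include <stdatomic.h>
#include <stdbool.h>

/* Flag bits packed into the low bits of the owner word
 * (values as in mainline kernel/locking/mutex.c). */
#define MUTEX_FLAG_WAITERS 0x01 /* wait list is non-empty */
#define MUTEX_FLAG_HANDOFF 0x02 /* unlocker must hand lock to top waiter */
#define MUTEX_FLAG_PICKUP  0x04 /* handoff done, waiter may take the lock */
#define MUTEX_FLAGS        0x07

struct task; /* stand-in for struct task_struct */

struct mutex_sketch {
    atomic_ulong owner; /* 0 == unlocked; else task pointer | flags */
};

static inline struct task *owner_task(unsigned long owner)
{
    /* the pointer is at least 8-byte aligned, so masking off the
     * three flag bits recovers it intact */
    return (struct task *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long owner_flags(unsigned long owner)
{
    return owner & MUTEX_FLAGS;
}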
102 unsigned long owner, curr = (unsigned long)current; in __mutex_trylock_common() local
104 owner = atomic_long_read(&lock->owner); in __mutex_trylock_common()
106 unsigned long flags = __owner_flags(owner); in __mutex_trylock_common()
107 unsigned long task = owner & ~MUTEX_FLAGS; in __mutex_trylock_common()
126 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) { in __mutex_trylock_common()
133 return __owner_task(owner); in __mutex_trylock_common()
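Lines 102-133 are the common trylock: read the owner word, split it into task and flags, and try an acquire-ordered cmpxchg that installs the caller as owner while preserving the flag bits. The loop is required because a waiter can concurrently flip a flag bit, failing the cmpxchg even though the lock never changed hands. A simplified sketch, continuing the types above and omitting the HANDOFF/PICKUP branches the real __mutex_trylock_common() also handles:

/* Returns NULL on success (caller now owns the lock), otherwise the
 * owner that was observed.  Sketch only: no handoff/pickup handling. */
static struct task *trylock_common(struct mutex_sketch *lock, struct task *curr)
{
    unsigned long owner = atomic_load_explicit(&lock->owner,
                                               memory_order_relaxed);
    for (;;) {
        unsigned long flags = owner_flags(owner);

        if (owner_task(owner)) /* already owned by someone */
            return owner_task(owner);

        /* Install ourselves, keeping any flag bits waiters set
         * concurrently.  Acquire pairs with the release on unlock. */
        if (atomic_compare_exchange_weak_explicit(&lock->owner, &owner,
                (unsigned long)curr | flags,
                memory_order_acquire, memory_order_relaxed))
            return NULL;
        /* failed cmpxchg reloaded 'owner'; retry */
    }
}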
168 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) in __mutex_trylock_fast()
178 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL); in __mutex_unlock_fast()
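Lines 168-178 are the uncontended fast paths: locking is a single cmpxchg of 0 -> current with acquire ordering, unlocking a single cmpxchg of current -> 0 with release ordering, which brackets the critical section without any heavier barrier. The unlock cmpxchg deliberately fails if any flag bit is set, forcing the slowpath so a waiter can be woken. Sketch:

static bool trylock_fast(struct mutex_sketch *lock, struct task *curr)
{
    unsigned long zero = 0UL; /* unlocked and no flags, i.e. no waiters */

    return atomic_compare_exchange_strong_explicit(&lock->owner, &zero,
            (unsigned long)curr,
            memory_order_acquire, memory_order_relaxed);
}

static bool unlock_fast(struct mutex_sketch *lock, struct task *curr)
{
    unsigned long expected = (unsigned long)curr;

    /* only succeeds when no flag bits are set */
    return atomic_compare_exchange_strong_explicit(&lock->owner,
            &expected, 0UL,
            memory_order_release, memory_order_relaxed);
}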
184 atomic_long_or(flag, &lock->owner); in __mutex_set_flag()
189 atomic_long_andnot(flag, &lock->owner); in __mutex_clear_flag()
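Lines 184-189 set and clear individual flag bits without disturbing the owner pointer, using atomic OR and AND-NOT. C11 has no fetch_andnot, so the userspace equivalent ANDs with the complement:

static void set_flag(struct mutex_sketch *lock, unsigned long flag)
{
    atomic_fetch_or_explicit(&lock->owner, flag, memory_order_relaxed);
}

static void clear_flag(struct mutex_sketch *lock, unsigned long flag)
{
    atomic_fetch_and_explicit(&lock->owner, ~flag, memory_order_relaxed);
}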
230 unsigned long owner = atomic_long_read(&lock->owner); in __mutex_handoff() local
235 MUTEX_WARN_ON(__owner_task(owner) != current); in __mutex_handoff()
236 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); in __mutex_handoff()
238 new = (owner & MUTEX_FLAG_WAITERS); in __mutex_handoff()
243 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new)) in __mutex_handoff()
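Lines 230-243 implement handoff: once a starving waiter has set MUTEX_FLAG_HANDOFF, the unlocking owner transfers the lock directly to the top waiter rather than releasing it for anyone to steal. The new owner word keeps only the WAITERS bit, stores the waiter's task pointer, and sets PICKUP so only that waiter may complete the acquisition; the release cmpxchg retries if the word changed underneath. A sketch:

static void handoff(struct mutex_sketch *lock, struct task *top_waiter)
{
    unsigned long owner = atomic_load_explicit(&lock->owner,
                                               memory_order_relaxed);
    for (;;) {
        /* keep WAITERS, drop HANDOFF, install the waiter as the
         * new owner, reserved for pickup */
        unsigned long new = owner & MUTEX_FLAG_WAITERS;

        new |= (unsigned long)top_waiter;
        if (top_waiter)
            new |= MUTEX_FLAG_PICKUP;

        /* release ordering publishes our critical section to the
         * waiter that picks the lock up */
        if (atomic_compare_exchange_weak_explicit(&lock->owner, &owner,
                new, memory_order_release, memory_order_relaxed))
            break;
    }
}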
329 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) in ww_mutex_spin_on_owner()
349 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, in mutex_spin_on_owner() argument
356 while (__mutex_owner(lock) == owner) { in mutex_spin_on_owner()
370 if (!owner->on_cpu || need_resched() || in mutex_spin_on_owner()
371 vcpu_is_preempted(task_cpu(owner))) { in mutex_spin_on_owner()
392 struct task_struct *owner; in mutex_can_spin_on_owner() local
405 owner = __mutex_owner(lock); in mutex_can_spin_on_owner()
412 if (owner) in mutex_can_spin_on_owner()
413 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); in mutex_can_spin_on_owner()
469 struct task_struct *owner; in mutex_optimistic_spin() local
472 owner = __mutex_trylock_or_owner(lock); in mutex_optimistic_spin()
473 if (!owner) in mutex_optimistic_spin()
480 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) in mutex_optimistic_spin()
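Lines 329-480 are the optimistic-spin path: before sleeping, a contender spins as long as the current owner is running on another CPU, on the theory that a running owner will release the lock soon. The spin aborts when the owner goes off-CPU (it cannot release the lock while asleep), when the spinner itself should reschedule, or when the owner's vCPU was preempted. A structural sketch follows; task_on_cpu() and should_resched() are hypothetical stand-ins for the kernel's owner->on_cpu, need_resched() and vcpu_is_preempted() checks, and the can-spin precheck and OSQ queueing of the real mutex_optimistic_spin() are omitted:

/* trivially stubbed scheduler hooks; a real port would wire these
 * to actual scheduler state */
static bool task_on_cpu(struct task *t) { (void)t; return true; }
static bool should_resched(void) { return false; }

static bool spin_on_owner(struct mutex_sketch *lock, struct task *owner)
{
    /* spin while the same task still owns the lock... */
    while (owner_task(atomic_load_explicit(&lock->owner,
                                           memory_order_relaxed)) == owner) {
        /* ...but give up if the owner went off-CPU or we must
         * reschedule ourselves */
        if (!task_on_cpu(owner) || should_resched())
            return false;
    }
    return true; /* ownership changed: worth another trylock */
}

static bool optimistic_spin(struct mutex_sketch *lock, struct task *curr)
{
    for (;;) {
        struct task *owner = trylock_common(lock, curr);

        if (!owner)
            return true;  /* acquired */
        if (!spin_on_owner(lock, owner))
            return false; /* fall back to sleeping in the slowpath */
    }
}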
898 unsigned long owner; in __mutex_unlock_slowpath() local
909 owner = atomic_long_read(&lock->owner); in __mutex_unlock_slowpath()
911 MUTEX_WARN_ON(__owner_task(owner) != current); in __mutex_unlock_slowpath()
912 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); in __mutex_unlock_slowpath()
914 if (owner & MUTEX_FLAG_HANDOFF) in __mutex_unlock_slowpath()
917 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) { in __mutex_unlock_slowpath()
918 if (owner & MUTEX_FLAG_WAITERS) in __mutex_unlock_slowpath()
939 if (owner & MUTEX_FLAG_HANDOFF) in __mutex_unlock_slowpath()
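Lines 898-939 are the unlock slowpath, entered when the fast-path cmpxchg failed because flag bits are set. If HANDOFF was requested, the owner word is left intact and the lock is handed to the top waiter; otherwise a release cmpxchg clears the task pointer while preserving the flags, and a waiter is woken if WAITERS was set. A condensed sketch of that control flow (the real function does the wakeup and handoff under lock->wait_lock, with the actual top waiter rather than the placeholder below):

static void unlock_slowpath(struct mutex_sketch *lock)
{
    unsigned long owner = atomic_load_explicit(&lock->owner,
                                               memory_order_relaxed);
    for (;;) {
        if (owner & MUTEX_FLAG_HANDOFF)
            break; /* keep the lock held; hand it off below */

        /* drop the task pointer, keep the flag bits */
        if (atomic_compare_exchange_weak_explicit(&lock->owner, &owner,
                owner_flags(owner),
                memory_order_release, memory_order_relaxed)) {
            if (!(owner & MUTEX_FLAG_WAITERS))
                return; /* nobody to wake */
            break;
        }
    }

    /* here the kernel takes the wait lock, picks the first waiter,
     * wakes it, and on HANDOFF calls the handoff path */
    if (owner & MUTEX_FLAG_HANDOFF)
        handoff(lock, NULL /* placeholder: the real top waiter */);
}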