// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc.).
 *
 * Using a single MCS node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */

struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # + 1 value */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 *
 * If osq_lock() is being cancelled there must be a previous node
 * and 'old_cpu' is its CPU #.
 * For osq_unlock() there is never a previous node and old_cpu is
 * set to OSQ_UNLOCKED_VAL.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      int old_cpu)
{
	int curr = encode_cpu(smp_processor_id());

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old_cpu) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			return NULL;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			struct optimistic_spin_node *next;

			next = xchg(&node->next, NULL);
			if (next)
				return next;
		}

		cpu_relax();
	}
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with corresponding RELEASE in
	 * unlock() uncontended, or fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;

	/*
	 * osq_lock()			unqueue
	 *
	 * node->prev = prev		osq_wait_next()
	 * WMB				MB
	 * prev->next = node		next->prev = prev // unqueue-C
	 *
	 * Here 'node->prev' and 'next->prev' are the same variable and we need
	 * to ensure these stores happen in-order to avoid corrupting the list.
	 */
	smp_wmb();

	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store, because at that
	 * moment unlock can proceed and wipe the node element from the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	/*
	 * Wait to acquire the lock or for cancellation. Note that need_resched()
	 * will come with an IPI, which will wake smp_cond_load_relaxed() if it
	 * is implemented with a monitor-wait. vcpu_is_preempted() relies on
	 * polling, be careful.
	 */
	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
				  vcpu_is_preempted(node_cpu(node->prev))))
		return true;

	/* unqueue */
	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		/*
		 * cpu_relax() below implies a compiler barrier which would
		 * prevent this comparison being optimized away.
		 */
		if (data_race(prev->next) == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev->cpu);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (atomic_try_cmpxchg_release(&lock->tail, &curr, OSQ_UNLOCKED_VAL))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, OSQ_UNLOCKED_VAL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
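
/*
 * Illustrative sketch only, not part of the upstream file: roughly how a
 * sleeping lock's slowpath is expected to wrap osq_lock()/osq_unlock()
 * around its optimistic-spinning phase, per the header comment above.
 * 'struct my_sleeping_lock', 'my_optimistic_spin()' and the bare atomic
 * 'locked' word are hypothetical stand-ins, not the real mutex/rwsem code;
 * only the OSQ calls and the preemption-disabled requirement come from this
 * file. Kept under #if 0 so it is never built.
 */
#if 0
struct my_sleeping_lock {
	atomic_t			locked;	/* 0 = free, 1 = held */
	struct optimistic_spin_queue	osq;	/* set up with osq_lock_init() */
	/* ... wait list and the rest of the real lock elided ... */
};

static bool my_optimistic_spin(struct my_sleeping_lock *lock)
{
	bool acquired = false;

	/*
	 * Preemption must stay disabled while we own a per-CPU osq_node,
	 * as required by the header comment of this file.
	 */
	preempt_disable();

	/*
	 * osq_lock() admits a single spinner per lock; everyone else queues
	 * behind it and spins on its own per-CPU node->locked instead of
	 * hammering the lock word. A false return means the spin was
	 * cancelled (e.g. need_resched()) and we should go sleep instead.
	 */
	if (!osq_lock(&lock->osq))
		goto out;

	while (!need_resched()) {
		if (atomic_cmpxchg_acquire(&lock->locked, 0, 1) == 0) {
			acquired = true;
			break;
		}
		cpu_relax();
	}

	osq_unlock(&lock->osq);
out:
	preempt_enable();
	return acquired;	/* false: caller takes the sleeping slowpath */
}
#endif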