Path: arch/arc/include/asm/atomic-spinlock.h
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
#define _ASM_ARC_ATOMIC_SPLOCK_H

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

static inline void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	unsigned int temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned int orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define arch_atomic_andnot		arch_atomic_andnot

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif
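For reference, below is an illustrative, hand-expanded version of ATOMIC_OPS(add, +=, add), i.e. the three functions the preprocessor emits for the "add" case; it is not part of the header itself. The atomic_ops_lock()/atomic_ops_unlock() helpers are supplied elsewhere by the ARC port and, as the top comment says, amount to IRQ disabling on UP and spinlocks on SMP. Note also that the asm_op argument ("add") is never used in this spinlock fallback; presumably it exists only so the macro signature matches the hardware-assisted variant of these ops.

/*
 * Illustrative expansion of ATOMIC_OPS(add, +=, add) -- shown only to make
 * the macro machinery concrete, not part of the header.
 */

static inline void arch_atomic_add(int i, atomic_t *v)		/* from ATOMIC_OP */
{
	unsigned long flags;

	atomic_ops_lock(flags);		/* irq-off (UP) / irq-off + spinlock (SMP) */
	v->counter += i;		/* c_op was "+=" */
	atomic_ops_unlock(flags);
}

static inline int arch_atomic_add_return(int i, atomic_t *v)	/* from ATOMIC_OP_RETURN */
{
	unsigned long flags;
	unsigned int temp;

	/* spin lock/unlock provides the needed smp_mb() before/after */
	atomic_ops_lock(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;			/* returns the new value */
}

static inline int arch_atomic_fetch_add(int i, atomic_t *v)	/* from ATOMIC_FETCH_OP */
{
	unsigned long flags;
	unsigned int orig;

	/* spin lock/unlock provides the needed smp_mb() before/after */
	atomic_ops_lock(flags);
	orig = v->counter;
	v->counter += i;
	atomic_ops_unlock(flags);

	return orig;			/* returns the pre-operation value */
}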