Path: blob/master/include/asm-generic/bitops/atomic.h
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * It may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
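
A minimal usage sketch (not part of the header above): it shows how a caller might use these generic atomic bitops to manage a small flag word, and why test_and_set_bit() makes the "claim" race-free — on SMP the read of the old word and the write of the new one happen under the same hashed spinlock, so two CPUs cannot both see the bit clear. The names DEVICE_NR_FLAGS, FLAG_BUSY, FLAG_ERROR, device_flags, device_try_start() and device_finish() are hypothetical, invented for this example; only the bitop and barrier calls come from the kernel API shown above.

/* Illustrative sketch only -- hypothetical driver-side code. */
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>

#define DEVICE_NR_FLAGS	8
#define FLAG_BUSY	0
#define FLAG_ERROR	1

static DECLARE_BITMAP(device_flags, DEVICE_NR_FLAGS);

static int device_try_start(void)
{
	/* Atomically claim the device: old bit value tells us who won. */
	if (test_and_set_bit(FLAG_BUSY, device_flags))
		return -EBUSY;
	return 0;
}

static void device_finish(int failed)
{
	if (failed)
		set_bit(FLAG_ERROR, device_flags);

	/*
	 * clear_bit() implies no memory barrier, so when it is used as an
	 * "unlock" the header asks for smp_mb__before_clear_bit() first.
	 */
	smp_mb__before_clear_bit();
	clear_bit(FLAG_BUSY, device_flags);
}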