Path: blob/master/arch/parisc/include/asm/atomic.h
/* Copyright (C) 2000 Philipp Rumpf <[email protected]>
 * Copyright (C) 2006 Kyle McMartin <[email protected]>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>	/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
        arch_spinlock_t *s = ATOMIC_HASH(l);	\
        local_irq_save(f);			\
        arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
        arch_spinlock_t *s = ATOMIC_HASH(l);		\
        arch_spin_unlock(s);				\
        local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
        switch(size) {
#ifdef CONFIG_64BIT
        case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
        case 4: return __xchg32((int) x, (int *) ptr);
        case 1: return __xchg8((char) x, (char *) ptr);
        }
        __xchg_called_with_bad_pointer();
        return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
**   o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**	if (((unsigned long)p & 0xf) == 0)
**		return __ldcw(p);
*/
#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))


#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
        switch(size) {
#ifdef CONFIG_64BIT
        case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
        case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
        __typeof__(*(ptr)) _o_ = (o);					\
        __typeof__(*(ptr)) _n_ = (n);					\
        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
                            (unsigned long)_n_, sizeof(*(ptr)));	\
})

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new_, int size)
{
        switch (size) {
#ifdef CONFIG_64BIT
        case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
        case 4: return __cmpxchg_u32(ptr, old, new_);
        default:
                return __cmpxchg_local_generic(ptr, old, new_, size);
        }
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
                        (unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
({									\
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
        cmpxchg_local((ptr), (o), (n));					\
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
        int ret;
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        ret = (v->counter += i);

        _atomic_spin_unlock_irqrestore(v, flags);
        return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
        return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return( 1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return( -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return( 1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return( -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ int
__atomic64_add_return(s64 i, atomic64_t *v)
{
        int ret;
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        ret = (v->counter += i);

        _atomic_spin_unlock_irqrestore(v, flags);
        return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
        return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return( 1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return( -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return( 1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return( -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#else /* CONFIG_64BIT */

#include <asm-generic/atomic64.h>

#endif /* !CONFIG_64BIT */

#include <asm-generic/atomic-long.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */
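
Below is a minimal consumer-side sketch of the interface this header provides, shown as it would appear in a caller rather than in the header itself. It exercises ATOMIC_INIT, atomic_set, atomic_inc_not_zero and atomic_dec_and_test in the usual reference-counting pattern. The struct and function names (foo_object, foo_get, foo_put) are hypothetical, chosen only for illustration, and the include line assumes the header is reached directly via <asm/atomic.h>.

#include <asm/atomic.h>		/* assumed include path for this header */

/* Hypothetical refcounted object, for illustration only. */
struct foo_object {
        atomic_t refcount;
};

static atomic_t foo_count = ATOMIC_INIT(0);	/* global instance counter */

static void foo_init(struct foo_object *obj)
{
        atomic_set(&obj->refcount, 1);	/* creator holds the first reference */
        atomic_inc(&foo_count);
}

/* Take a reference only while the object is still live (refcount != 0). */
static int foo_get(struct foo_object *obj)
{
        return atomic_inc_not_zero(&obj->refcount);
}

/* Drop a reference; returns non-zero once the last reference is gone. */
static int foo_put(struct foo_object *obj)
{
        if (atomic_dec_and_test(&obj->refcount)) {
                atomic_dec(&foo_count);
                return 1;	/* caller may now free obj */
        }
        return 0;
}

On SMP parisc every one of these calls serializes through one of the hashed __atomic_hash spinlocks defined above, so the pattern is correct but not lock-free.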