arch/xtensa/include/asm/atomic.h
/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level 1.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, 1
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * must not occur between the rsil and wsr instructions.  By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		(*(volatile int *)&(v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"add     %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
	);
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"sub     %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
	);
}

/*
 * We use atomic_{add|sub}_return to define other functions.
 */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"add     %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
	);

	return vval;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"sub     %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
	);

	return vval;
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
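/*
 * Illustrative sketch only, not part of this header: one common way a
 * caller might combine the *_return primitives above.  The names
 * 'users', device_power_up() and device_power_down() are hypothetical.
 *
 *	static atomic_t users = ATOMIC_INIT(0);
 *
 *	void device_get(void)
 *	{
 *		if (atomic_add_return(1, &users) == 1)
 *			device_power_up();	// first user switched it on
 *	}
 *
 *	void device_put(void)
 *	{
 *		if (atomic_sub_and_test(1, &users))
 *			device_power_down();	// last user switched it off
 *	}
 */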
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v)	atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the result.
 */
#define atomic_inc_return(v)	atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v)	atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the result.
 */
#define atomic_dec_return(v)	atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v)	(atomic_sub_return(1,(v)) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v)	(atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned int all_f = -1;
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"xor     %1, %4, %3             \n\t"
		"and     %0, %0, %4             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval), "=a" (mask)
		: "a" (v), "a" (all_f), "1" (mask)
		: "a15", "memory"
	);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"or      %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (mask), "a" (v)
		: "a15", "memory"
	);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */
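/*
 * Illustrative sketch only, not part of this header: the cmpxchg loop
 * inside atomic_add_unless() is the usual building block for "take a
 * reference only while the object is still live".  'obj', 'refcnt' and
 * free_obj() are hypothetical.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;		// refcnt was 0: object is being torn down
 *	// ... use obj ...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);		// we dropped the last reference
 */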