Path: blob/master/arch/tile/include/asm/atomic_64.h
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
        int val;
        __insn_mtspr(SPR_CMPEXCH_VALUE, o);
        smp_mb();  /* barrier for proper semantics */
        val = __insn_cmpexch4((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}

static inline int atomic_xchg(atomic_t *v, int n)
{
        int val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_exch4((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}

static inline void atomic_add(int i, atomic_t *v)
{
        __insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        int val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd4((void *)&v->counter, i) + i;
        barrier();  /* the "+ i" above will wait on memory */
        return val;
}

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = atomic_cmpxchg(v, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}
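/*
 * Illustrative sketch (not part of the original header): a typical
 * caller-side use of atomic_add_unless() above is a "take a reference
 * unless the count has already dropped to zero" helper.  The struct and
 * function names below are hypothetical, shown only to make the return
 * convention concrete: the call returns non-zero iff it added 'a', i.e.
 * the counter was not already equal to 'u'.
 *
 *	struct my_obj {				// hypothetical object
 *		atomic_t refcount;
 *	};
 *
 *	static inline int my_obj_tryget(struct my_obj *obj)
 *	{
 *		return atomic_add_unless(&obj->refcount, 1, 0);
 *	}
 */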
/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v, i)	((v)->counter = (i))

static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
        long val;
        smp_mb();  /* barrier for proper semantics */
        __insn_mtspr(SPR_CMPEXCH_VALUE, o);
        val = __insn_cmpexch((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}

static inline long atomic64_xchg(atomic64_t *v, long n)
{
        long val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_exch((void *)&v->counter, n);
        smp_mb();  /* barrier for proper semantics */
        return val;
}

static inline void atomic64_add(long i, atomic64_t *v)
{
        __insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        long val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd((void *)&v->counter, i) + i;
        barrier();  /* the "+ i" above will wait on memory */
        return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = atomic64_cmpxchg(v, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */
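/*
 * Hedged, self-contained sketch (not part of this header): the
 * __insn_cmpexch()/__insn_fetchadd() intrinsics above are TILE-Gx
 * specific, so the same compare-exchange retry loop that
 * atomic_add_unless()/atomic64_add_unless() implement is shown below
 * with portable C11 atomics.  The function name add_unless() is made
 * up for the illustration.
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static bool add_unless(_Atomic long *counter, long a, long u)
 *	{
 *		long old = atomic_load(counter);
 *
 *		while (old != u) {
 *			// On failure, the current value is copied back
 *			// into 'old' and the loop retries.
 *			if (atomic_compare_exchange_weak(counter, &old, old + a))
 *				return true;	// added 'a'
 *		}
 *		return false;			// counter was already 'u'
 *	}
 */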