Path: arch/tile/include/asm/atomic_32.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <asm/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns the new value of @v.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}
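/*
 * A minimal userspace sketch (guarded out of compilation; not the kernel
 * implementation) of the lock-based scheme above.  A pthread mutex stands
 * in for the hashed "tns" spinlock the real _atomic_xxx() routines
 * acquire, and all model_* names are hypothetical.  It illustrates the
 * comment on atomic_set(): a raw store could land between the load and
 * the store of a concurrent locked add and be silently overwritten, so a
 * "set" must take the same lock as every other op.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int model_xchg_add(int *p, int i)
{
	pthread_mutex_lock(&lock);
	int old = *p;		/* load... */
	*p = old + i;		/* ...modify/write, all under the lock */
	pthread_mutex_unlock(&lock);
	return old;
}

static int model_xchg(int *p, int n)
{
	pthread_mutex_lock(&lock);	/* same lock: a set cannot slip
					 * inside a concurrent add */
	int old = *p;
	*p = n;
	pthread_mutex_unlock(&lock);
	return old;
}

int main(void)
{
	int v = 40;
	model_xchg_add(&v, 2);	/* v is now 42 */
	model_xchg(&v, 7);	/* "set" via exchange, as atomic_set() does */
	printf("%d\n", v);	/* prints 7 */
	return 0;
}
#endif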
/* A 64-bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns the new value of @v.
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
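/*
 * A hedged sketch (again guarded out, with hypothetical model64_* names)
 * of the atomic64_read() trick above: on a 32-bit core a plain 64-bit
 * load compiles to two 32-bit loads and can observe a torn value, so the
 * read is performed as an exchange-add of zero under the same lock that
 * serializes every other 64-bit op.  A pthread mutex again stands in for
 * the hashed tns lock.
 */
#if 0
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock64 = PTHREAD_MUTEX_INITIALIZER;

static uint64_t model64_xchg_add(uint64_t *p, uint64_t i)
{
	pthread_mutex_lock(&lock64);
	uint64_t old = *p;	/* both 32-bit halves observed consistently */
	*p = old + i;
	pthread_mutex_unlock(&lock64);
	return old;
}

/* Read by adding zero: the returned "old" value is the current value. */
static uint64_t model64_read(const uint64_t *p)
{
	/* casting away const is safe: adding 0 never modifies the word */
	return model64_xchg_add((uint64_t *)p, 0);
}

int main(void)
{
	uint64_t v = 0x100000000ULL;	/* halves could tear on a plain load */
	printf("%llu\n", (unsigned long long)model64_read(&v));
	return 0;
}
#endif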
/*
 * We need to barrier before modifying the word, since the _atomic_xxx()
 * routines just tns (test-and-set) the lock and then do a
 * read/modify/write of the word.  But after the word is updated, the
 * routine issues an "mf" before returning, and since it's a function
 * call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
	(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT	6
#define ATOMIC_HASH_L1_SIZE	(1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT	(CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE	(1 << ATOMIC_HASH_L2_SHIFT)

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[].  Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT	(PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE	(1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG		20
#define ATOMIC_LOCK_REG_NAME	r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */
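/*
 * A sketch of the address-to-lock hashing that the ATOMIC_HASH_* sizing
 * above implies (guarded out; the kernel's real mapping lives in the tile
 * library code and may differ, so this is only an illustration).  Atomic
 * words are 4-byte aligned, so the low two address bits carry no
 * information; the next bits index a power-of-two array of lock words.
 * MODEL_PAGE_SHIFT of 16 assumes the 64 KB pages this port uses by
 * default, and all MODEL-prefixed names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT	16			/* assumed 64 KB pages */
#define MODEL_HASH_SHIFT	(MODEL_PAGE_SHIFT - 3)	/* mirrors ATOMIC_HASH_SHIFT */
#define MODEL_HASH_SIZE		(1 << MODEL_HASH_SHIFT)

static int model_locks[MODEL_HASH_SIZE];	/* one "tns" word per slot */

/* Map an atomic word's address to the lock that guards it. */
static int *model_lock_for(volatile void *p)
{
	uintptr_t bits = (uintptr_t)p >> 2;	/* drop the alignment bits */
	return &model_locks[bits & (MODEL_HASH_SIZE - 1)];
}

int main(void)
{
	int word;
	printf("lock slot %ld\n", (long)(model_lock_for(&word) - model_locks));
	return 0;
}
#endif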