Path: sys/compat/linuxkpi/common/include/asm/atomic.h
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_ASM_ATOMIC_H_
#define _LINUXKPI_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define ATOMIC_INIT(x)  { .counter = (x) }

typedef struct {
    volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *  32-bit atomic operations
 *------------------------------------------------------------------------*/

#define atomic_add(i, v)            atomic_add_return((i), (v))
#define atomic_sub(i, v)            atomic_sub_return((i), (v))
#define atomic_inc_return(v)        atomic_add_return(1, (v))
#define atomic_add_negative(i, v)   (atomic_add_return((i), (v)) < 0)
#define atomic_add_and_test(i, v)   (atomic_add_return((i), (v)) == 0)
#define atomic_sub_and_test(i, v)   (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)      (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)      (atomic_add_return(1, (v)) == 0)
#define atomic_dec_return(v)        atomic_sub_return(1, (v))
#define atomic_inc_not_zero(v)      atomic_add_unless((v), 1, 0)

static inline int
atomic_add_return(int i, atomic_t *v)
{
    return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
    return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
    WRITE_ONCE(v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
    atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
    atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
    return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
    return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
    return atomic_fetchadd_int(&v->counter, -1) - 1;
}
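/*
 * Illustrative sketch (not part of this header's KPI): a minimal
 * reference count built from the operations above.  The names "obj"
 * and "obj_free" are hypothetical and exist only for this example.
 *
 *  struct obj {
 *      atomic_t refs;
 *  };
 *
 *  atomic_set(&obj->refs, 1);              // creator holds one reference
 *
 *  if (!atomic_inc_not_zero(&obj->refs))   // racing with teardown:
 *      return (NULL);                      // refuse to revive a dead object
 *
 *  if (atomic_dec_and_test(&obj->refs))    // last reference dropped
 *      obj_free(obj);
 */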
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
    int c = atomic_read(v);

    for (;;) {
        if (unlikely(c == u))
            break;
        if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
            break;
    }
    return (c != u);
}

static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
    int c = atomic_read(v);

    for (;;) {
        if (unlikely(c == u))
            break;
        if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
            break;
    }
    return (c);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
    atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
    return (atomic_swap_int(&v->counter, i));
}

static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
    int ret = old;

    for (;;) {
        if (atomic_fcmpset_int(&v->counter, &ret, new))
            break;
        if (ret != old)
            break;
    }
    return (ret);
}

#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define LINUXKPI_ATOMIC_8(...)
#define LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(i386) || (defined(__powerpc__) && !defined(__powerpc64__)))
#define LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define LINUXKPI_ATOMIC_64(...)
#endif

#define cmpxchg(ptr, old, new) ({                                   \
    union {                                                         \
        __typeof(*(ptr)) val;                                       \
        u8 u8[0];                                                   \
        u16 u16[0];                                                 \
        u32 u32[0];                                                 \
        u64 u64[0];                                                 \
    } __ret = { .val = (old) }, __new = { .val = (new) };           \
                                                                    \
    CTASSERT(                                                       \
        LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)                \
        LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)               \
        LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)               \
        sizeof(__ret.val) == 4);                                    \
                                                                    \
    switch (sizeof(__ret.val)) {                                    \
    LINUXKPI_ATOMIC_8(                                              \
    case 1:                                                         \
        while (!atomic_fcmpset_8((volatile u8 *)(ptr),              \
            __ret.u8, __new.u8[0]) && __ret.val == (old))           \
            ;                                                       \
        break;                                                      \
    )                                                               \
    LINUXKPI_ATOMIC_16(                                             \
    case 2:                                                         \
        while (!atomic_fcmpset_16((volatile u16 *)(ptr),            \
            __ret.u16, __new.u16[0]) && __ret.val == (old))         \
            ;                                                       \
        break;                                                      \
    )                                                               \
    case 4:                                                         \
        while (!atomic_fcmpset_32((volatile u32 *)(ptr),            \
            __ret.u32, __new.u32[0]) && __ret.val == (old))         \
            ;                                                       \
        break;                                                      \
    LINUXKPI_ATOMIC_64(                                             \
    case 8:                                                         \
        while (!atomic_fcmpset_64((volatile u64 *)(ptr),            \
            __ret.u64, __new.u64[0]) && __ret.val == (old))         \
            ;                                                       \
        break;                                                      \
    )                                                               \
    }                                                               \
    __ret.val;                                                      \
})

#define cmpxchg64(...)      cmpxchg(__VA_ARGS__)
#define cmpxchg_relaxed(...)    cmpxchg(__VA_ARGS__)
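/*
 * Illustrative sketch (not part of this header's KPI): claiming a slot
 * with cmpxchg().  The macro evaluates to the value previously found at
 * *ptr, so the claim succeeded exactly when that value equals the
 * expected old value.  "slot_owner" and "my_id" are hypothetical names
 * used only for this example.
 *
 *  static u32 slot_owner;      // 0 means the slot is free
 *
 *  if (cmpxchg(&slot_owner, 0, my_id) == 0) {
 *      // we swapped in my_id; the slot is ours
 *  }
 *
 * atomic_try_cmpxchg() below expresses the same operation for atomic_t,
 * but on failure it also stores the observed value back through its
 * second argument, saving a separate atomic_read() in retry loops.
 */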
#define xchg(ptr, new) ({                                           \
    union {                                                         \
        __typeof(*(ptr)) val;                                       \
        u8 u8[0];                                                   \
        u16 u16[0];                                                 \
        u32 u32[0];                                                 \
        u64 u64[0];                                                 \
    } __ret, __new = { .val = (new) };                              \
                                                                    \
    CTASSERT(                                                       \
        LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)                \
        LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)               \
        LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)               \
        sizeof(__ret.val) == 4);                                    \
                                                                    \
    switch (sizeof(__ret.val)) {                                    \
    LINUXKPI_ATOMIC_8(                                              \
    case 1:                                                         \
        __ret.val = READ_ONCE(*ptr);                                \
        while (!atomic_fcmpset_8((volatile u8 *)(ptr),              \
            __ret.u8, __new.u8[0]))                                 \
            ;                                                       \
        break;                                                      \
    )                                                               \
    LINUXKPI_ATOMIC_16(                                             \
    case 2:                                                         \
        __ret.val = READ_ONCE(*ptr);                                \
        while (!atomic_fcmpset_16((volatile u16 *)(ptr),            \
            __ret.u16, __new.u16[0]))                               \
            ;                                                       \
        break;                                                      \
    )                                                               \
    case 4:                                                         \
        __ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),        \
            __new.u32[0]);                                          \
        break;                                                      \
    LINUXKPI_ATOMIC_64(                                             \
    case 8:                                                         \
        __ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),        \
            __new.u64[0]);                                          \
        break;                                                      \
    )                                                               \
    }                                                               \
    __ret.val;                                                      \
})

#define try_cmpxchg(p, op, n)                                       \
({                                                                  \
    __typeof(p) __op = (__typeof((p)))(op);                         \
    __typeof(*(p)) __o = *__op;                                     \
    __typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n)); \
    if (__p != __o)                                                 \
        *__op = __p;                                                \
    (__p == __o);                                                   \
})

#define __atomic_try_cmpxchg(type, _p, _po, _n)                     \
({                                                                  \
    __typeof(_po) __po = (_po);                                     \
    __typeof(*(_po)) __r, __o = *__po;                              \
    __r = atomic_cmpxchg##type((_p), __o, (_n));                    \
    if (unlikely(__r != __o))                                       \
        *__po = __r;                                                \
    likely(__r == __o);                                             \
})

#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)

static inline int
atomic_dec_if_positive(atomic_t *v)
{
    int retval;
    int old;

    old = atomic_read(v);
    for (;;) {
        retval = old - 1;
        if (unlikely(retval < 0))
            break;
        if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
            break;
    }
    return (retval);
}

#define LINUX_ATOMIC_OP(op, c_op)                                   \
static inline void atomic_##op(int i, atomic_t *v)                  \
{                                                                   \
    int c, old;                                                     \
                                                                    \
    c = v->counter;                                                 \
    while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)             \
        c = old;                                                    \
}

#define LINUX_ATOMIC_FETCH_OP(op, c_op)                             \
static inline int atomic_fetch_##op(int i, atomic_t *v)             \
{                                                                   \
    int c, old;                                                     \
                                                                    \
    c = v->counter;                                                 \
    while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)             \
        c = old;                                                    \
                                                                    \
    return (c);                                                     \
}

static inline int
atomic_fetch_inc(atomic_t *v)
{

    return ((atomic_inc_return(v) - 1));
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif /* _LINUXKPI_ASM_ATOMIC_H_ */