Path: arch/arm64/include/asm/atomic_ll_sc.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#include <linux/stringify.h>

#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op, constraint)				\
static __always_inline void						\
__ll_sc_atomic_##op(int i, atomic_t *v)					\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%w0, %2\n"					\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	stxr	%w1, %w0, %2\n"					\
	"	cbnz	%w1, 1b\n"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline int						\
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr	%w0, %2\n"				\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	st" #rel "xr	%w1, %w0, %2\n"				\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static __always_inline int						\
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int val, result;						\
									\
	asm volatile("// atomic_fetch_" #op #name "\n"			\
	"	prfm	pstl1strm, %3\n"				\
	"1:	ld" #acq "xr	%w0, %3\n"				\
	"	" #asm_op "	%w1, %w0, %w4\n"			\
	"	st" #rel "xr	%w2, %w1, %3\n"				\
	"	cbnz	%w2, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add, I)
ATOMIC_OPS(sub, sub, J)
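/*
 * For illustration: ATOMIC_OPS(add, add, I) above instantiates
 * ATOMIC_OP(add, add, I), which expands to roughly the following
 * function. The ldxr/stxr pair retries until the exclusive store
 * succeeds, and __stringify(I) "r" composes the "Ir" constraint, so the
 * compiler may pass i as an add-immediate or fall back to a register:
 *
 *	static __always_inline void __ll_sc_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *		"	prfm	pstl1strm, %2\n"
 *		"1:	ldxr	%w0, %2\n"
 *		"	add	%w0, %w0, %w3\n"
 *		"	stxr	%w1, %w0, %2\n"
 *		"	cbnz	%w1, 1b\n"
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));
 *	}
 *
 * The same ATOMIC_OPS() invocation also emits the _return and fetch_
 * variants in relaxed, acquire, release and fully-ordered flavours.
 */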
#undef ATOMIC_OPS
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and, K)
ATOMIC_OPS(or, orr, K)
ATOMIC_OPS(xor, eor, K)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC_OPS(andnot, bic, )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op, constraint)				\
static __always_inline void						\
__ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%0, %2\n"					\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	stxr	%w1, %0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline long						\
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr	%0, %2\n"				\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	st" #rel "xr	%w1, %0, %2\n"				\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline long						\
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_fetch_" #op #name "\n"		\
	"	prfm	pstl1strm, %3\n"				\
	"1:	ld" #acq "xr	%0, %3\n"				\
	"	" #asm_op "	%1, %0, %4\n"				\
	"	st" #rel "xr	%w2, %1, %3\n"				\
	"	cbnz	%w2, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add, I)
ATOMIC64_OPS(sub, sub, J)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and, L)
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC64_OPS(andnot, bic, )
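/*
 * Note on the composed constraints: __stringify(constraint) "r" pastes
 * the (possibly empty) constraint letter in front of "r". For example,
 * ATOMIC64_OPS(add, add, I) yields "Ir" (add-immediate or register),
 * while the empty argument passed for the bic-based andnot ops above
 * collapses to a plain "r" and always uses a register. The K-constrained
 * 32-bit logical ops get the same register-only fallback when
 * CONFIG_CC_HAS_K_CONSTRAINT is not set, since K is then defined to
 * nothing at the top of this file.
 */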
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __always_inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	subs	%0, %0, #1\n"
	"	b.lt	2f\n"
	"	stlxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	"	dmb	ish\n"
	"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
static __always_inline u##sz						\
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,			\
					 unsigned long old,		\
					 u##sz new)			\
{									\
	unsigned long tmp;						\
	u##sz oldval;							\
									\
	/*								\
	 * Sub-word sizes require explicit casting so that the compare	\
	 * part of the cmpxchg doesn't end up interpreting non-zero	\
	 * upper bits of the register containing "old".			\
	 */								\
	if (sz < 32)							\
		old = (u##sz)old;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(u##sz *)ptr)					\
	: [old] __stringify(constraint) "r" (old), [new] "r" (new)	\
	: cl);								\
									\
	return oldval;							\
}

/*
 * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
 * handle the 'K' constraint for the value 4294967295 - thus we use no
 * constraint for 32 bit operations.
 */
__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory", L)

#undef __CMPXCHG_CASE
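/*
 * For illustration, with CONFIG_CC_HAS_K_CONSTRAINT enabled,
 * __CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K) above expands
 * to roughly the following (with K defined to nothing, the "Kr"
 * constraint becomes plain "r"):
 *
 *	static __always_inline u8
 *	__ll_sc__cmpxchg_case_mb_8(volatile void *ptr, unsigned long old,
 *				   u8 new)
 *	{
 *		unsigned long tmp;
 *		u8 oldval;
 *
 *		old = (u8)old;	// sz = 8 < 32: compare only the low byte
 *
 *		asm volatile(
 *		"	prfm	pstl1strm, %[v]\n"
 *		"1:	ldxrb	%w[oldval], %[v]\n"
 *		"	eor	%w[tmp], %w[oldval], %w[old]\n"
 *		"	cbnz	%w[tmp], 2f\n"
 *		"	stlxrb	%w[tmp], %w[new], %[v]\n"
 *		"	cbnz	%w[tmp], 1b\n"
 *		"	dmb ish\n"
 *		"2:"
 *		: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),
 *		  [v] "+Q" (*(u8 *)ptr)
 *		: [old] "Kr" (old), [new] "r" (new)
 *		: "memory");
 *
 *		return oldval;
 *	}
 */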
union __u128_halves {
	u128 full;
	struct {
		u64 low, high;
	};
};

#define __CMPXCHG128(name, mb, rel, cl...)				\
static __always_inline u128						\
__ll_sc__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)	\
{									\
	union __u128_halves r, o = { .full = (old) },			\
			       n = { .full = (new) };			\
	unsigned int tmp;						\
									\
	asm volatile("// __cmpxchg128" #name "\n"			\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ldxp	%[rl], %[rh], %[v]\n"				\
	"	cmp	%[rl], %[ol]\n"					\
	"	ccmp	%[rh], %[oh], 0, eq\n"				\
	"	b.ne	2f\n"						\
	"	st" #rel "xp	%w[tmp], %[nl], %[nh], %[v]\n"		\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: [v] "+Q" (*(u128 *)ptr),					\
	  [rl] "=&r" (r.low), [rh] "=&r" (r.high),			\
	  [tmp] "=&r" (tmp)						\
	: [ol] "r" (o.low), [oh] "r" (o.high),				\
	  [nl] "r" (n.low), [nh] "r" (n.high)				\
	: "cc", ##cl);							\
									\
	return r.full;							\
}

__CMPXCHG128(   ,        ,  )
__CMPXCHG128(_mb, dmb ish, l, "memory")

#undef __CMPXCHG128

#undef K

#endif	/* __ASM_ATOMIC_LL_SC_H */
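/*
 * In short, __CMPXCHG128(_mb, dmb ish, l, "memory") above emits
 * __ll_sc__cmpxchg128_mb(): ldxp loads both 64-bit halves, cmp plus ccmp
 * folds the two comparisons so a single b.ne skips the store when either
 * half differs, stlxp attempts the release-ordered exclusive store, and
 * the trailing dmb ish makes the _mb variant fully ordered.
 */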