GitHub Repository: torvalds/linux
Path: blob/master/arch/arc/include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/types.h>
#include <linux/cmpxchg-emu.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * if (*ptr == @old)
 *      *ptr = @new
 */
#define __cmpxchg(ptr, old, new) \
({ \
        __typeof__(*(ptr)) _prev; \
 \
        __asm__ __volatile__( \
        "1:     llock   %0, [%1] \n" \
        "       brne    %0, %2, 2f \n" \
        "       scond   %3, [%1] \n" \
        "       bnz     1b \n" \
        "2: \n" \
        : "=&r"(_prev)  /* Early clobber prevents reg reuse */ \
        : "r"(ptr),     /* Not "m": llock only supports reg */ \
          "ir"(old), \
          "r"(new)      /* Not "ir": scond can't take LIMM */ \
        : "cc", \
          "memory");    /* gcc knows memory is clobbered */ \
 \
        _prev; \
})

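/*
 * Descriptive note on the sequence above: llock loads the current value
 * and marks the location for exclusive access; brne bails out early when
 * the loaded value differs from @old; scond then stores @new only if no
 * other agent wrote the location since the llock, and bnz loops back to
 * retry whenever that conditional store did not take effect. Either way
 * the caller gets back the value that was actually observed in memory.
 */
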
#define arch_cmpxchg_relaxed(ptr, old, new) \
({ \
        __typeof__(ptr) _p_ = (ptr); \
        __typeof__(*(ptr)) _o_ = (old); \
        __typeof__(*(ptr)) _n_ = (new); \
        __typeof__(*(ptr)) _prev_; \
 \
        switch(sizeof(*(_p_))) { \
        case 1: \
                _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
                break; \
        case 4: \
                _prev_ = __cmpxchg(_p_, _o_, _n_); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        _prev_; \
})

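/*
 * Illustrative use only; the generic <linux/atomic.h> layer is what maps
 * cmpxchg() onto arch_cmpxchg_relaxed() plus the required ordering. The
 * names below are made up for the example:
 *
 *      u32 lock_word = 0;
 *
 *      if (cmpxchg(&lock_word, 0, 1) == 0)
 *              pr_debug("atomically changed 0 -> 1, resource acquired\n");
 *
 * One-byte operands take the cmpxchg_emu_u8() emulation path pulled in
 * via <linux/cmpxchg-emu.h>; sizes other than 1 and 4 trip BUILD_BUG().
 */
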
#else

#define arch_cmpxchg(ptr, old, new) \
({ \
        volatile __typeof__(ptr) _p_ = (ptr); \
        __typeof__(*(ptr)) _o_ = (old); \
        __typeof__(*(ptr)) _n_ = (new); \
        __typeof__(*(ptr)) _prev_; \
        unsigned long __flags; \
 \
        /* \
         * spin lock/unlock provide the needed smp_mb() before/after \
         */ \
        atomic_ops_lock(__flags); \
        _prev_ = *_p_; \
        if (_prev_ == _o_) \
                *_p_ = _n_; \
        atomic_ops_unlock(__flags); \
        _prev_; \
})

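/*
 * Note on the !LLSC fallback above: with no llock/scond available, the
 * whole load-compare-store sequence is serialized by atomic_ops_lock()
 * (per asm/smp.h, a global spinlock on SMP builds and plain IRQ disabling
 * on UP), so ordinary C accesses inside the critical section suffice.
 */
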
#endif

/*
 * xchg
 */
#ifdef CONFIG_ARC_HAS_LLSC

#define __arch_xchg(ptr, val) \
({ \
        __asm__ __volatile__( \
        "       ex  %0, [%1] \n"        /* set new value */ \
        : "+r"(val) \
        : "r"(ptr) \
        : "memory"); \
        val;            /* get old value */ \
})

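/*
 * Descriptive note: EX atomically exchanges a core register with a memory
 * location in a single instruction, so unlike the llock/scond loop above
 * there is nothing to retry. The "+r"(val) read-write constraint feeds the
 * new value in and hands the previously stored value back out through the
 * same register.
 */
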
#define arch_xchg_relaxed(ptr, val) \
({ \
        __typeof__(ptr) _p_ = (ptr); \
        __typeof__(*(ptr)) _val_ = (val); \
 \
        switch(sizeof(*(_p_))) { \
        case 4: \
                _val_ = __arch_xchg(_p_, _val_); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        _val_; \
})

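/*
 * Illustrative use only; as with cmpxchg(), the generic atomic layer turns
 * xchg() into arch_xchg_relaxed() plus the required ordering. Names are
 * made up for the example:
 *
 *      static struct llist_node *pending;
 *
 *      struct llist_node *batch = xchg(&pending, NULL);
 *
 * Only 32-bit (case 4) operands are supported here; any other size is a
 * build-time error via BUILD_BUG().
 */
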
#else  /* !CONFIG_ARC_HAS_LLSC */

/*
 * The EX instruction is baseline and present in !LLSC builds too. But in
 * this regime it still needs to take the @atomic_ops_lock spinlock, to
 * allow interop with cmpxchg(), which uses the spinlock in !LLSC
 * (llist.h uses xchg and cmpxchg on the same data).
 */

#define arch_xchg(ptr, val) \
({ \
        __typeof__(ptr) _p_ = (ptr); \
        __typeof__(*(ptr)) _val_ = (val); \
 \
        unsigned long __flags; \
 \
        atomic_ops_lock(__flags); \
 \
        __asm__ __volatile__( \
        "       ex  %0, [%1] \n" \
        : "+r"(_val_) \
        : "r"(_p_) \
        : "memory"); \
 \
        atomic_ops_unlock(__flags); \
        _val_; \
})

#endif

#endif