GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Hint encoding:
 *
 * Bit4: ordering or completion (0: completion, 1: ordering)
 * Bit3: barrier for previous read (0: true, 1: false)
 * Bit2: barrier for previous write (0: true, 1: false)
 * Bit1: barrier for succeeding read (0: true, 1: false)
 * Bit0: barrier for succeeding write (0: true, 1: false)
 *
 * Hint 0x700: barrier for "read after read" from the same address
 */

#define DBAR(hint) __asm__ __volatile__("dbar %0 " : : "I"(hint) : "memory")
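
/*
 * The "I" constraint requires the hint to be a compile-time constant
 * immediate, so every hint below must be a constant expression; the
 * "memory" clobber keeps the compiler from moving memory accesses
 * across the barrier.
 */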

#define crwrw		0b00000
#define cr_r_		0b00101
#define c_w_w		0b01010

#define orwrw		0b10000
#define or_r_		0b10101
#define o_w_w		0b11010

#define orw_w		0b10010
#define or_rw		0b10100
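
/*
 * Decoding the names against the encoding above (an inference from the
 * bit patterns): the leading 'c'/'o' selects completion vs. ordering
 * (Bit4), and the next four positions stand for previous-read,
 * previous-write, succeeding-read and succeeding-write, a letter
 * meaning the barrier applies and '_' that it does not. So or_rw
 * (0b10100) orders previous reads against succeeding reads and writes
 * (acquire-like), while orw_w (0b10010) orders previous reads and
 * writes against succeeding writes (release-like).
 */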

#define c_sync()	DBAR(crwrw)
#define c_rsync()	DBAR(cr_r_)
#define c_wsync()	DBAR(c_w_w)

#define o_sync()	DBAR(orwrw)
#define o_rsync()	DBAR(or_r_)
#define o_wsync()	DBAR(o_w_w)

#define ldacq_mb()	DBAR(or_rw)
#define strel_mb()	DBAR(orw_w)

#define mb()		c_sync()
#define rmb()		c_rsync()
#define wmb()		c_wsync()
#define iob()		c_sync()
#define wbflush()	c_sync()

#define __smp_mb()	o_sync()
#define __smp_rmb()	o_rsync()
#define __smp_wmb()	o_wsync()
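
/*
 * The mandatory barriers (mb/rmb/wmb) use the stronger completion
 * hints, while the SMP-only variants use the cheaper ordering hints:
 * CPU-vs-CPU ordering is all that the smp_*() barriers are required
 * to provide.
 */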

#ifdef CONFIG_SMP
#define __WEAK_LLSC_MB		"	dbar 0x700	\n"
#else
#define __WEAK_LLSC_MB		"	\n"
#endif
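
/*
 * __WEAK_LLSC_MB is pasted into inline-asm LL/SC sequences elsewhere
 * in this port (e.g. asm/cmpxchg.h). Hint 0x700 only orders a "read
 * after read" from the same address, which is enough there and
 * compiles to a plain no-op string on !SMP.
 */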

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/**
 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (@index < @size)
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	__asm__ __volatile__(
		"sltu	%0, %1, %2\n\t"
#if (__SIZEOF_LONG__ == 4)
		"sub.w	%0, $zero, %0\n\t"
#elif (__SIZEOF_LONG__ == 8)
		"sub.d	%0, $zero, %0\n\t"
#endif
		: "=r" (mask)
		: "r" (index), "r" (size)
		:);

	return mask;
}
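
/*
 * A usage sketch (callers normally go through array_index_nospec() in
 * <linux/nospec.h> rather than using the mask directly):
 *
 *	if (idx < sz) {
 *		idx &= array_index_mask_nospec(idx, sz);
 *		val = array[idx];
 *	}
 *
 * If the bounds check is misspeculated, the mask is 0 and idx is
 * clamped to 0, so no out-of-bounds address is formed. sltu sets mask
 * to 1 when index < size and 0 otherwise; sub.{w,d} negates it to
 * ~0UL or 0 without a branch the CPU could mispredict.
 */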

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	ldacq_mb();							\
	___p1;								\
})

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	strel_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
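
/*
 * Note the barrier placement: the acquire barrier sits after the load,
 * so later accesses cannot be hoisted above it, while the release
 * barrier sits before the store, so earlier accesses cannot sink below
 * it. Both map to single dbar hints (or_rw and orw_w) rather than a
 * full __smp_mb().
 */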

#define __smp_store_mb(p, v)						\
do {									\
	union { typeof(p) __val; char __c[1]; } __u =			\
		{ .__val = (__force typeof(p)) (v) };			\
	unsigned long __tmp;						\
	switch (sizeof(p)) {						\
	case 1:								\
		*(volatile __u8 *)&p = *(__u8 *)__u.__c;		\
		__smp_mb();						\
		break;							\
	case 2:								\
		*(volatile __u16 *)&p = *(__u16 *)__u.__c;		\
		__smp_mb();						\
		break;							\
	case 4:								\
		__asm__ __volatile__(					\
		"amswap_db.w %[tmp], %[val], %[mem]	\n"		\
		: [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp)		\
		: [val] "r" (*(__u32 *)__u.__c)				\
		: );							\
		break;							\
	case 8:								\
		__asm__ __volatile__(					\
		"amswap_db.d %[tmp], %[val], %[mem]	\n"		\
		: [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp)		\
		: [val] "r" (*(__u64 *)__u.__c)				\
		: );							\
		break;							\
	}								\
} while (0)
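
/*
 * For the 4- and 8-byte cases, amswap_db.{w,d} performs the store and
 * the full barrier in one atomic instruction (the _db suffix requests
 * the barrier); the displaced old value is discarded into __tmp. The
 * 1- and 2-byte cases fall back to a plain volatile store followed by
 * __smp_mb(), presumably because no byte/halfword amswap variant is
 * assumed available here.
 */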

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */