/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#include <asm/fence.h>

/* These barriers need to enforce ordering on both devices and memory. */
#define __mb()		RISCV_FENCE(iorw, iorw)
#define __rmb()		RISCV_FENCE(ir, ir)
#define __wmb()		RISCV_FENCE(ow, ow)

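/*
 * Sketch of the device/memory distinction above (hypothetical driver code,
 * not part of this file): a descriptor store in coherent memory has to be
 * visible before the MMIO doorbell write that tells the device to look at
 * it, so a relaxed MMIO accessor is paired with wmb() rather than smp_wmb():
 *
 *	desc->len = len;		// normal memory write
 *	wmb();				// FENCE ow, ow: memory before I/O
 *	writel_relaxed(1, dev->db);	// MMIO doorbell write
 */
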
/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb()	RISCV_FENCE(rw, rw)
#define __smp_rmb()	RISCV_FENCE(r, r)
#define __smp_wmb()	RISCV_FENCE(w, w)
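
/*
 * Sketch of the pairing these are meant for (hypothetical, for
 * illustration): classic message passing over normal memory, where the
 * lighter "fence w,w" / "fence r,r" pair is sufficient:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(x, 1);		while (!READ_ONCE(flag))
 *	smp_wmb();				cpu_relax();
 *	WRITE_ONCE(flag, 1);		smp_rmb();
 *					r = READ_ONCE(x);	// observes 1
 */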

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
 * orderings it guarantees, but the "critical section is RCsc" guarantee
 * mandates a barrier on RISC-V. The sequence looks like:
 *
 *	lr.aq	lock
 *	sc	lock <= LOCKED
 *	smp_mb__after_spinlock()
 *	// critical section
 *	lr	lock
 *	sc.rl	lock <= UNLOCKED
 *
 * The AQ/RL pair provides an RCpc critical section, but there's not really
 * any way we can take advantage of that here because the ordering is only
 * enforced on that one lock. Thus, we're just doing a full fence.
 *
 * Since we allow writeX to be called from preemptive regions, we need at
 * least an "o" in the predecessor set to ensure device writes are visible
 * before the task is marked as available for scheduling on a new hart. While
 * I don't see any concrete reason we need a full IO fence, it seems safer to
 * just upgrade this in order to avoid any IO crossing a scheduling boundary.
 * In both instances the scheduler pairs this with an mb(), so nothing is
 * necessary on the new hart.
 */
#define smp_mb__after_spinlock()	RISCV_FENCE(iorw, iorw)
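
/*
 * Usage sketch (mirrors the scheduler callers, e.g. kernel/sched/core.c;
 * the lock name is illustrative):
 *
 *	raw_spin_lock(&rq->lock);
 *	smp_mb__after_spinlock();	// upgrade the RCpc lock to a full fence
 *	// ... critical section that must be RCsc ...
 *	raw_spin_unlock(&rq->lock);
 */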

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(rw, w);						\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(r, rw);						\
	___p1;								\
})
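
/*
 * Sketch of the release/acquire pairing above (hypothetical, for
 * illustration): once the consumer's acquire load observes the published
 * pointer, the two fences guarantee it also observes every store made
 * before the release:
 *
 *	CPU 0					CPU 1
 *	obj->val = 1;				p = smp_load_acquire(&ptr);
 *	smp_store_release(&ptr, obj);		if (p)
 *							r = p->val;	// observes 1
 */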

#ifdef CONFIG_RISCV_ISA_ZAWRS
#define smp_cond_load_relaxed(ptr, cond_expr) ({			\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(ptr, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})
#endif
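
/*
 * Usage sketch (hypothetical caller): wait for another hart to set a flag,
 * where VAL is the variable the macro exposes to cond_expr. With Zawrs,
 * __cmpwait_relaxed() can stall the hart on a reservation over the flag
 * instead of spinning on plain loads; without CONFIG_RISCV_ISA_ZAWRS the
 * asm-generic fallback pulled in below spins with cpu_relax():
 *
 *	v = smp_cond_load_relaxed(&flag, VAL != 0);
 */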

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */