/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <[email protected]>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
#include <asm/ppc-opcode.h>
#endif

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define __mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define __rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define __wmb()  __asm__ __volatile__ ("sync" : : : "memory")

/* The sub-arch has lwsync */
#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_E500MC)
#    define SMPWMB      LWSYNC
#elif defined(CONFIG_BOOKE)
#    define SMPWMB      mbar
#else
#    define SMPWMB      eieio
#endif

/* clang defines this macro for a builtin, which will not work with runtime patching */
#undef __lwsync
#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define __dma_rmb()	__lwsync()
#define __dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

#define __smp_lwsync()	__lwsync()

#define __smp_mb()	__mb()
#define __smp_rmb()	__lwsync()
#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
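
/*
 * Illustrative sketch (x, y, r0 and r1 are made-up names, not part of
 * this header): the classic store-buffering pattern shows why smp_mb()
 * has to map to sync rather than lwsync.  With a full barrier on both
 * CPUs, at least one CPU must observe the other's store, so the outcome
 * r0 == 0 && r1 == 0 is forbidden:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
 *	smp_mb();			smp_mb();
 *	r0 = READ_ONCE(y);		r1 = READ_ONCE(x);
 *
 * lwsync does not order a CPU's store before its own subsequent load,
 * so it would allow both r0 and r1 to be 0 here.
 */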

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	___p1;								\
})

#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT   nop
#elif defined(CONFIG_PPC_E500)
#define NOSPEC_BARRIER_SLOT   nop; nop
#endif

#ifdef CONFIG_PPC_BARRIER_NOSPEC
/*
 * Prevent execution of subsequent instructions until preceding branches have
 * been fully resolved and are no longer executing speculatively.
 */
#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT

// This also acts as a compiler barrier due to the memory clobber.
#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")

#else /* !CONFIG_PPC_BARRIER_NOSPEC */
#define barrier_nospec_asm
#define barrier_nospec()
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

/*
 * pmem_wmb() ensures that the stores flushed by preceding dcbfps/dcbstps
 * instructions have updated persistent storage before any data access
 * or data transfer caused by subsequent instructions is initiated.
 */
#define pmem_wmb() __asm__ __volatile__(PPC_PHWSYNC ::: "memory")

#include <asm-generic/barrier.h>

#endif /* _ASM_POWERPC_BARRIER_H */
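
/*
 * Usage sketch (illustrative only; flag, data and do_something() are
 * hypothetical names): the __smp_store_release()/__smp_load_acquire()
 * definitions above back the generic smp_store_release() and
 * smp_load_acquire() helpers from asm-generic/barrier.h, which are
 * typically paired like this:
 *
 *	producer			consumer
 *	data = 42;			if (smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);		do_something(data);
 *
 * The lwsync issued before the release store orders the write to data
 * before the write to flag, and the lwsync issued after the acquire
 * load orders the read of flag before the read of data, so the consumer
 * cannot see flag == 1 while still observing stale data.
 */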