/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

#include <asm-generic/barrier.h>

#endif /* !(__SPARC64_BARRIER_H) */
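
/* Illustrative sketch (not part of the interface above): with the
 * membar_safe() wrapper, mb() should emit roughly the following
 * instruction sequence, placing the membar in the delay slot of an
 * always-taken, predicted-taken branch as described in the errata
 * comment:
 *
 *	ba,pt	%xcc, 1f
 *	 membar	#StoreLoad
 *	1:
 */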
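
/* Illustrative sketch of how the release/acquire pair above is typically
 * used on SMP builds, where the generic barrier.h maps smp_store_release()
 * and smp_load_acquire() onto the __smp_* definitions in this file.  The
 * names data, data_ready, produce_data() and consume() are hypothetical,
 * for the example only:
 *
 *	writer (CPU 0):				reader (CPU 1):
 *
 *	data = produce_data();			while (!smp_load_acquire(&data_ready))
 *	smp_store_release(&data_ready, 1);		cpu_relax();
 *						consume(data);
 *
 * Because the kernel runs sparc64 in TSO mode, both sides reduce to a
 * compiler barrier plus a plain store/load, which is why the
 * __smp_store_release()/__smp_load_acquire() definitions above contain no
 * membar instruction.
 */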