Path: blob/master/tools/arch/sparc/include/asm/barrier_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
#define __TOOLS_LINUX_SPARC64_BARRIER_H

/* Copied from the kernel sources to tools/:
 *
 * These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})

#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
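
/* Minimal usage sketch (not part of the original header): how the
 * smp_store_release()/smp_load_acquire() pair above is typically used to
 * publish a value from one thread and consume it in another. The names
 * shared_data, data_ready, produce() and consume() are hypothetical;
 * READ_ONCE(), WRITE_ONCE() and barrier() are assumed to come from the
 * tools/include compiler headers that normally accompany this file.
 */
static int shared_data;
static int data_ready;

static void produce(void)
{
	shared_data = 42;			/* plain data store */
	smp_store_release(&data_ready, 1);	/* release: the flag store
						 * cannot be reordered before
						 * the data store */
}

static int consume(void)
{
	if (smp_load_acquire(&data_ready))	/* acquire: the data load
						 * cannot be reordered before
						 * the flag load */
		return shared_data;
	return -1;				/* not published yet */
}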