/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle ([email protected])
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)
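/*
 * Illustrative usage sketch only: a typical pointer-publication pattern
 * pairs a write barrier on the producer side with a dependent-read barrier
 * on the consumer side.  The names "gp", "p", "q" and the field "data"
 * below are hypothetical and exist solely for this example.
 *
 * <programlisting>
 *	Producer			Consumer
 *
 *	p->data = 42;			q = gp;
 *	smp_wmb();			smp_read_barrier_depends();
 *	gp = p;				if (q)
 *						d = q->data;
 * </programlisting>
 *
 * On MIPS smp_read_barrier_depends() expands to nothing, as defined above,
 * because the hardware preserves data-dependency ordering; the call is kept
 * so the same code stays correct on CPUs (such as Alpha) that do reorder
 * dependent reads.
 */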
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")

# define fast_wmb()	__syncw()
# define fast_rmb()	barrier()
# define fast_mb()	__sync()
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# define fast_wmb()	__sync()
# define fast_rmb()	__sync()
# define fast_mb()	__sync()
# ifdef CONFIG_SGI_IP28
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
#  define smp_mb()	__sync()
#  define smp_rmb()	barrier()
#  define smp_wmb()	__syncw()
# else
#  define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
#  define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
#  define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
# endif
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"	sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif

#define set_mb(var, value) \
	do { var = value; smp_mb(); } while (0)

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

#endif /* __ASM_BARRIER_H */