/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * xchg/cmpxchg operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_CMPXCHG_H
#define _ASM_CMPXCHG_H

/*
 * __arch_xchg - atomically exchange a register and a memory location
 * @x: value to swap
 * @ptr: pointer to memory
 * @size: size of the value
 *
 * Only 4 bytes supported currently.
 *
 * Note: there was an errata for V2 about .new's and memw_locked.
 *
 */
static inline unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long retval;

	/* Can't seem to use printk or panic here, so just stop */
	if (size != 4) do { asm volatile("brkpt;\n"); } while (1);

	/*
	 * Load-locked / store-conditional loop: memw_locked(%1) reserves the
	 * word; the conditional store sets P0 true only if the reservation
	 * still held, otherwise retry from label 1.
	 */
	__asm__ __volatile__ (
	"1:	%0 = memw_locked(%1);\n"    /* load into retval */
	"	memw_locked(%1,P0) = %2;\n" /* store into memory */
	"	if (!P0) jump 1b;\n"        /* reservation lost: retry */
	: "=&r" (retval)                    /* early-clobber: written before inputs are dead */
	: "r" (ptr), "r" (x)
	: "memory", "p0"
	);
	return retval;
}

/*
 * Atomically swap the contents of a register with memory. Should be atomic
 * between multiple CPU's and within interrupts on the same CPU.
 */
#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
	sizeof(*(ptr))))

/*
 * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
 * looks just like atomic_cmpxchg on our arch currently with a bunch of
 * variable casting.
 */
#define arch_cmpxchg(ptr, old, new)				\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __oldval = (__typeof__(*(ptr))) 0;	\
								\
	/* LL/SC loop: bail to 2 if *__ptr != __old, else	\
	 * conditionally store __new and retry on failure. */	\
	asm volatile(						\
		"1:	%0 = memw_locked(%1);\n"		\
		"	{ P0 = cmp.eq(%0,%2);\n"		\
		"	  if (!P0.new) jump:nt 2f; }\n"		\
		"	memw_locked(%1,p0) = %3;\n"		\
		"	if (!P0) jump 1b;\n"			\
		"2:\n"						\
		: "=&r" (__oldval)				\
		: "r" (__ptr), "r" (__old), "r" (__new)		\
		: "memory", "p0"				\
	);							\
	__oldval;						\
})

#endif /* _ASM_CMPXCHG_H */