Path: arch/sh/include/asm/atomic-llsc.h
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add		\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub		\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */
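
For illustration only, here is a minimal sketch of how a caller might use these primitives. It is not part of the original header: the struct and function names below are hypothetical, invented for this example; only atomic_t and the operations defined above are assumed.

/* Hypothetical usage sketch -- not part of atomic-llsc.h. */
struct example_counter {
	atomic_t refs;
};

static inline void example_counter_get(struct example_counter *c)
{
	/* Fire-and-forget RMW: no return value needed, so the
	 * plain atomic_add() without synco suffices. */
	atomic_add(1, &c->refs);
}

static inline int example_counter_put(struct example_counter *c)
{
	/*
	 * atomic_sub_return() hands back the post-decrement value in r0
	 * (the "for free" retval noted in the SH-4A comment above), so
	 * the caller can act on a zero result, e.g. to free the object.
	 */
	return atomic_sub_return(1, &c->refs) == 0;
}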