arch/m68k/include/asm/bitops_no.h
#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#if defined (__mcfisaaplus__) || defined (__mcfisac__)
static inline int ffs(unsigned int val)
{
        if (!val)
                return 0;

        asm volatile(
                "bitrev %0\n\t"
                "ff1 %0\n\t"
                : "=d" (val)
                : "0" (val)
        );
        val++;
        return val;
}

static inline int __ffs(unsigned int val)
{
        asm volatile(
                "bitrev %0\n\t"
                "ff1 %0\n\t"
                : "=d" (val)
                : "0" (val)
        );
        return val;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
             : "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "d" (nr)
             : "%a0", "cc");
#else
        __asm__ __volatile__ ("bset %1,%0"
             : "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "di" (nr)
             : "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
             : "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "d" (nr)
             : "%a0", "cc");
#else
        __asm__ __volatile__ ("bclr %1,%0"
             : "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "di" (nr)
             : "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
             : "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "d" (nr)
             : "%a0", "cc");
#else
        __asm__ __volatile__ ("bchg %1,%0"
             : "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "di" (nr)
             : "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
        char retval;

#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "d" (nr)
             : "%a0");
#else
        __asm__ __volatile__ ("bset %2,%1; sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "di" (nr)
             /* No clobber */);
#endif

        return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
        char retval;

#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "d" (nr)
             : "%a0");
#else
        __asm__ __volatile__ ("bclr %2,%1; sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "di" (nr)
             /* No clobber */);
#endif

        return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
        char retval;

#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "d" (nr)
             : "%a0");
#else
        __asm__ __volatile__ ("bchg %2,%1; sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
             : "di" (nr)
             /* No clobber */);
#endif

        return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
        int     * a = (int *) addr;
        int     mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)

static inline void __set_bit_le(int nr, void *addr)
{
        __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
        __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_set_bit_le(int nr, volatile void *addr)
{
        char retval;

#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
             : "d" (nr)
             : "%a0");
#else
        __asm__ __volatile__ ("bset %2,%1; sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
             : "di" (nr)
             /* No clobber */);
#endif

        return retval;
}

static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
{
        char retval;

#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
             : "d" (nr)
             : "%a0");
#else
        __asm__ __volatile__ ("bclr %2,%1; sne %0"
             : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
             : "di" (nr)
             /* No clobber */);
#endif

        return retval;
}

#include <asm-generic/bitops/ext2-atomic.h>

static inline int test_bit_le(int nr, const volatile void *addr)
{
        char retval;

#ifdef CONFIG_COLDFIRE
        __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
             : "=d" (retval)
             : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
             : "%a0");
#else
        __asm__ __volatile__ ("btst %2,%1; sne %0"
             : "=d" (retval)
             : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
             /* No clobber */);
#endif

        return retval;
}

#define find_first_zero_bit_le(addr, size)	\
        find_next_zero_bit_le((addr), (size), 0)

static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if(offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if(size < 32)
                        goto found_first;
                if(~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while(size & ~31UL) {
                if(~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if(!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
#define find_next_zero_bit_le find_next_zero_bit_le

extern unsigned long find_next_bit_le(const void *addr,
                unsigned long size, unsigned long offset);

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */
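
/*
 * Illustration only -- not part of the header above.  The (nr^31) >> 3
 * byte index used by set_bit(), clear_bit() and friends converts the
 * kernel's bit numbering (bit nr lives in 32-bit word nr/32, at position
 * nr%32 counted from the least significant bit) into a byte offset on
 * this big-endian layout; the bset/bclr/bchg instructions then take the
 * bit number modulo 8 within that byte.  A hypothetical, non-atomic C
 * equivalent of the byte update (the helper name below is made up for
 * this sketch) would be:
 */
static inline void sketch_set_bit_be32(int nr, volatile unsigned long *addr)
{
        /* Byte (nr^31)>>3 is the big-endian byte of word nr/32 that holds
         * bit nr%32; nr & 7 selects the bit inside that byte. */
        ((volatile unsigned char *)addr)[(nr ^ 31) >> 3] |= 1u << (nr & 7);
}
/*
 * Example: nr = 34 gives byte index (34^31)>>3 = 61>>3 = 7, i.e. the last
 * (least significant) byte of the second long, and bit 34 & 7 = 2 inside
 * it -- bit 2 of word 1, which is overall bit 34 as expected.
 */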