#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr));
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)
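
/*
 * Illustrative note on the "(nr^31) >> 3" index above (a sketch of the
 * intent, not authoritative documentation): the bitmap is an array of
 * big-endian longs, but bit numbering follows the usual convention that
 * bit 0 is the least significant bit of the 32-bit word.  XORing with 31
 * selects the byte of the word that holds bit nr under that convention;
 * bset/bclr/bchg then operate on bit (nr & 7) of that byte.  For example,
 * nr = 0 gives byte (0^31)>>3 = 3, the least significant byte of a
 * big-endian long, so set_bit(0, p) sets 1UL << 0 of *p, matching
 * test_bit(0, p) below.
 */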

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
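
/*
 * Usage sketch for the barrier macros (illustrative; the names below are
 * hypothetical, not from this header): callers that need prior stores to
 * be visible before an unlocking-style clear_bit() insert the compiler
 * barrier themselves, e.g.
 *
 *	buf->ready_data = val;
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY_BIT_NR, &buf->flags);
 */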

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr));
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bchg %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr));
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr));
#endif
	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
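
/*
 * Illustrative note on the bset/sne pair above: BSET first tests the
 * addressed bit and sets the CCR Z flag from it (Z = 1 if the bit was
 * clear), then sets the bit; SNE then writes 0xff to retval when Z is
 * clear.  So retval is nonzero exactly when the bit was already set,
 * and the test and set happen in one read-modify-write instruction,
 * which is what makes this atomic on these non-SMP parts.  The same
 * idiom is used by the other test_and_* routines below.
 */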

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr));
#endif
	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr));
#endif
	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int *a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
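
/*
 * Example of the dispatch above (illustrative): test_bit(3, map) has a
 * constant nr, so __builtin_constant_p() is true and the expression
 * folds at compile time via __constant_test_bit(), while test_bit(i, map)
 * with a runtime i inlines __test_bit() instead.  Both use the same
 * nr >> 5 word index and 1 << (nr & 0x1f) mask, so they always agree.
 */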

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>

static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr));
#endif
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr));
#endif
	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
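
/*
 * Usage sketch for the _atomic variants (illustrative; the names below
 * are hypothetical, not from this header):
 *
 *	static spinlock_t bitmap_lock = SPIN_LOCK_UNLOCKED;
 *
 *	if (ext2_set_bit_atomic(&bitmap_lock, nr, bitmap))
 *		;	// bit was already set
 *
 * The lock-protected forms match the generic kernel API; on these
 * non-SMP parts the underlying bset is itself a single
 * read-modify-write instruction.
 */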

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr));
#endif
	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
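
/*
 * Worked example of the swab trick above (illustrative): with offset = 4,
 * ~0UL >> (32-4) is 0x0000000f, and __swab32() turns it into 0x0f000000.
 * A big-endian load of a little-endian on-disk word puts LE bits 0..7
 * into bits 31..24 of tmp, so 0x0f000000 marks LE bits 0..3 as already
 * scanned without swabbing tmp itself; tmp is only swabbed once, at the
 * final ffz().
 */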

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */