2 * bitops.h: Bit string operations on the ppc
8 #include <linux/config.h>
9 #include <asm/byteorder.h>
/*
 * Out-of-line prototypes for the atomic bit operations.  `nr' is the
 * bit index, `addr' the base of the bitmap; the test_and_* variants
 * also return the previous value of the bit (nonzero/zero).  Inline
 * definitions follow below under __INLINE_BITOPS.
 */
11 extern void set_bit(int nr, volatile void *addr);
12 extern void clear_bit(int nr, volatile void *addr);
13 extern void change_bit(int nr, volatile void *addr);
14 extern int test_and_set_bit(int nr, volatile void *addr);
15 extern int test_and_clear_bit(int nr, volatile void *addr);
16 extern int test_and_change_bit(int nr, volatile void *addr);
19 * Arguably these bit operations don't imply any memory barrier or
20 * SMP ordering, but in fact a lot of drivers expect them to imply
21 * both, since they do on x86 cpus.
/*
 * Barrier fragments spliced into the asm bodies below so the bit ops
 * imply ordering on SMP (see the comment above): eieio before the
 * operation, sync after it.
 * NOTE(review): the #ifdef CONFIG_SMP opener and the non-SMP (empty)
 * definitions appear missing from this view -- confirm against the
 * complete source.
 */
24 #define SMP_WMB "eieio\n"
25 #define SMP_MB "\nsync"
29 #endif /* CONFIG_SMP */
/* Select the inline asm implementations below over the extern ones. */
31 #define __INLINE_BITOPS 1
35 * These used to be if'd out here because using : "cc" as a constraint
36 * resulted in errors from egcs. Things may be OK with gcc-2.95.
/*
 * Atomically set bit `nr' in the bitmap at `addr'.
 * mask selects the bit within its 32-bit word; p points at that word.
 * NOTE(review): the asm instruction string (expected load-reserve/
 * store-conditional loop), the `old' declaration, clobber list and
 * closing brace appear truncated in this view -- confirm against the
 * complete source before editing.
 */
38 extern __inline__ void set_bit(int nr, volatile void * addr)
41 unsigned long mask = 1 << (nr & 0x1f);
42 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
44 __asm__ __volatile__(SMP_WMB "\
50 : "=&r" (old), "=m" (*p)
51 : "r" (mask), "r" (p), "m" (*p)
/*
 * Atomically clear bit `nr' in the bitmap at `addr'.
 * Same word/mask addressing as set_bit.
 * NOTE(review): the asm instruction string, `old' declaration, clobber
 * list and closing brace appear truncated in this view.
 */
55 extern __inline__ void clear_bit(int nr, volatile void *addr)
58 unsigned long mask = 1 << (nr & 0x1f);
59 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
61 __asm__ __volatile__(SMP_WMB "\
67 : "=&r" (old), "=m" (*p)
68 : "r" (mask), "r" (p), "m" (*p)
/*
 * Atomically toggle (invert) bit `nr' in the bitmap at `addr'.
 * NOTE(review): the asm instruction string, `old' declaration, clobber
 * list and closing brace appear truncated in this view.
 */
72 extern __inline__ void change_bit(int nr, volatile void *addr)
75 unsigned long mask = 1 << (nr & 0x1f);
76 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
78 __asm__ __volatile__(SMP_WMB "\
84 : "=&r" (old), "=m" (*p)
85 : "r" (mask), "r" (p), "m" (*p)
/*
 * Atomically set bit `nr' in the bitmap at `addr' and return its
 * previous value (nonzero if it was already set, 0 otherwise).
 * Uses two asm temporaries (`old', `t') rather than one.
 * NOTE(review): the asm instruction string, local declarations,
 * clobber list and closing brace appear truncated in this view.
 */
89 extern __inline__ int test_and_set_bit(int nr, volatile void *addr)
92 unsigned int mask = 1 << (nr & 0x1f);
93 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
95 __asm__ __volatile__(SMP_WMB "\
101 : "=&r" (old), "=&r" (t), "=m" (*p)
102 : "r" (mask), "r" (p), "m" (*p)
105 return (old & mask) != 0;
/*
 * Atomically clear bit `nr' in the bitmap at `addr' and return its
 * previous value (nonzero if it was set, 0 otherwise).
 * NOTE(review): the asm instruction string, local declarations,
 * clobber list and closing brace appear truncated in this view.
 */
108 extern __inline__ int test_and_clear_bit(int nr, volatile void *addr)
111 unsigned int mask = 1 << (nr & 0x1f);
112 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
114 __asm__ __volatile__(SMP_WMB "\
120 : "=&r" (old), "=&r" (t), "=m" (*p)
121 : "r" (mask), "r" (p), "m" (*p)
124 return (old & mask) != 0;
/*
 * Atomically toggle bit `nr' in the bitmap at `addr' and return its
 * previous value (nonzero if it was set, 0 otherwise).
 * NOTE(review): the asm instruction string, local declarations,
 * clobber list and closing brace appear truncated in this view.
 */
127 extern __inline__ int test_and_change_bit(int nr, volatile void *addr)
130 unsigned int mask = 1 << (nr & 0x1f);
131 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
133 __asm__ __volatile__(SMP_WMB "\
139 : "=&r" (old), "=&r" (t), "=m" (*p)
140 : "r" (mask), "r" (p), "m" (*p)
143 return (old & mask) != 0;
145 #endif /* __INLINE_BITOPS */
/*
 * Non-atomic read of bit `nr' in the bitmap at `addr'.
 * Returns 1 if the bit is set, 0 otherwise (word-indexed, native
 * bit order).
 * NOTE(review): the function braces appear missing in this view.
 */
147 extern __inline__ int test_bit(int nr, __const__ volatile void *addr)
149 __const__ unsigned int *p = (__const__ unsigned int *) addr;
151 return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
154 /* Return the bit position of the most significant 1 bit in a word */
155 extern __inline__ int __ilog2(unsigned int x)
/*
 * cntlzw puts the count of leading zero bits of x into lz.
 * NOTE(review): the declaration of `lz' and the return statement
 * (presumably 31 - lz) appear missing in this view -- confirm.
 */
159 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
/*
 * Find-first-zero: return the index of the least significant 0 bit.
 * x & -x isolates the lowest set bit; __ilog2 gives its position.
 * NOTE(review): ffz must operate on the complement of its argument --
 * the complement step and the all-ones guard appear missing from this
 * view; confirm against the complete source.
 */
163 extern __inline__ int ffz(unsigned int x)
167 return __ilog2(x & -x);
173 * ffs: find first bit set. This is defined the same way as
174 * the libc and compiler builtin ffs routines, therefore
175 * differs in spirit from the above ffz (man ffs).
/*
 * ffs(): index of the least significant set bit, 1-based as per the
 * libc convention (see comment above).  x & -x isolates the lowest
 * set bit; __ilog2 yields its 0-based position, +1 converts to
 * 1-based.
 * NOTE(review): braces appear missing in this view.
 */
177 extern __inline__ int ffs(int x)
179 return __ilog2(x & -x) + 1;
183 * hweightN: returns the hamming weight (i.e. the number
184 * of bits set) of a N-bit word
/* Population count: alias the portable generic_hweight* helpers
 * (defined elsewhere) rather than using asm. */
187 #define hweight32(x) generic_hweight32(x)
188 #define hweight16(x) generic_hweight16(x)
189 #define hweight8(x) generic_hweight8(x)
191 #endif /* __KERNEL__ */
194 * This implementation of find_{first,next}_zero_bit was stolen from
195 * Linus' asm-alpha/bitops.h.
/* find_first_zero_bit is find_next_zero_bit starting from bit 0. */
197 #define find_first_zero_bit(addr, size) \
198 find_next_zero_bit((addr), (size), 0)
/*
 * Find the first zero bit at or after bit `offset' in the `size'-bit
 * bitmap at `addr', scanning word by word in native bit order.
 * `result' tracks the bit index of the word being examined; bits
 * below `offset' in the first word are forced to 1 so ffz() skips
 * them.
 * NOTE(review): most of the scan loop (size checks, the word loop,
 * the found/not-found exits) appears truncated in this view --
 * confirm against the complete source before editing.
 */
200 extern __inline__ unsigned long find_next_zero_bit(void * addr,
201 unsigned long size, unsigned long offset)
203 unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
204 unsigned int result = offset & ~31UL;
213 tmp |= ~0UL >> (32-offset);
222 if ((tmp = *p++) != ~0U)
233 return result + ffz(tmp);
237 #define _EXT2_HAVE_ASM_BITOPS_
241 * test_and_{set,clear}_bit guarantee atomicity without
242 * disabling interrupts.
/*
 * ext2 bitmaps use little-endian bit numbering; XOR-ing the bit index
 * with 0x18 (24) reverses the byte order within each 32-bit word so
 * the native big-endian bit operations hit the correct bit.
 */
244 #define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, addr)
245 #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, addr)
/*
 * Byte-granular (non-atomic) ext2_set_bit: read-modify-write of the
 * byte holding bit `nr', returning the previous value of that bit
 * (0 or 1).  Byte addressing avoids any endian fixup.
 * NOTE(review): the byte-offset step (ADDR += nr >> 3), the store,
 * the return statement and closing brace appear truncated in this
 * view.
 */
248 extern __inline__ int ext2_set_bit(int nr, void * addr)
251 unsigned char *ADDR = (unsigned char *) addr;
255 mask = 1 << (nr & 0x07);
256 oldbit = (*ADDR & mask) ? 1 : 0;
/*
 * Byte-granular (non-atomic) ext2_clear_bit: clears bit `nr' and
 * returns its previous value (0 or 1).
 * NOTE(review): the byte-offset step, the return statement and
 * closing brace appear truncated in this view.
 */
261 extern __inline__ int ext2_clear_bit(int nr, void * addr)
264 unsigned char *ADDR = (unsigned char *) addr;
268 mask = 1 << (nr & 0x07);
269 oldbit = (*ADDR & mask) ? 1 : 0;
270 *ADDR = *ADDR & ~mask;
273 #endif /* __KERNEL__ */
/*
 * Read bit `nr' of a little-endian (ext2) bitmap.  Addressed byte by
 * byte, so no endian conversion is needed.  Returns 0 or 1.
 * NOTE(review): braces appear missing in this view.
 */
275 extern __inline__ int ext2_test_bit(int nr, __const__ void * addr)
277 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
279 return (ADDR[nr >> 3] >> (nr & 7)) & 1;
283 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
284 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
/* ext2_find_first_zero_bit is ext2_find_next_zero_bit from bit 0. */
287 #define ext2_find_first_zero_bit(addr, size) \
288 ext2_find_next_zero_bit((addr), (size), 0)
/*
 * Find the first zero bit (little-endian bit order) at or after bit
 * `offset' in the `size'-bit ext2 bitmap at `addr'.  Each word is
 * byte-swapped with cpu_to_le32p() before scanning so ffz() sees
 * little-endian bit numbering; bits below `offset' in the first word
 * are forced to 1 so they are skipped.
 * NOTE(review): most of the scan loop appears truncated in this view
 * -- confirm against the complete source before editing.
 */
290 extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
291 unsigned long size, unsigned long offset)
293 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
294 unsigned int result = offset & ~31UL;
302 tmp = cpu_to_le32p(p++);
303 tmp |= ~0UL >> (32-offset);
312 if ((tmp = cpu_to_le32p(p++)) != ~0U)
319 tmp = cpu_to_le32p(p);
323 return result + ffz(tmp);
326 /* Bitmap functions for the minix filesystem. */
/* Implemented as straight aliases of the ext2 (little-endian bitmap)
 * helpers above; minix_set_bit discards the old-bit return value. */
327 #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
328 #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
329 #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
330 #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
331 #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
333 #endif /* _PPC_BITOPS_H */