 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000 Silicon Graphics, Inc.
#include <linux/types.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/sgidefs.h>
#include <asm/system.h>
#include <linux/config.h>
 * clear_bit() doesn't provide any barrier for the compiler.
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
 * Only disable interrupts for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */
#ifdef CONFIG_CPU_HAS_LLSC
#include <asm/mipsregs.h>
 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
 * set_bit - Atomically set a bit in memory
 * @addr: the address to start counting from
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
extern __inline__ void
set_bit(int nr, volatile void *addr)
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
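/*
 * Usage sketch (illustrative only; the bitmap and bit number below are
 * made up and not part of this header):
 *
 *	static unsigned long pending_map[2];
 *
 *	set_bit(37, pending_map);
 *
 * The word is selected by nr >> 5 and the bit within it by nr & 0x1f, so
 * bit 37 lands in pending_map[1], bit 5, without any locking by the caller.
 */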
 * __set_bit - Set a bit in memory
 * @addr: the address to start counting from
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
extern __inline__ void __set_bit(int nr, volatile void * addr)
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
	*m |= 1UL << (nr & 31);
 * clear_bit - Clears a bit in memory
 * @addr: Address to start counting from
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
extern __inline__ void
clear_bit(int nr, volatile void *addr)
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
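/*
 * Usage sketch (illustrative only; the flag word and its meaning are made
 * up): when a bit acts as a lock, order earlier stores before the release,
 * since clear_bit() itself is not a barrier.
 *
 *	static unsigned long resource_locked;
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &resource_locked);
 */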
 * change_bit - Toggle a bit in memory
 * @addr: Address to start counting from
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
extern __inline__ void
change_bit(int nr, volatile void *addr)
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
extern __inline__ void __change_bit(int nr, volatile void * addr)
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
	*m ^= 1UL << (nr & 31);
 * test_and_set_bit - Set a bit and return its old value
 * @addr: Address to count from
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
extern __inline__ int
test_and_set_bit(int nr, volatile void *addr)
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;
	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		" and\t%2, %0, %3\n\t"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
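/*
 * Usage sketch (illustrative only; names invented): a "claim the resource"
 * pattern built on the returned old value.
 *
 *	static unsigned long claim_map;
 *
 *	if (!test_and_set_bit(3, &claim_map)) {
 *		use_resource();
 *		clear_bit(3, &claim_map);
 *	}
 *
 * Only the CPU that changes the bit from 0 to 1 sees 0 returned, so the
 * body runs on at most one CPU at a time.
 */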
 * __test_and_set_bit - Set a bit and return its old value
 * @addr: Address to count from
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
 * test_and_clear_bit - Clear a bit and return its old value
 * @addr: Address to count from
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
extern __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;
	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		" and\t%2, %0, %3\n\t"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
 * __test_and_clear_bit - Clear a bit and return its old value
 * @addr: Address to count from
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
 * test_and_change_bit - Change a bit and return its old value
 * @addr: Address to count from
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
extern __inline__ int
test_and_change_bit(int nr, volatile void *addr)
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;
	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"xor\t%2, %0, %3\n\t"
		" and\t%2, %0, %3\n\t"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
 * __test_and_change_bit - Change a bit and return its old value
 * @addr: Address to count from
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
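/*
 * Usage sketch (illustrative only): the double-underscore variants are for
 * bitmaps already protected by other means, e.g. one still private to the
 * caller during initialisation.
 *
 *	unsigned long init_map[4];
 *
 *	memset(init_map, 0, sizeof(init_map));
 *	__set_bit(0, init_map);
 *	__change_bit(1, init_map);
 *
 * Mixing __set_bit() and set_bit() on the same word from different CPUs can
 * lose updates; that is exactly the race the atomic versions avoid.
 */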
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
extern __inline__ void set_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	__bi_restore_flags(flags);
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
extern __inline__ void __set_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
 * clear_bit - Clears a bit in memory
 * @addr: Address to start counting from
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
extern __inline__ void clear_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	__bi_restore_flags(flags);
 * change_bit - Toggle a bit in memory
 * @addr: Address to start counting from
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
extern __inline__ void change_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	__bi_restore_flags(flags);
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
extern __inline__ void __change_bit(int nr, volatile void * addr)
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
	*m ^= 1UL << (nr & 31);
 * test_and_set_bit - Set a bit and return its old value
 * @addr: Address to count from
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	__bi_restore_flags(flags);
 * __test_and_set_bit - Set a bit and return its old value
 * @addr: Address to count from
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
 * test_and_clear_bit - Clear a bit and return its old value
 * @addr: Address to count from
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	__bi_restore_flags(flags);
 * __test_and_clear_bit - Clear a bit and return its old value
 * @addr: Address to count from
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
 * test_and_change_bit - Change a bit and return its old value
 * @addr: Address to count from
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	__bi_restore_flags(flags);
 * __test_and_change_bit - Change a bit and return its old value
 * @addr: Address to count from
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
	volatile int *a = addr;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
#undef __bi_save_flags
#undef __bi_restore_flags
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
extern __inline__ int test_bit(int nr, volatile void *addr)
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
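/*
 * Usage sketch (illustrative only; status_map and handle_pending() are made
 * up): test_bit() only reads, so it pairs with the atomic setters above.
 *
 *	if (test_bit(5, status_map))
 *		handle_pending(5);
 *
 * The read is not atomic with any following update; use the test_and_*()
 * forms when testing and changing must be a single step.
 */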
/* Little endian versions. */
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
extern __inline__ int find_first_zero_bit (void *addr, unsigned size)
	__asm__ (".set\tnoreorder\n\t"
		"1:\tsubu\t$1,%6,%0\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"1:\tand\t%2,$1,%1\n\t"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
		 * Look for zero in the first word
#error "Fix this for big endian byte order"
		__asm__(".set\tnoreorder\n\t"
			"1:\tand\t$1,%4,%1\n\t"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
	if (set < (32 - bit))
	 * No zero yet, search the remaining full words for a zero
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
#endif /* !(__MIPSEB__) */
 * ffz - find first zero in word.
 * @word: The word to search
 * Undefined if no zero exists, so code should check against ~0UL first.
extern __inline__ unsigned long ffz(unsigned long word)
	unsigned int mask = 1;
		".set\tnoreorder\n\t"
		"1:\tand\t$1,%2,%1\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
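/*
 * Worked examples (for illustration): ffz() returns the index of the least
 * significant zero bit, so
 *
 *	ffz(0x00000000) == 0
 *	ffz(0x00000001) == 1
 *	ffz(0x0000ffff) == 16
 *
 * ffz(~0UL) is undefined, which is why callers compare against ~0UL first.
 */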
 * ffs - find first bit set
 * @x: the word to search
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from the ffz() above (man ffs).
#define ffs(x) generic_ffs(x)
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 * The Hamming Weight of a number is the total number of bits set in it.
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
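/*
 * Worked examples (for illustration): the Hamming weight is simply a
 * population count, e.g.
 *
 *	hweight32(0x0000f00f) == 8
 *	hweight16(0x00ff) == 8
 *	hweight8(0x81) == 2
 */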
#endif /* __KERNEL__ */
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
extern __inline__ int find_next_zero_bit(void *addr, int size, int offset)
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
		tmp |= ~0UL >> (32-offset);
	while (size & ~31UL) {
	return result + ffz(tmp);
/* Linus sez that gcc can optimize the following correctly; we'll see if this
 * holds for MIPS as it does for the Alpha.
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
extern int find_first_zero_bit (void *addr, unsigned size);
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
#endif /* (__MIPSEB__) */
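/*
 * Usage sketch (illustrative only; the bitmap and size are made up):
 * allocating the first free slot out of a 128-entry table.
 *
 *	static unsigned long free_map[4];
 *	int slot;
 *
 *	slot = find_first_zero_bit(free_map, 128);
 *	if (slot < 128)
 *		set_bit(slot, free_map);
 *
 * find_next_zero_bit(free_map, 128, slot + 1) continues the scan after that
 * slot. Neither search is atomic with the following set_bit(), so a real
 * allocator guards this with a lock or retries with test_and_set_bit().
 */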
/* Now for the ext2 filesystem bit operations and helper routines. */
extern __inline__ int ext2_set_bit(int nr, void * addr)
	int mask, retval;
	unsigned long flags;
	unsigned char *ADDR = (unsigned char *) addr;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	restore_flags(flags);
extern __inline__ int ext2_clear_bit(int nr, void * addr)
	int mask, retval;
	unsigned long flags;
	unsigned char *ADDR = (unsigned char *) addr;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	restore_flags(flags);
extern __inline__ int ext2_test_bit(int nr, const void * addr)
	const unsigned char *ADDR = (const unsigned char *) addr;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 *	tmp = __swab32(*(p++));
		 *	tmp |= ~0UL >> (32-offset);
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp |= __swab32(~0UL >> (32-offset));
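		/*
		 * Worked example (for illustration): with offset == 8,
		 * ~0UL >> (32-offset) is 0x000000ff, i.e. ext2 bits 0-7,
		 * which live in the first byte on disk. A big endian load
		 * puts that byte into the top byte of tmp, and
		 * __swab32(0x000000ff) == 0xff000000 masks exactly those
		 * bits as already seen.
		 */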
	while (size & ~31UL) {
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
	return result + ffz(__swab32(tmp));
#else /* !(__MIPSEB__) */
/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
	find_next_zero_bit((addr), (size), (offset))
#endif /* !(__MIPSEB__) */
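/*
 * Usage sketch (illustrative only; the buffer and size are made up): the
 * ext2_*() helpers use little endian bit numbering regardless of CPU byte
 * order, so the same on-disk bitmap layout works on any host.
 *
 *	char bitmap_block[1024];
 *	unsigned long bit;
 *
 *	bit = ext2_find_next_zero_bit(bitmap_block, 8 * 1024, 0);
 *	if (bit < 8 * 1024)
 *		ext2_set_bit(bit, bitmap_block);
 */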
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This severely limits the Minix filesystem's value for data exchange.
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* _ASM_BITOPS_H */