spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);
+
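+/* Atomically OR 'mask' into *addr; return the previously-set bits of 'mask'. */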
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+{
+ unsigned long old, flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+ old = *addr;
+ *addr = old | mask;
+ spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+ return old & mask;
+}
+EXPORT_SYMBOL(___set_bit);
+
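+/* Atomically clear the 'mask' bits in *addr; return the previously-set bits of 'mask'. */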
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+{
+ unsigned long old, flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+ old = *addr;
+ *addr = old & ~mask;
+ spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+ return old & mask;
+}
+EXPORT_SYMBOL(___clear_bit);
+
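+/* Atomically toggle the 'mask' bits in *addr; return the previously-set bits of 'mask'. */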
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+{
+ unsigned long old, flags;
+
+ spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+ old = *addr;
+ *addr = old ^ mask;
+ spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+ return old & mask;
+}
+EXPORT_SYMBOL(___change_bit);
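
For reference, ATOMIC_HASH is the per-address hashed-spinlock scheme already used by atomic_set() in this file; the new helpers simply reuse it. A minimal sketch of how such a hash is typically defined (the size, shift, and initializer below are assumptions, not part of this patch):

    /* Sketch: map a word address onto a small array of spinlocks so that
     * unrelated addresses usually take different locks (values assumed). */
    #define ATOMIC_HASH_SIZE	4
    #define ATOMIC_HASH(a) \
    	(&__atomic_hash[(((unsigned long)a) >> 8) & (ATOMIC_HASH_SIZE - 1)])

    spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
    	[0 ... (ATOMIC_HASH_SIZE - 1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
    };

Hashing on the address means two CPUs touching different words rarely contend on the same lock, unlike a single global lock.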
+++ /dev/null
-/* bitops.S: Low level assembler bit operations.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <asm/ptrace.h>
-#include <asm/psr.h>
-
- .text
- .align 4
-
- .globl __bitops_begin
-__bitops_begin:
-
- /* Take bits in %g2 and set them in word at %g1,
- * return whether bits were set in original value
- * in %g2. %g4 holds value to restore into %o7
- * in delay slot of jmpl return, %g3 + %g5 + %g7 can be
- * used as temporaries and thus is considered clobbered
- * by all callers.
- */
- .globl ___set_bit
-___set_bit:
- rd %psr, %g3
- nop; nop; nop;
- or %g3, PSR_PIL, %g5
- wr %g5, 0x0, %psr
- nop; nop; nop
-#ifdef CONFIG_SMP
- set bitops_spinlock, %g5
-2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
- orcc %g7, 0x0, %g0 ! Did we get it?
- bne 2b ! Nope...
-#endif
- ld [%g1], %g7
- or %g7, %g2, %g5
- and %g7, %g2, %g2
-#ifdef CONFIG_SMP
- st %g5, [%g1]
- set bitops_spinlock, %g5
- stb %g0, [%g5]
-#else
- st %g5, [%g1]
-#endif
- wr %g3, 0x0, %psr
- nop; nop; nop
- jmpl %o7, %g0
- mov %g4, %o7
-
- /* Same as above, but clears the bits from %g2 instead. */
- .globl ___clear_bit
-___clear_bit:
- rd %psr, %g3
- nop; nop; nop
- or %g3, PSR_PIL, %g5
- wr %g5, 0x0, %psr
- nop; nop; nop
-#ifdef CONFIG_SMP
- set bitops_spinlock, %g5
-2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
- orcc %g7, 0x0, %g0 ! Did we get it?
- bne 2b ! Nope...
-#endif
- ld [%g1], %g7
- andn %g7, %g2, %g5
- and %g7, %g2, %g2
-#ifdef CONFIG_SMP
- st %g5, [%g1]
- set bitops_spinlock, %g5
- stb %g0, [%g5]
-#else
- st %g5, [%g1]
-#endif
- wr %g3, 0x0, %psr
- nop; nop; nop
- jmpl %o7, %g0
- mov %g4, %o7
-
- /* Same thing again, but this time toggles the bits from %g2. */
- .globl ___change_bit
-___change_bit:
- rd %psr, %g3
- nop; nop; nop
- or %g3, PSR_PIL, %g5
- wr %g5, 0x0, %psr
- nop; nop; nop
-#ifdef CONFIG_SMP
- set bitops_spinlock, %g5
-2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
- orcc %g7, 0x0, %g0 ! Did we get it?
- bne 2b ! Nope...
-#endif
- ld [%g1], %g7
- xor %g7, %g2, %g5
- and %g7, %g2, %g2
-#ifdef CONFIG_SMP
- st %g5, [%g1]
- set bitops_spinlock, %g5
- stb %g0, [%g5]
-#else
- st %g5, [%g1]
-#endif
- wr %g3, 0x0, %psr
- nop; nop; nop
- jmpl %o7, %g0
- mov %g4, %o7
-
- .globl __bitops_end
-__bitops_end:
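
For comparison, the deleted assembler saved %psr, raised PIL to mask interrupts, and on SMP spun with ldstub on one global byte lock (bitops_spinlock) around the load/modify/store. Rendered loosely as C (a sketch only; bitops_lock()/bitops_unlock() are hypothetical stand-ins for the ldstub loop and the releasing stb):

    unsigned long flags, old;

    local_irq_save(flags);      /* rd %psr; wr it back with PSR_PIL set */
    bitops_lock();              /* hypothetical: ldstub spin on bitops_spinlock (SMP) */
    old = *addr;
    *addr = old | mask;         /* or andn / xor for clear / change */
    bitops_unlock();            /* hypothetical: stb %g0, [bitops_spinlock] */
    local_irq_restore(flags);   /* write the saved %psr back */

The C replacements trade the single global byte lock for the hashed locks and drop the fixed %g-register calling convention the callers below had to honor.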
#ifdef __KERNEL__
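+/* Out-of-line helpers in lib/atomic32.c, serialized by the hashed atomic locks. */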
+extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte (Sparc is big-endian).
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- register unsigned long mask asm("g2");
- register unsigned long *ADDR asm("g1");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g5");
- register int tmp4 asm("g7");
+ unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___set_bit\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
- : "0" (mask), "r" (ADDR)
- : "memory", "cc");
-
- return mask != 0;
+ return ___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
- register unsigned long mask asm("g2");
- register unsigned long *ADDR asm("g1");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g5");
- register int tmp4 asm("g7");
+ unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___set_bit\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
- : "0" (mask), "r" (ADDR)
- : "memory", "cc");
+ (void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- register unsigned long mask asm("g2");
- register unsigned long *ADDR asm("g1");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g5");
- register int tmp4 asm("g7");
+ unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___clear_bit\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
- : "0" (mask), "r" (ADDR)
- : "memory", "cc");
-
- return mask != 0;
+ return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- register unsigned long mask asm("g2");
- register unsigned long *ADDR asm("g1");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g5");
- register int tmp4 asm("g7");
+ unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___clear_bit\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
- : "0" (mask), "r" (ADDR)
- : "memory", "cc");
+ (void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- register unsigned long mask asm("g2");
- register unsigned long *ADDR asm("g1");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g5");
- register int tmp4 asm("g7");
+ unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___change_bit\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
- : "0" (mask), "r" (ADDR)
- : "memory", "cc");
-
- return mask != 0;
+ return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
- register unsigned long mask asm("g2");
- register unsigned long *ADDR asm("g1");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g5");
- register int tmp4 asm("g7");
+ unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___change_bit\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
- : "0" (mask), "r" (ADDR)
- : "memory", "cc");
+ (void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
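
Callers are unaffected by this change; the inline wrappers keep the same signatures. A typical (hypothetical) use of the API, relying only on the generic kernel bitmap interfaces:

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static DECLARE_BITMAP(busy_map, 64);	/* hypothetical example bitmap */

    static int claim_slot(unsigned int slot)
    {
    	/* Atomic read-modify-write: non-zero means the bit was already set. */
    	if (test_and_set_bit(slot, busy_map))
    		return -EBUSY;
    	return 0;
    }

    static void release_slot(unsigned int slot)
    {
    	clear_bit(slot, busy_map);
    }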