1 #ifndef _PPC_BYTEORDER_H
2 #define _PPC_BYTEORDER_H
/*
 * Byte-reversed (little-endian) 16-bit load: lhbrx fetches the
 * halfword at addr with its two bytes swapped.  The "m" (*addr) dummy
 * input makes the memory read visible to the compiler so it cannot
 * reorder or cache the access across the asm.
 * NOTE(review): the function's braces, the declaration of `val`, and
 * the return statement are not visible in this excerpt.
 */
8 static __inline__ unsigned ld_le16(const volatile unsigned short *addr)
12 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
/*
 * Byte-reversed (little-endian) 16-bit store: sthbrx writes `val` to
 * *addr with its two bytes swapped.  The "=m" (*addr) output tells the
 * compiler the asm modifies that memory location.
 * NOTE(review): the function's braces are not visible in this excerpt.
 */
16 static __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
18 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
/*
 * Byte-reversed (little-endian) 32-bit load: lwbrx fetches the word at
 * addr with all four bytes reversed.  The "m" (*addr) dummy input
 * keeps the compiler honest about the memory read.
 * NOTE(review): the function's braces, the declaration of `val`, and
 * the return statement are not visible in this excerpt.
 */
21 static __inline__ unsigned ld_le32(const volatile unsigned *addr)
25 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
/*
 * Byte-reversed (little-endian) 32-bit store: stwbrx writes `val` to
 * *addr with all four bytes reversed.  The "=m" (*addr) output marks
 * the asm as writing that memory location.
 * NOTE(review): the function's braces are not visible in this excerpt.
 */
29 static __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
31 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
34 /* alas, egcs sounds like it has a bug in how it uses this inline asm,
35    which can cause file corruption.  Until I hear that it's fixed, I can
36    live without the extra speed.  I hope. */
/*
 * Fallback for compilers older than egcs (gcc 2.90): swap by bouncing
 * the value through memory with a byte-reversed load.
 * NOTE(review): this condition is also true for gcc 3.x and later,
 * whose minor version resets below 90, so they would take this slow
 * path too -- confirm that is intended.
 * NOTE(review): these names use a single underscore (__arch_swab16)
 * while the asm branch below defines __arch__swab16; verify which
 * spelling the generic byteorder headers expect.
 * NOTE: taking &x requires the macro argument to be an lvalue.
 */
37 #if !(__GNUC__ >= 2 && __GNUC_MINOR__ >= 90)
39 # define __arch_swab16(x) ld_le16(&x)
40 # define __arch_swab32(x) ld_le32(&x)
/*
 * Register-only 16-bit byte swap.  The "0" constraint preloads %0 with
 * value >> 8 (the new low byte); rlwimi then rotates `value` left by 8
 * and inserts bits 16-23, depositing the old low byte into the high
 * position.  __attribute__((const)) tells gcc the result depends only
 * on the argument, permitting CSE of repeated calls.
 * NOTE(review): the asm output-operand line and the function's
 * braces/return are not visible in this excerpt.
 */
42 static __inline__ __attribute__((const)) __u16 ___arch__swab16(__u16 value)
46 __asm__("rlwimi %0,%1,8,16,23"
48 : "r" (value), "0" (value >> 8));
/*
 * Register-only 32-bit byte swap built from rotate-and-insert steps:
 * the "0" constraint preloads %0 with value >> 24, and each rlwimi
 * rotates the source and deposits one more byte into its mirrored
 * position.  __attribute__((const)) permits gcc to CSE calls.
 * NOTE(review): the final rlwimi (covering bits 0-7) and the asm
 * output-operand line are not visible in this excerpt.
 */
52 static __inline__ __attribute__((const)) __u32 ___arch__swab32(__u32 value)
56 __asm__("rlwimi %0,%1,24,16,23\n\t"
57 "rlwimi %0,%1,8,8,15\n\t"
60 : "r" (value), "0" (value >> 24));
/* Hook the asm swappers into the generic byteorder machinery. */
63 #define __arch__swab32(x) ___arch__swab32(x)
64 #define __arch__swab16(x) ___arch__swab16(x)
69 /* The same, but returns the converted value from the location pointed to by addr. */
70 #define __arch__swab16p(addr) ld_le16(addr)
71 #define __arch__swab32p(addr) ld_le32(addr)
73 /* The same, but do the conversion in situ, i.e. put the swapped value back
   to addr: a normal load of *addr followed by a byte-reversed store. */
74 #define __arch__swab16s(addr) st_le16(addr,*addr)
75 #define __arch__swab32s(addr) st_le32(addr,*addr)
/* gcc's `long long` support lets the generic big-endian header also
   provide the 64-bit swab operations. */
79 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
80 #define __BYTEORDER_HAS_U64__
82 #include <linux/byteorder/big_endian.h>
84 #endif /* _PPC_BYTEORDER_H */