#ifndef _LINUX_BYTEORDER_SWAB_H
#define _LINUX_BYTEORDER_SWAB_H
/*
 * linux/byteorder/swab.h
 * Byte-swapping, independently from CPU endianness
 *
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    separated swab functions from cpu_to_XX,
 *    to clean up support for bizarre-endian architectures.
 *
 * See asm-i386/byteorder.h and similar headers for examples of how to
 * provide architecture-dependent optimized versions.
 */
/* casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
#define ___constant_swab16(x) ___swab16(x)
#define ___constant_swab32(x) ___swab32(x)
#define ___constant_swab64(x) ___swab64(x)
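
/*
 * Worked example (illustrative, not part of the original header): for the
 * constant 0x12345678UL, ___swab32() masks out each byte and shifts it to
 * the mirrored position:
 *
 *	(0x12345678UL & 0x000000ffUL) << 24  ->  0x78000000UL
 *	(0x12345678UL & 0x0000ff00UL) <<  8  ->  0x00560000UL
 *	(0x12345678UL & 0x00ff0000UL) >>  8  ->  0x00003400UL
 *	(0x12345678UL & 0xff000000UL) >> 24  ->  0x00000012UL
 *
 * ORing the four terms gives 0x78563412UL; ___swab16() and ___swab64()
 * follow the same pattern for 2 and 8 bytes.
 */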
/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swab16
# define __arch__swab16(x) ___swab16(x)
#endif
#ifndef __arch__swab32
# define __arch__swab32(x) ___swab32(x)
#endif
#ifndef __arch__swab64
# define __arch__swab64(x) ___swab64(x)
#endif
#ifndef __arch__swab16p
# define __arch__swab16p(x) __swab16(*(x))
#endif
#ifndef __arch__swab32p
# define __arch__swab32p(x) __swab32(*(x))
#endif
#ifndef __arch__swab64p
# define __arch__swab64p(x) __swab64(*(x))
#endif
#ifndef __arch__swab16s
# define __arch__swab16s(x) do { *(x) = __swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
# define __arch__swab32s(x) do { *(x) = __swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
# define __arch__swab64s(x) do { *(x) = __swab64p((x)); } while (0)
#endif
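
/*
 * A minimal sketch (not part of this header, names are illustrative) of how
 * an asm-<arch>/byteorder.h could plug in an optimized version before this
 * file is included, assuming a GCC-style bswap instruction as on i386:
 *
 *	static __inline__ __u32 __arch_swab32_asm(__u32 x)
 *	{
 *		__asm__("bswap %0" : "=r" (x) : "0" (x));
 *		return x;
 *	}
 *	#define __arch__swab32(x) __arch_swab32_asm(x)
 *
 * When no such override exists, the portable ___swabXX() macros above are
 * used instead.
 */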
/*
 * Allow constant folding
 */
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___swab16((x)) : \
 __fswab16((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab32((x)) : \
 __fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___swab64((x)) : \
 __fswab64((x)))
#else
# define __swab16(x) __fswab16(x)
# define __swab32(x) __fswab32(x)
# define __swab64(x) __fswab64(x)
#endif /* OPTIMIZE */
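
/*
 * Illustration (a sketch, not part of the original header): under an
 * optimizing GCC build, __builtin_constant_p() lets a call such as
 *
 *	__u32 y = __swab32(0x12345678);
 *
 * fold at compile time to 0x78563412 via ___swab32(), while a run-time
 * argument falls through to the inline function __fswab32() and thus to
 * __arch__swab32().
 */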
static __inline__ __attribute__((const)) __u16 __fswab16(__u16 x)
{
	return __arch__swab16(x);
}
static __inline__ __u16 __swab16p(const __u16 *x)
{
	return __arch__swab16p(x);
}
static __inline__ void __swab16s(__u16 *addr)
{
	__arch__swab16s(addr);
}
static __inline__ __attribute__((const)) __u32 __fswab32(__u32 x)
{
	return __arch__swab32(x);
}
static __inline__ __u32 __swab32p(const __u32 *x)
{
	return __arch__swab32p(x);
}
static __inline__ void __swab32s(__u32 *addr)
{
	__arch__swab32s(addr);
}
static __inline__ __attribute__((const)) __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
	/* swap as two 32-bit halves where native 64-bit swaps are slow */
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL<<32)-1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
	return __arch__swab64(x);
# endif
}
static __inline__ __u64 __swab64p(const __u64 *x)
{
	return __arch__swab64p(x);
}
static __inline__ void __swab64s(__u64 *addr)
{
	__arch__swab64s(addr);
}
#if defined(__KERNEL__)
#define swab16 __swab16
#define swab32 __swab32
#define swab64 __swab64
#define swab16p __swab16p
#define swab32p __swab32p
#define swab64p __swab64p
#define swab16s __swab16s
#define swab32s __swab32s
#define swab64s __swab64s
#endif /* __KERNEL__ */
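
/*
 * Usage sketch (illustrative only): swab16/swab32/swab64 return a
 * byte-swapped copy, the *p variants swap a value read through a pointer,
 * and the *s variants swap in place:
 *
 *	__u32 v = 0x12345678;
 *	__u32 a = swab32(v);	a == 0x78563412, v unchanged
 *	__u32 b = swab32p(&v);	b == 0x78563412, v unchanged
 *	swab32s(&v);		v == 0x78563412 afterwards
 */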
#endif /* _LINUX_BYTEORDER_SWAB_H */