From: Becky Bruce
Date: Tue, 27 Sep 2005 19:28:56 +0000 (-0500)
Subject: [PATCH] powerpc: merge byteorder.h
X-Git-Tag: v3.12-rc1~40274^2~301
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a559c91d77c3220be521453bd23815e1e1980a82;p=kernel%2Fkernel-generic.git

powerpc: Merge byteorder.h

Essentially adopts the 64-bit version of this file. The 32-bit version
had been using unsigned ints for arguments/return values that were
actually only 16 bits - the new file uses __u16 for these items, as in
the 64-bit version of the header. The order of some of the asm
constraints in the 64-bit version was slightly different from the
32-bit version, but they produce identical code.

Signed-off-by: Becky Bruce
Signed-off-by: Kumar Gala
Signed-off-by: Paul Mackerras
---

diff --git a/include/asm-ppc64/byteorder.h b/include/asm-powerpc/byteorder.h
similarity index 90%
rename from include/asm-ppc64/byteorder.h
rename to include/asm-powerpc/byteorder.h
index 8b57da6..b377522 100644
--- a/include/asm-ppc64/byteorder.h
+++ b/include/asm-powerpc/byteorder.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_BYTEORDER_H
-#define _PPC64_BYTEORDER_H
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -77,10 +77,13 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
 
 #ifndef __STRICT_ANSI__
 #define __BYTEORDER_HAS_U64__
-#endif
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+#endif /* __STRICT_ANSI__ */
 
 #endif /* __GNUC__ */
 
 #include <linux/byteorder/big_endian.h>
 
-#endif /* _PPC64_BYTEORDER_H */
+#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/include/asm-ppc/byteorder.h b/include/asm-ppc/byteorder.h
deleted file mode 100644
index c63c81e..0000000
--- a/include/asm-ppc/byteorder.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _PPC_BYTEORDER_H
-#define _PPC_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-#ifdef __KERNEL__
-
-extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
-{
-	unsigned val;
-
-	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
-{
-	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
-{
-	unsigned val;
-
-	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
-{
-	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
-{
-	__u16 result;
-
-	__asm__("rlwimi %0,%2,8,16,23" : "=&r" (result) : "0" (value >> 8), "r" (value));
-	return result;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
-{
-	__u32 result;
-
-	__asm__("rlwimi %0,%2,24,16,23" : "=&r" (result) : "0" (value>>24), "r" (value));
-	__asm__("rlwimi %0,%2,8,8,15" : "=&r" (result) : "0" (result), "r" (value));
-	__asm__("rlwimi %0,%2,24,0,7" : "=&r" (result) : "0" (result), "r" (value));
-
-	return result;
-}
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
-
-#endif /* __KERNEL__ */
-
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-# define __BYTEORDER_HAS_U64__
-# define __SWAB_64_THRU_32__
-#endif
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _PPC_BYTEORDER_H */
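
As a quick illustration of the __u16 point in the commit message: the
rlwimi sequence only ever produces meaningful data in the low halfword
of the register, so a 16-bit argument/return type is sufficient. Below
is a minimal userspace sketch, not part of the patch; it assumes a
PowerPC target (rlwimi is a PowerPC instruction), uses uint16_t in
place of the kernel's __u16, and the name swab16_sketch is ours.

#include <stdint.h>
#include <stdio.h>

/* Same instruction as ___arch__swab16 in the headers above. */
static inline uint16_t swab16_sketch(uint16_t value)
{
	uint16_t result;

	/* Rotate 'value' left by 8 and insert bits 16-23 of the rotated
	 * word (the original low byte) into 'result', which the "0"
	 * constraint preloads with value >> 8 (the original high byte
	 * in the low position): the two bytes end up swapped. */
	__asm__("rlwimi %0,%2,8,16,23"
		: "=&r" (result)
		: "0" (value >> 8), "r" (value));
	return result;
}

int main(void)
{
	printf("0x%04x\n", swab16_sketch(0x1234));	/* prints 0x3412 */
	return 0;
}

Built with e.g. gcc -O2 on a powerpc host, this prints 0x3412, the same
result the generic byte-swap helpers would give for 0x1234.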