+++ /dev/null
-/*
- * String handling functions for PowerPC.
- *
- * Copyright (C) 1996 Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/processor.h>
-#include <asm/errno.h>
-#include <asm/ppc_asm.h>
-
-_GLOBAL(strcpy)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r5)
- bne 1b
- blr
-
-_GLOBAL(strncpy)
- cmpwi 0,r5,0
- beqlr
- mtctr r5
- addi r6,r3,-1
- addi r4,r4,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r6)
- bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
- blr
-
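The bdnzf 2,1b in strncpy above decrements CTR and loops while CTR is still non-zero and cr0's EQ bit (set by the cmpwi against zero) is clear, so the routine stores at most r5 bytes and stops right after copying a terminating NUL. A rough C model of that loop, as an illustration only; note that, unlike ISO strncpy, it does not zero-pad the remainder of the buffer:

	char *strncpy_sketch(char *dst, const char *src, unsigned long n)
	{
		char *d = dst;

		while (n--) {
			/* store first, then test: the asm issues stbu
			 * before the conditional branch falls through */
			if ((*d++ = *src++) == '\0')
				break;
		}
		return dst;	/* r3 is returned unmodified */
	}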
-_GLOBAL(strcat)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r0,1(r5)
- cmpwi 0,r0,0
- bne 1b
- addi r5,r5,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r5)
- bne 1b
- blr
-
-_GLOBAL(strcmp)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r3,1(r5)
- cmpwi 1,r3,0
- lbzu r0,1(r4)
- subf. r3,r0,r3
- beqlr 1
- beq 1b
- blr
-
-_GLOBAL(strlen)
- addi r4,r3,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- bne 1b
- subf r3,r3,r4
- blr
-
-_GLOBAL(memcmp)
- cmpwi 0,r5,0
- ble- 2f
- mtctr r5
- addi r6,r3,-1
- addi r4,r4,-1
-1: lbzu r3,1(r6)
- lbzu r0,1(r4)
- subf. r3,r0,r3
- bdnzt 2,1b
- blr
-2: li r3,0
- blr
-
-_GLOBAL(memchr)
- cmpwi 0,r5,0
- ble- 2f
- mtctr r5
- addi r3,r3,-1
-1: lbzu r0,1(r3)
- cmpw 0,r0,r4
- bdnzf 2,1b
- beqlr
-2: li r3,0
- blr
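memcmp and memchr above are mirror images of the same CTR loop: memcmp uses bdnzt 2,1b (keep going while the bytes compare equal), memchr uses bdnzf 2,1b (keep going while the byte does not match), and both bail out with 0 on a non-positive count. A C model of the memchr semantics, for illustration; like the asm, it returns 0 when nothing matches:

	void *memchr_sketch(const void *s, int c, unsigned long n)
	{
		const unsigned char *p = s;

		while (n--)
			if (*p++ == (unsigned char)c)
				return (void *)(p - 1);	/* the beqlr path */
		return 0;				/* no match found */
	}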
-
-_GLOBAL(__clear_user)
- addi r6,r3,-4
- li r3,0
- li r5,0
- cmplwi 0,r4,4
- blt 7f
- /* clear a single word */
-11: stwu r5,4(r6)
- beqlr
- /* clear word sized chunks */
- andi. r0,r6,3
- add r4,r0,r4
- subf r6,r0,r6
- srwi r0,r4,2
- andi. r4,r4,3
- mtctr r0
- bdz 7f
-1: stwu r5,4(r6)
- bdnz 1b
- /* clear byte sized chunks */
-7: cmpwi 0,r4,0
- beqlr
- mtctr r4
- addi r6,r6,3
-8: stbu r5,1(r6)
- bdnz 8b
- blr
-90: mr r3,r4
- blr
-91: mfctr r3
- slwi r3,r3,2
- add r3,r3,r4
- blr
-92: mfctr r3
- blr
-
- .section __ex_table,"a"
- .align 3
- .llong 11b,90b
- .llong 1b,91b
- .llong 8b,92b
- .text
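The __ex_table section above is what lets __clear_user survive a faulting store: the exception handler looks the trapping instruction up in this table and resumes at the paired fixup, and each fixup (90:, 91:, 92:) reconstructs how many bytes were left uncleared, which is __clear_user's return value. Each .llong pair emits one record of the form below, a sketch matching the layout the extable lookup expects:

	struct exception_table_entry {
		unsigned long insn;	/* address of the instruction that may fault */
		unsigned long fixup;	/* address of the recovery code to resume at */
	};

So "11b,90b" reads: if the store at local label 11 faults, continue at label 90, which returns the full remaining count (r4) in r3.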
-
-/* r3 = dst, r4 = src, r5 = count */
-_GLOBAL(__strncpy_from_user)
- addi r6,r3,-1
- addi r4,r4,-1
- cmpwi 0,r5,0
- beq 2f
- mtctr r5
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r6)
- bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
- beq 3f
-2: addi r6,r6,1
-3: subf r3,r3,r6
- blr
-99: li r3,-EFAULT
- blr
-
- .section __ex_table,"a"
- .align 3
- .llong 1b,99b
- .text
-
-/* r3 = str, r4 = len (> 0) */
-_GLOBAL(__strnlen_user)
- addi r7,r3,-1
- mtctr r4 /* ctr = len */
-1: lbzu r0,1(r7) /* get next byte */
- cmpwi 0,r0,0
- bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
- addi r7,r7,1
- subf r3,r3,r7 /* number of bytes we have looked at */
- beqlr /* return if we found a 0 byte */
- cmpw 0,r3,r4 /* did we look at all len bytes? */
- blt 99f /* if not, must have hit top */
- addi r3,r4,1 /* return len + 1 to indicate no null found */
- blr
-99: li r3,0 /* bad address, return 0 */
- blr
-
- .section __ex_table,"a"
- .align 3
- .llong 1b,99b
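Taken together with its comments, __strnlen_user above examines at most len bytes and reports how many it looked at: the count includes the terminating NUL when one is found, len + 1 means no NUL occurred within len bytes, and 0 (through the fixup at 99:) means the access faulted. A C model of those semantics, fault handling omitted, for illustration:

	long strnlen_user_sketch(const char *s, long len)
	{
		long n;

		for (n = 1; n <= len; n++)
			if (s[n - 1] == '\0')
				return n;	/* count includes the NUL */
		return len + 1;			/* no NUL within len bytes */
	}

The hunks that follow appear to be against the merged powerpc uaccess header, where the 32-bit and 64-bit user-access definitions are being folded into one file under #ifdef __powerpc64__.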
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
-#define KERNEL_DS MAKE_MM_SEG(0UL)
-#define USER_DS MAKE_MM_SEG(0xf000000000000000UL)
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
-#define KERNEL_DS MAKE_MM_SEG(~0UL)
#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
#endif
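With this change KERNEL_DS is all-ones on both word sizes, so a kernel segment passes any range check, while USER_DS caps user accesses at the last valid user byte. These segments back the classic get_fs()/set_fs() idiom; a usage sketch (the surrounding code is illustrative):

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* uaccess helpers now accept kernel pointers */
	/* ... invoke code that takes __user pointers ... */
	set_fs(old_fs);		/* always restore the caller's segment */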
#ifdef __powerpc64__
/*
- * Use the alpha trick for checking ranges:
- *
- * Is a address valid? This does a straightforward calculation rather
- * than tests.
- *
- * Address valid if:
- * - "addr" doesn't have any high-bits set
- * - AND "size" doesn't have any high-bits set
- * - OR we are in kernel mode.
- *
- * We dont have to check for high bits in (addr+size) because the first
- * two checks force the maximum result to be below the start of the
- * kernel region.
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and kernel addresses.
*/
#define __access_ok(addr, size, segment) \
- (((segment).seg & (addr | size )) == 0)
+ (((addr) <= (segment).seg) && ((size) <= (segment).seg))
#else
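The new 64-bit __access_ok deliberately never computes addr + size: since addr and size are each bounded by seg = TASK_SIZE_USER64 - 1, their sum cannot reach the kernel's linear-mapping base, which is the "large enough gap" the comment refers to. A standalone check of that arithmetic, with both constants assumed from that era's headers rather than taken from this patch:

	#include <assert.h>

	int main(void)
	{
		unsigned long seg = 0x0000400000000000UL - 1;	/* assumed TASK_SIZE_USER64 - 1 */
		unsigned long kbase = 0xc000000000000000UL;	/* assumed 64-bit PAGE_OFFSET */

		/* worst case, addr == size == seg: the sum still falls far
		 * short of kernel addresses, so no overflow test is needed */
		assert(seg + seg < kbase);
		return 0;
	}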
: "=r" (err) \
: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-#ifndef __powerpc64__
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval) \
+ __put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err) \
__asm__ __volatile__( \
"1: stw %1,0(%2)\n" \
".previous" \
: "=r" (err) \
: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-#else /* __powerpc64__ */
-#define __put_user_asm2(x, ptr, retval) \
- __put_user_asm(x, ptr, retval, "std")
#endif /* __powerpc64__ */
#define __put_user_size(x, ptr, size, retval) \
#define __get_user_asm(x, addr, err, op) \
__asm__ __volatile__( \
- "1: "op" %1,0(%2) # get_user\n" \
+ "1: "op" %1,0(%2) # get_user\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,%3\n" \
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
-#ifndef __powerpc64__
-#define __get_user_asm2(x, addr, err) \
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err) \
+ __get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err) \
__asm__ __volatile__( \
"1: lwz %1,0(%2)\n" \
"2: lwz %1+1,4(%2)\n" \
".previous" \
: "=r" (err), "=&r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
-#else
-#define __get_user_asm2(x, addr, err) \
- __get_user_asm(x, addr, err, "ld")
#endif /* __powerpc64__ */
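On 32-bit, a 64-bit __get_user is assembled as two lwz instructions, with %1+1 naming the second register of the pair that holds the long long, and both loads covered by fixup entries. A C-level model of that split (hypothetical helper name; assumes <linux/types.h> and this header; big-endian, so the high word sits at the lower address):

	static inline int get_user_u64_sketch(u64 *val, const u32 __user *p)
	{
		u32 hi, lo;

		if (__get_user(hi, p) || __get_user(lo, p + 1))
			return -EFAULT;		/* either half may fault */
		*val = ((u64)hi << 32) | lo;
		return 0;
	}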
#define __get_user_size(x, ptr, size, retval) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
- if (size > sizeof(x)) \
- (x) = __get_user_bad(); \
+ if (size > sizeof(x)) \
+ (x) = __get_user_bad(); \
switch (size) { \
case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
+ might_sleep(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
const void __user *from, unsigned long size);
#ifndef __powerpc64__
-extern inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
+
+extern inline unsigned long copy_from_user(void *to,
+ const void __user *from, unsigned long n)
{
unsigned long over;
return n;
}
-extern inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
+extern inline unsigned long copy_to_user(void __user *to,
+ const void *from, unsigned long n)
{
unsigned long over;
return n;
}
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
#else /* __powerpc64__ */
-static inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+#define __copy_in_user(to, from, size) \
+ __copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n);
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
unsigned long ret;
return __copy_tofrom_user((__force void __user *) to, from, n);
}
-static inline unsigned long
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ const void *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
unsigned long ret;
#endif /* __powerpc64__ */
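The inatomic bodies above (truncated by the diff context) special-case small, compile-time-constant sizes so that a one-to-eight-byte copy compiles down to a single fixed-width access instead of a call into __copy_tofrom_user. A sketch of such a fast path, reconstructed under that assumption rather than copied from this patch:

	if (__builtin_constant_p(n) && n <= 8) {
		unsigned long ret = 1;

		switch (n) {
		case 1: __get_user_size(*(u8 *)to, from, 1, ret); break;
		case 2: __get_user_size(*(u16 *)to, from, 2, ret); break;
		case 4: __get_user_size(*(u32 *)to, from, 4, ret); break;
		case 8: __get_user_size(*(u64 *)to, from, 8, ret); break;
		}
		return ret ? n : 0;	/* like the slow path: bytes not copied */
	}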
-static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long size)
+static inline unsigned long __copy_from_user(void *to,
+ const void __user *from, unsigned long size)
{
might_sleep();
#ifndef __powerpc64__
#endif /* __powerpc64__ */
}
-static inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long size)
+static inline unsigned long __copy_to_user(void __user *to,
+ const void *from, unsigned long size)
{
might_sleep();
#ifndef __powerpc64__
#endif /* __powerpc64__ */
}
-#ifndef __powerpc64__
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
- __copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
- unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
- unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n);
-#endif /* __powerpc64__ */
-
extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
*
* Return 0 for error
*/
-#ifndef __powerpc64__
extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-#else /* __powerpc64__ */
-extern int __strnlen_user(const char __user *str, long len);
-#endif /* __powerpc64__ */
/*
* Returns the length of the string at str (including the null byte),
*/
static inline int strnlen_user(const char __user *str, long len)
{
-#ifndef __powerpc64__
unsigned long top = current->thread.fs.seg;
if ((unsigned long)str > top)
return 0;
return __strnlen_user(str, len, top);
-#else /* __powerpc64__ */
- might_sleep();
- if (likely(access_ok(VERIFY_READ, str, 1)))
- return __strnlen_user(str, len);
- return 0;
-#endif /* __powerpc64__ */
}
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
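With the unified strnlen_user, callers see one of three outcomes: 0 for a faulting address, a count that includes the NUL, or a value larger than the limit when the string is unterminated. A caller-side sketch (kbuf and the error codes are illustrative):

	char kbuf[64];
	long n = strnlen_user(ubuf, sizeof(kbuf));

	if (n == 0)
		return -EFAULT;		/* bad user pointer */
	if (n > sizeof(kbuf))
		return -EINVAL;		/* no NUL within the limit */
	/* safe: the string, including its NUL, fits in kbuf */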