/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/errno.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
#define __kernel_ds_p()	(current_thread_info()->addr_limit.seg == 0x9FFFFFFF)

#define segment_eq(a, b)	((a).seg == (b).seg)
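
/* Illustrative only: the classic pattern for temporarily raising the
 * address limit so a kernel buffer can be passed to code that expects a
 * __user pointer.  kernel_read_sketch() is a hypothetical example (it
 * assumes <linux/fs.h> for struct file and vfs_read()) and is not part
 * of this header; the save/restore discipline is the point.
 */
#if 0
static ssize_t kernel_read_sketch(struct file *file, char *buf, size_t count)
{
	mm_segment_t old_fs = get_fs();		/* save the current limit */
	ssize_t ret;

	set_fs(KERNEL_DS);			/* permit kernel addresses */
	ret = vfs_read(file, (char __user *) buf, count, &file->f_pos);
	set_fs(old_fs);				/* always restore the old limit */
	return ret;
}
#endif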
#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm("	add	%3,%1	\n"	/* set C-flag if addr + size > 4Gb */
	    "	bcs	0f	\n"
	    "	cmp	%4,%1	\n"	/* jump if addr+size>limit (error) */
	    "	bhi	0f	\n"
	    "	clr	%0	\n"	/* mark okay */
	    "0:			\n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	      "r"(current_thread_info()->addr_limit.seg), "0"(flag)
	    : "cc");

	return flag;
}

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
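
/* Illustrative only: how calling code might validate a user range once
 * and then use the unchecked accessors defined below.  do_thing_sketch()
 * is a hypothetical example and not part of this API.
 */
#if 0
static long do_thing_sketch(void __user *ubuf, size_t len)
{
	/* access_ok() only checks the range against addr_limit; the
	 * access itself may still fault and be fixed up at run time */
	if (!access_ok(VERIFY_READ, ubuf, len))
		return -EFAULT;

	/* ... the __ (unchecked) accessors may now be used on ubuf ... */
	return 0;
}
#endif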
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no exception was found, and the fixup otherwise. */
extern int fixup_exception(struct pt_regs *regs);
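
/* Illustrative only: a minimal sketch of what applying a fixup amounts
 * to, assuming the generic search_exception_tables() lookup.  This is
 * not the actual body of fixup_exception(), which lives in arch code.
 */
#if 0
static int fixup_exception_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;	/* resume at the fixup code */
		return 1;
	}
	return 0;
}
#endif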
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
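
/* Illustrative only: get_user()/put_user() return 0 on success or
 * -EFAULT, and __get_user() must be preceded by an explicit access_ok()
 * check.  read_pair_sketch() is a hypothetical example.
 */
#if 0
static int read_pair_sketch(int __user *uptr)
{
	int a, b;

	if (get_user(a, uptr))			/* checked access */
		return -EFAULT;
	if (!access_ok(VERIFY_READ, uptr + 1, sizeof(int)))
		return -EFAULT;
	if (__get_user(b, uptr + 1))		/* unchecked, pre-validated */
		return -EFAULT;
	return put_user(a + b, uptr);		/* checked write-back */
}
#endif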
/* Dummy oversized type: casting an address to __large_struct for an "m"
 * constraint tells gcc that the asm may touch a wide span of memory. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
#define __get_user_nocheck(x, ptr, size)			\
({								\
	unsigned long __gu_addr;				\
	int __gu_err;						\
	__gu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1: {						\
		unsigned char __gu_val;				\
		__get_user_asm("bu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __gu_val;			\
		__get_user_asm("hu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __gu_val;				\
		__get_user_asm("");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
	__gu_err;						\
})
#define __get_user_check(x, ptr, size)				\
({								\
	const __typeof__(*(ptr))* __guc_ptr = (ptr);		\
	int _e;							\
	if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
		_e = __get_user_nocheck((x), __guc_ptr, (size)); \
	else {							\
		_e = -EFAULT;					\
		(x) = (__typeof__(x))0;				\
	}							\
	_e;							\
})
#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

extern int __get_user_unknown(void);
#define __put_user_nocheck(x, ptr, size)			\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1:  __put_user_asm("bu"); break;			\
	case 2:  __put_user_asm("hu"); break;			\
	case 4:  __put_user_asm(""  ); break;			\
	case 8:  __put_user_asm8();    break;			\
	default: __pu_err = __put_user_unknown(); break;	\
	}							\
	__pu_err;						\
})
#define __put_user_check(x, ptr, size)					\
({									\
	union {								\
		__typeof__(*(ptr)) val;					\
		u32 bits[2];						\
	} __pu_val;							\
	unsigned long __pu_addr;					\
	int __pu_err;							\
	__pu_val.val = (x);						\
	__pu_addr = (unsigned long) (ptr);				\
	if (likely(__access_ok(__pu_addr, size))) {			\
		switch (size) {						\
		case 1:  __put_user_asm("bu"); break;			\
		case 2:  __put_user_asm("hu"); break;			\
		case 4:  __put_user_asm(""  ); break;			\
		case 8:  __put_user_asm8();    break;			\
		default: __pu_err = __put_user_unknown(); break;	\
		}							\
	}								\
	else {								\
		__pu_err = -EFAULT;					\
	}								\
	__pu_err;							\
})
#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT));				\
})
#define __put_user_asm8()						\
({									\
	asm volatile(							\
		"1:	mov		%1,%3\n"			\
		"2:	mov		%2,%4\n"			\
		"	mov		0,%0\n"				\
		"3:\n"							\
		"	.section	.fixup,\"ax\"\n"		\
		"4:\n"							\
		"	mov		%5,%0\n"			\
		"	jmp		3b\n"				\
		"	.previous\n"					\
		"	.section	__ex_table,\"a\"\n"		\
		"	.balign		4\n"				\
		"	.long		1b, 4b\n"			\
		"	.long		2b, 4b\n"			\
		"	.previous"					\
		: "=&r" (__pu_err)					\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]),	\
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),		\
		  "i" (-EFAULT));					\
})

extern int __put_user_unknown(void);
/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  */
#define __copy_user(to, from, size)					\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:	movbu	(%0),%3;\n"			\
			"1:	movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"	.balign	4\n"				\
			"	.long	0b,3b\n"			\
			"	.long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)
#define __copy_user_zeroing(to, from, size)				\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:	movbu	(%0),%3;\n"			\
			"1:	movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:\n"						\
			"	mov	%2,%0\n"			\
			"	clr	%3\n"				\
			"4:	movbu	%3,(%1);\n"			\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	4b;\n"				\
			"	mov	%0,%2\n"			\
			"	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"	.balign	4\n"				\
			"	.long	0b,3b\n"			\
			"	.long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)
/* We inline the __ versions of copy_from/to_user because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}
#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"	mov %0,a0;\n"		\
		"0:	movbu (%1),d3;\n"	\
		"1:	movbu d3,(%2);\n"	\
		"	add -1,a0;\n"		\
		"	bne 0b;\n"		\
		"2:;"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp 2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"	.balign 4\n"		\
		"	.long 0b,3b\n"		\
		"	.long 1b,3b\n"		\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"	mov %0,a0;\n"			\
		"0:	movbu (%1),d3;\n"		\
		"1:	movbu d3,(%2);\n"		\
		"	add -1,a0;\n"			\
		"	bne 0b;\n"			\
		"2:;"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp 2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign 4\n"			\
		"	.long 0b,3b\n"			\
		"	.long 1b,3b\n"			\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)
static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif
extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})
#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
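
/* Illustrative only: copy_to_user()/copy_from_user() return the number
 * of bytes that could NOT be copied, so a non-zero result maps to
 * -EFAULT.  set_name_sketch() and its buffer are hypothetical.
 */
#if 0
static long set_name_sketch(const char __user *uname, size_t len)
{
	char kname[32];

	if (len > sizeof(kname))
		return -EINVAL;
	if (copy_from_user(kname, uname, len))
		return -EFAULT;	/* partial copy: remainder was zeroed */
	/* ... use kname ... */
	return 0;
}
#endif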
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);

#endif /* _ASM_UACCESS_H */