1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __M68K_UACCESS_H
3 #define __M68K_UACCESS_H
/*
 * User space memory access functions
 */
10 #include <linux/compiler.h>
11 #include <linux/types.h>
12 #include <asm/extable.h>
14 /* We let the MMU do all checking */
/*
 * access_ok() -- range check before a user-space access.  Per the comment
 * above, the MMU does all checking here, so the body (not visible in this
 * extraction) presumably accepts unconditionally -- confirm against the
 * original file.
 * NOTE(review): the tail of the parameter list, the function body, and the
 * remainder of the XXX comment below are elided from this extraction.
 */
static inline int access_ok(const void __user *addr,
/*
 * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
 * (remainder of this comment elided from this extraction)
 */
/*
 * Not all variants of the 68k family support the notion of address spaces.
 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
 * the "moves" instruction to access user space from kernel space. Other
 * family members like ColdFire don't support this, and only have a single
 * address space, and use the usual "move" instruction for user space access.
 *
 * Outside of this difference the user space access functions are the same.
 * So let's keep the code simple and just define in what we need to use.
 */
35 #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
/*
 * Store one value of size #bwl (b/w/l) through 'ptr' using move
 * instruction 'inst' ("move" or MOVES).  On a fault the .fixup code at
 * 10: loads the error code (%3 == err) into the result (%0 == res);
 * 'reg' is the constraint letter for the source operand.
 * NOTE(review): the asm-statement opener and the section-closing
 * directives appear elided from this extraction -- verify against the
 * original file before modifying.
 */
#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
"1: "inst"."#bwl" %2,%1\n" \
" .section .fixup,\"ax\"\n" \
"10: moveq.l %3,%0\n" \
" .section __ex_table,\"a\"\n" \
: "+d" (res), "=m" (*(ptr)) \
: #reg (x), "i" (err))
/*
 * Store a register pair (used for the 8-byte case in __put_user) as two
 * longword moves -- %2 then its low half %R2 -- post-incrementing the
 * destination pointer between them.  Faults land in .fixup and set
 * res to -EFAULT.
 * NOTE(review): interior lines (asm opener, fixup labels, section-closing
 * directives) appear elided from this extraction.
 */
#define __put_user_asm8(inst, res, x, ptr) \
const void *__pu_ptr = (const void __force *)(ptr); \
"1: "inst".l %2,(%1)+\n" \
"2: "inst".l %R2,(%1)\n" \
" .section .fixup,\"ax\"\n" \
" .section __ex_table,\"a\"\n" \
: "+d" (res), "+a" (__pu_ptr) \
: "r" (x), "i" (-EFAULT) \
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
/*
 * __put_user() -- store x through user pointer ptr, dispatching on
 * sizeof(*(ptr)): 1/2/4-byte stores via __put_user_asm (suffixes b/w/l),
 * 8-byte via __put_user_asm8.  MOVES selects the user-space move
 * instruction for this configuration.
 * NOTE(review): the ({ ... }) statement-expression scaffolding, the
 * __pu_err declaration, and the case labels appear elided from this
 * extraction.
 */
#define __put_user(x, ptr) \
typeof(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
switch (sizeof (*(ptr))) { \
__put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
__put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
__put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
__put_user_asm8(MOVES, __pu_err, __pu_val, ptr); \
112 #define put_user(x, ptr) __put_user(x, ptr)
/*
 * Load one value of size #bwl from *(ptr) into a temporary of the given
 * 'type', then force-cast it back to typeof(*(ptr)) when assigning to x.
 * On a fault the .fixup code at 10: moves the error code (%3 == err)
 * into res.
 * NOTE(review): the temporary's declaration, the asm-statement opener,
 * and the section-closing directives appear elided from this extraction.
 */
#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({ \
"1: "inst"."#bwl" %2,%1\n" \
" .section .fixup,\"ax\"\n" \
"10: move.l %3,%0\n" \
" .section __ex_table,\"a\"\n" \
: "+d" (res), "=&" #reg (__gu_val) \
: "m" (*(ptr)), "i" (err)); \
(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
/*
 * Load an 8-byte value (used for the sizeof == 8 case in __get_user) as
 * two longword moves -- %1 then its low half %R1 -- post-incrementing the
 * source pointer between them; a fault sets res via the .fixup code
 * at 10:.
 * NOTE(review): the union/temporary declaration around 't', the asm
 * opener, the remaining operand lines, and the section-closing directives
 * appear elided from this extraction.
 */
#define __get_user_asm8(inst, res, x, ptr) \
const void *__gu_ptr = (const void __force *)(ptr); \
__typeof__(*(ptr)) t; \
"1: "inst".l (%2)+,%1\n" \
"2: "inst".l (%2),%R1\n" \
" .section .fixup,\"ax\"\n" \
"10: move.l %3,%0\n" \
" .section __ex_table,\"a\"\n" \
: "+d" (res), "=&r" (__gu_val.l), \
/*
 * __get_user() -- load *(ptr) from user space into x, dispatching on
 * sizeof(*(ptr)): 1/2/4-byte loads via __get_user_asm with the matching
 * unsigned temporary type (u8/u16/u32), 8-byte via __get_user_asm8.
 * NOTE(review): the ({ ... }) scaffolding, the __gu_err declaration, and
 * the case labels appear elided from this extraction.
 */
#define __get_user(x, ptr) \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
__get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
__get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
__get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
__get_user_asm8(MOVES, __gu_err, x, ptr); \
190 #define get_user(x, ptr) __get_user(x, ptr)
/*
 * Out-of-line copy routines used when the length is not a small
 * compile-time constant.  Judging by the fixup code in the inline
 * variants below, the return value is the number of bytes NOT copied
 * (0 on success) -- confirm against the out-of-line definitions.
 */
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
/*
 * Copy from user space with up to three MOVES loads of sizes #s1/#s2/#s3
 * (assembler suffixes b/w/l), each followed by a plain "move" store to
 * kernel memory, post-incrementing both pointers.  The assembler .ifnc
 * directives make the second and third pairs conditional on a non-empty
 * suffix argument.  The .fixup entries at 10:/20:/30: add the byte counts
 * n1/n2/n3 of the not-yet-copied pieces to res, so res accumulates the
 * number of bytes left uncopied after a fault.
 * NOTE(review): the asm-statement opener, .endif/.previous directives,
 * __ex_table entries, and the jump back out of .fixup appear elided from
 * this extraction -- verify against the original before modifying.
 */
#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
"1: "MOVES"."#s1" (%2)+,%3\n" \
" move."#s1" %3,(%1)+\n" \
" .ifnc \""#s2"\",\"\"\n" \
"2: "MOVES"."#s2" (%2)+,%3\n" \
" move."#s2" %3,(%1)+\n" \
" .ifnc \""#s3"\",\"\"\n" \
"3: "MOVES"."#s3" (%2)+,%3\n" \
" move."#s3" %3,(%1)+\n" \
" .section __ex_table,\"a\"\n" \
" .ifnc \""#s2"\",\"\"\n" \
" .ifnc \""#s3"\",\"\"\n" \
" .section .fixup,\"ax\"\n" \
"10: addq.l #"#n1",%0\n" \
" .ifnc \""#s2"\",\"\"\n" \
"20: addq.l #"#n2",%0\n" \
" .ifnc \""#s3"\",\"\"\n" \
"30: addq.l #"#n3",%0\n" \
: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
/*
 * Two-level expansion: the extra ___ layer forces n1/n2/n3 to be
 * macro-expanded before token pasting, so __suffix##n1 etc. produce the
 * size-suffix arguments (s1/s2/s3) for the worker macro above.
 * NOTE(review): the __suffix0/__suffix1/__suffix2/__suffix4 helper
 * defines are not visible in this extraction -- presumably defined
 * nearby in the original file; verify.
 */
#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \
___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \
__suffix##n1, __suffix##n2, __suffix##n3)
/*
 * Inline copy_from_user() for compile-time-constant sizes.  Each size is
 * decomposed into at most three moves (e.g. 7 -> 4+2+1, 12 -> 4+4+4);
 * anything larger falls back to the out-of-line
 * __generic_copy_from_user().  Returns the number of bytes not copied
 * (res is accumulated by the asm helper's fixup code; 0 on success).
 * NOTE(review): the switch statement, its case labels, return statements,
 * and braces appear elided from this extraction.
 */
static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = 0, tmp;
__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
/* we limit the inlined version to 3 moves */
return __generic_copy_from_user(to, from, n);
/*
 * Copy to user space with up to three "move" loads from kernel memory,
 * each followed by a MOVES store of size #s1/#s2/#s3 to the user pointer;
 * the third pair is conditional on #s3 being non-empty (.ifnc).  On a
 * fault the .fixup code at 5: loads the total constant byte count n into
 * res (i.e. the whole transfer is reported as uncopied).
 * NOTE(review): the asm-statement opener, some numbered labels,
 * __ex_table entries, and .endif/.previous directives appear elided from
 * this extraction -- verify against the original before modifying.
 */
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
" move."#s1" (%2)+,%3\n" \
"11: "MOVES"."#s1" %3,(%1)+\n" \
"12: move."#s2" (%2)+,%3\n" \
"21: "MOVES"."#s2" %3,(%1)+\n" \
" .ifnc \""#s3"\",\"\"\n" \
" move."#s3" (%2)+,%3\n" \
"31: "MOVES"."#s3" %3,(%1)+\n" \
" .section __ex_table,\"a\"\n" \
" .ifnc \""#s3"\",\"\"\n" \
" .section .fixup,\"ax\"\n" \
"5: moveq.l #"#n",%0\n" \
: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
/*
 * Inline copy_to_user() for compile-time-constant sizes.  Sizes 1, 2 and
 * 4 use a single __put_user_asm store; 3 and 5..12 use
 * __constant_copy_to_user_asm with at most three moves (e.g. 7 -> l,w,b);
 * anything larger falls back to the out-of-line __generic_copy_to_user().
 * Returns the number of bytes not copied (0 on success).
 * NOTE(review): the switch statement, its case labels, the trailing
 * arguments of the __put_user_asm calls, return statements, and braces
 * appear elided from this extraction.
 */
static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
unsigned long res = 0, tmp;
__put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
__put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
__put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
/* limit the inlined version to 3 moves */
return __generic_copy_to_user(to, from, n);
375 static inline unsigned long
376 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
378 if (__builtin_constant_p(n))
379 return __constant_copy_from_user(to, from, n);
380 return __generic_copy_from_user(to, from, n);
383 static inline unsigned long
384 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
386 if (__builtin_constant_p(n))
387 return __constant_copy_to_user(to, from, n);
388 return __generic_copy_to_user(to, from, n);
/*
 * Feature flags -- presumably consumed by the generic uaccess layer to
 * inline the copy routines above and to enable the *_kernel_nofault()
 * helpers below; confirm against include/linux/uaccess.h.
 */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#define HAVE_GET_KERNEL_NOFAULT
/*
 * Non-faulting kernel-to-kernel read: same size dispatch as __get_user(),
 * but always using the plain "move" instruction (no user address space
 * involved).  If the access faulted (__gk_err set by the fixup code),
 * control transfers to err_label.
 * NOTE(review): the ({ ... }) scaffolding, __gk_err declaration, case
 * labels, braces, and the goto err_label statement appear elided from
 * this extraction.
 */
#define __get_kernel_nofault(dst, src, type, err_label) \
type *__gk_dst = (type *)(dst); \
type *__gk_src = (type *)(src); \
switch (sizeof(type)) { \
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
u8, b, d, -EFAULT); \
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
u16, w, r, -EFAULT); \
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
u32, l, r, -EFAULT); \
__get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
if (unlikely(__gk_err)) \
/*
 * Non-faulting kernel-to-kernel write: same size dispatch as __put_user(),
 * but always using the plain "move" instruction.  If the access faulted
 * (__pk_err set by the fixup code), control transfers to err_label.
 * NOTE(review): the ({ ... }) scaffolding, __pk_err declaration, case
 * labels, the trailing size/constraint arguments of the __put_user_asm
 * calls, braces, and the goto err_label statement appear elided from
 * this extraction.
 */
#define __put_kernel_nofault(dst, src, type, err_label) \
type __pk_src = *(type *)(src); \
type *__pk_dst = (type *)(dst); \
switch (sizeof(type)) { \
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
__put_user_asm8("move", __pk_err, __pk_src, __pk_dst); \
if (unlikely(__pk_err)) \
/* Out-of-line user-string helpers; definitions are not in this file. */
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
/*
 * Zero n bytes of user memory; presumably returns the number of bytes
 * left unzeroed on fault -- confirm against the out-of-line definition.
 */
unsigned long __clear_user(void __user *to, unsigned long n);
/* No extra checking needed beyond what the MMU provides. */
#define clear_user __clear_user
460 #else /* !CONFIG_MMU */
461 #include <asm-generic/uaccess.h>
#endif /* __M68K_UACCESS_H */