/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 *
 * Passing down mm_struct allows untagging rules to be defined on a
 * per-process basis.
 *
 * It's defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr)	({	\
	mmap_assert_locked(mm);			\
	untagged_addr(addr);			\
})
#endif

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  The return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to store less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory, so no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
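
/*
 * A model of the contract above (illustrative only - a real port uses
 * exception-table fixups, and user_byte_readable() below is a made-up
 * stand-in for "the next access would not fault"):
 *
 *	unsigned long raw_copy_from_user(void *to, const void __user *from,
 *					 unsigned long n)
 *	{
 *		unsigned long left;
 *
 *		for (left = n; left; left--) {
 *			if (!user_byte_readable(from))	// hypothetical
 *				break;		// left bytes remain uncopied
 *			*(char *)to++ = *(const char __force *)from++;
 *		}
 *		return left;	// 0 == everything copied, n == nothing copied
 *	}
 */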
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	instrument_copy_from_user_before(to, from, n);
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	might_fault();
	instrument_copy_from_user_before(to, from, n);
	if (should_fail_usercopy())
		return n;
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}
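
/*
 * Example (illustrative; "struct foo" and "uarg" are made up): unlike
 * copy_from_user(), __copy_from_user() does not zero-pad the destination
 * on a short copy, so a non-zero return must be treated as failure:
 *
 *	struct foo karg;
 *
 *	if (__copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;		// karg may be partially written
 */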

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  The caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't end up taking a page fault and sleeping.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
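
/*
 * Sketch of the caller contract above (illustrative; "uaddr", "buf" and
 * "len" are made up, and the page is assumed to be pinned already):
 *
 *	if (!access_ok(uaddr, len))
 *		return -EFAULT;
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(uaddr, buf, len);
 *	pagefault_enable();
 *	if (left)
 *		...	// fall back to a sleeping copy_to_user() path
 */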

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user(to, from, n);
	return n;
}
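
/*
 * Typical use (illustrative; "struct foo_args", "argp" and do_foo() are
 * made up).  Both helpers return the number of bytes *not* copied, so a
 * non-zero return means failure:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, argp, sizeof(args)))
 *		return -EFAULT;
 *	args.result = do_foo(&args);		// hypothetical helper
 *	if (copy_to_user(argp, &args, sizeof(args)))
 *		return -EFAULT;
 */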

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
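
/*
 * Like the raw_copy_*() primitives, copy_mc_to_kernel() returns the number
 * of bytes left uncopied.  Illustrative check for a short copy ("dst",
 * "src" and "len" are made up):
 *
 *	rem = copy_mc_to_kernel(dst, src, len);
 *	if (rem)
 *		return -EIO;	// source went bad; len - rem bytes landed
 */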

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
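
/*
 * Illustrative pairing: sections nest, because pagefault_disabled is a
 * per-task counter, and user accesses inside return an error instead of
 * sleeping ("uptr" and "val" are made up):
 *
 *	pagefault_disable();
 *	ret = __get_user(val, uptr);	// fails rather than faulting in
 *	pagefault_enable();
 *	if (ret)
 *		...	// retry from a context that may sleep
 */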

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, preempt_disable() is a no-op, so the handler won't
 * actually be disabled; in_atomic() will report different values depending
 * on CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 *  * 0: success.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
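
/*
 * The *_nofault() variants never sleep and may be called with pagefaults
 * disabled; they return an error instead of faulting memory in.
 * Illustrative use from a tracing-style context ("buf" and "uptr" are
 * made up):
 *
 *	char buf[64];
 *
 *	if (copy_from_user_nofault(buf, uptr, sizeof(buf)))
 *		return;		// unmapped or racy user memory - skip it
 */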

#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
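
/*
 * Example (illustrative; "maybe_bad_ptr" is made up): read through a
 * possibly-invalid kernel pointer, e.g. on a debugging or unwinding path,
 * without risking an oops:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)maybe_bad_ptr))
 *		return;		// address wasn't mapped
 */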

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
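
/*
 * Canonical pattern for the unsafe_*() accessors (illustrative; "uptr",
 * "val1" and "val2" are made up).  user_access_begin() validates the range
 * and opens a user-access window; every unsafe_*() call must sit between
 * it and user_access_end(), with a local label taking the fault case:
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(val1, &uptr[0], efault);
 *	unsafe_put_user(val2, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */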

#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif /* __LINUX_UACCESS_H__ */