{
unsigned ret;
+ stac();
/*
 * If CPU has FSRM feature, use 'rep movs'.
 * Otherwise, use copy_user_generic_unrolled.
 */
alternative_call(copy_user_generic_unrolled,
		 copy_user_fast_string,
		 X86_FEATURE_FSRM,
		 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
			     "=d" (len)),
"1" (to), "2" (from), "3" (len)
: "memory", "rcx", "r8", "r9", "r10", "r11");
+ clac();
return ret;
}
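
For reference, the stac()/clac() pair added above are the <asm/smap.h> helpers that emit the STAC/CLAC instructions through the alternatives mechanism, so they are no-ops on CPUs without SMAP. A sketch of their shape (from memory, not part of this patch; the exact macro spelling may differ):

static __always_inline void stac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_STAC, X86_FEATURE_SMAP);
}

static __always_inline void clac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
}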
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size)
{
+ long ret;
kasan_check_write(dst, size);
- return __copy_user_nocache(dst, src, size, 0);
+ stac();
+ ret = __copy_user_nocache(dst, src, size, 0);
+ clac();
+ return ret;
}
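
Note the shape of the wrapper: the result of the asm routine is captured in a local and clac() runs before the return statement, since returning from inside the stac()/clac() window would leave EFLAGS.AC set. For orientation, the C-side declaration of the routine called here looks roughly like this (a sketch, not taken from the patch; the exact prototype in uaccess_64.h may differ):

/* returns the number of bytes that could not be copied, 0 on success */
long __copy_user_nocache(void *dst, const void __user *src,
			 unsigned size, int zerorest);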
static inline int
* eax uncopied bytes or 0 if successful.
*/
SYM_FUNC_START(copy_user_generic_unrolled)
- ASM_STAC
cmpl $8,%edx
jb .Lcopy_user_short_string_bytes
ALIGN_DESTINATION
* eax uncopied bytes or 0 if successful.
*/
SYM_FUNC_START(copy_user_fast_string)
- ASM_STAC
movl %edx,%ecx
1: rep movsb
xorl %eax,%eax
- ASM_CLAC
RET
12: movl %ecx,%eax /* ecx is zerorest also */
- ASM_CLAC
RET
_ASM_EXTABLE_CPY(1b, 12b)
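
With ASM_STAC/ASM_CLAC gone, copy_user_fast_string reduces to a single 'rep movsb' plus one fixup that returns the uncopied byte count. Pieced together from the hunk above (the SYM_FUNC_END line is assumed, not shown in the hunk), the resulting body is roughly:

SYM_FUNC_START(copy_user_fast_string)
	movl %edx,%ecx
1:	rep movsb
	xorl %eax,%eax
	RET

12:	movl %ecx,%eax		/* ecx is zerorest also */
	RET

	_ASM_EXTABLE_CPY(1b, 12b)
SYM_FUNC_END(copy_user_fast_string)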
movl %edx,%ecx
1: rep movsb
2: mov %ecx,%eax
- ASM_CLAC
RET
3:
movl %edx,%eax
- ASM_CLAC
RET
_ASM_EXTABLE_CPY(1b, 2b)
decl %ecx
jnz 21b
23: xor %eax,%eax
- ASM_CLAC
RET
40: leal (%rdx,%rcx,8),%edx
* - Require 4-byte alignment when size is 4 bytes.
*/
SYM_FUNC_START(__copy_user_nocache)
- ASM_STAC
-
/* If size is less than 8 bytes, go to 4-byte copy */
cmpl $8,%edx
jb .L_4b_nocache_copy_entry
/* Finished copying; fence the prior stores */
.L_finish_copy:
xorl %eax,%eax
- ASM_CLAC
sfence
RET
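
Only the ASM_CLAC is dropped on this exit path; the sfence stays because __copy_user_nocache writes with weakly ordered non-temporal stores, and the fence makes them globally visible before the routine reports success. Illustrative pattern only (not verbatim from the file):

	movnti	%r8,(%rdi)	/* non-temporal store: bypasses the cache, weakly ordered */
	movnti	%r9,1*8(%rdi)
	sfence			/* drain the non-temporal stores before returning */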
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
unsigned long flushed, dest = (unsigned long) dst;
- long rc = __copy_user_nocache(dst, src, size, 0);
+ long rc;
+
+ stac();
+ rc = __copy_user_nocache(dst, src, size, 0);
+ clac();
/*
* __copy_user_nocache() uses non-temporal stores for the bulk
"copy_mc_enhanced_fast_string",
"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
"clear_user_original",
+ "copy_user_generic_unrolled",
+ "copy_user_fast_string",
+ "__copy_user_nocache",
NULL
};
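
Since the copy routines are now entered with EFLAGS.AC already set by their callers, objtool has to treat them as safe to call inside a uaccess window; that is what the three additions to uaccess_safe_builtin[] above do. As a hypothetical illustration (names invented, not from the patch), calling anything not on that list between stac() and clac() is the situation objtool complains about:

/* Hypothetical sketch, assuming kernel context (<asm/smap.h>, <linux/printk.h>) */
static void uaccess_window_example(void)
{
	stac();
	pr_info("inside the uaccess window\n");	/* not on the list: objtool would warn */
	clac();
}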