/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
 * We build a jump to memcpy_orig by default, which gets NOPped out on
 * the majority of x86 CPUs, which set REP_GOOD. In addition, on CPUs
 * which have the enhanced REP MOVSB/STOSB feature (ERMS), those NOPs are
 * changed to a jmp to memcpy_erms, which does the copy with REP; MOVSB.
 */
/*
 * memcpy - Copy a memory block.
 *
 * Output:
 * rax	original destination
 */
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS
EXPORT_SYMBOL(__memcpy)

/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
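/*
 * With ERMS the microcoded REP MOVSB is expected to handle alignment and
 * length tuning internally, so a single REP MOVSB suffices and no unrolled
 * fallback is needed here.
 */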
	/*
	 * Check whether a memory false dependence could occur, then jump
	 * to the corresponding copy mode.
	 */
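	/*
	 * The false dependence in question is the 4K-aliasing
	 * store-forwarding stall that can occur when a load in one
	 * iteration aliases a store from an earlier iteration at the same
	 * offset within a page; copying backward when the source is
	 * "behind" the destination avoids that access pattern.
	 */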
	/* Move in blocks of 4x8 bytes: */
	jae .Lcopy_forward_loop
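	/*
	 * Each forward iteration copies 4x8 = 32 bytes; updating the count
	 * at the top of the loop doubles as the termination test, so the
	 * jae above stops looping once fewer than 32 bytes remain for the
	 * tail handling further down.
	 */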
	/* Calculate copy position to tail. */
	/*
	 * At most 3 ALU operations execute per cycle, so pad with NOPs
	 * within the same 16-byte chunk.
	 */
.Lcopy_backward_loop:
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop
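	/*
	 * The backward loop mirrors the forward one: 32 bytes per
	 * iteration, with both pointers walking down from the end of the
	 * buffers so the last bytes are copied first.
	 */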
	/* Calculate copy position to head. */
	/* Move 16 to 31 bytes of data. */
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
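	/*
	 * The tail cases all use the same trick: load from both the start
	 * and the (count-relative) end of the remaining region into
	 * registers first, then store both halves. The two halves may
	 * overlap in the middle; that is harmless because both are loaded
	 * from the source before either store is issued.
	 */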
	/* Move 8 to 15 bytes of data. */
	movq -1*8(%rsi, %rdx), %r9
	movq %r9, -1*8(%rdi, %rdx)
	/* Move 4 to 7 bytes of data. */
	movl -4(%rsi, %rdx), %r8d
	movl %r8d, -4(%rdi, %rdx)
	/* Move 1 to 3 bytes of data. */
	movzbq (%rsi, %rdx), %r9
	movb %r9b, (%rdi, %rdx)
/*
 * __memcpy_mcsafe - memory copy with machine check exception handling
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to target are posted and don't generate machine checks.
 */
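/*
 * Return value: zero on success, otherwise the number of bytes that were
 * not copied when a machine check (on a read) or a write fault was taken;
 * see the .fixup handlers below.
 */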
ENTRY(__memcpy_mcsafe)
	/* Less than 8 bytes? Go to byte copy loop */
	/* Check for bad alignment of source */
	/* Already aligned */
	/* Copy one byte at a time until source is 8-byte aligned */
.L_read_leading_bytes:
	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
	jnz .L_read_leading_bytes
	MCSAFE_TEST_SRC %rsi 8 .E_read_words
	MCSAFE_TEST_DST %rdi 8 .E_write_words
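	/*
	 * The MCSAFE_TEST_SRC/MCSAFE_TEST_DST macros come from
	 * <asm/mcsafe_test.h>. They appear to be fault-injection hooks:
	 * with the test facility configured in they branch to the given
	 * .E_* label for a matching address, so the fixup paths can be
	 * exercised without real poison; otherwise they expand to nothing.
	 */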
	/* Any trailing bytes? */
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
.L_read_trailing_bytes:
	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
	jnz .L_read_trailing_bytes

	/* Copy successful. Return zero */
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
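/*
 * A sketch of the caller-side contract (the prototype is declared with
 * the other string helpers in asm/string_64.h); 'rem' is a hypothetical
 * local used only for illustration:
 *
 *	rem = __memcpy_mcsafe(dst, src, len);
 *	if (rem)
 *		handle the 'rem' trailing bytes that were not copied;
 */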
	.section .fixup, "ax"
	/*
	 * Return number of bytes not copied for any failure. Note that
	 * there is no "tail" handling since the source buffer is 8-byte
	 * aligned and poison is cacheline aligned.
	 */
	/*
	 * For write fault handling: since the destination may be unaligned,
	 * faults on multi-byte writes are handled with a byte-by-byte copy
	 * up to the write-protected page.
	 */
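	/*
	 * mcsafe_handle_tail() is a C helper (arch/x86/lib/usercopy_64.c)
	 * that retries the remaining bytes one at a time, so its return
	 * value reflects exactly how many bytes could not be copied.
	 */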
	jmp mcsafe_handle_tail
	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE(.L_write_words, .E_write_words)
	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
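	/*
	 * The _ASM_EXTABLE_FAULT() entries cover the loads, which may take
	 * a machine check as described above; the plain _ASM_EXTABLE()
	 * entries cover the stores, which can only take ordinary faults.
	 */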