/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.align	5
	.syntax	unified
#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD) && !defined(MEMSET_NO_THUMB_BUILD)
	.thumb
	.thumb_func
#endif
ENTRY(memset)
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
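@ The two orrs replicate the fill byte across the whole word: with
@ r1 = 0x000000ab the first orr yields 0x0000abab and the second
@ 0xabababab, so every word store below writes four fill bytes.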
	mov	r3, r1
	cmp	r2, #16
	blt	4f

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
	mov	r8, r1
	mov	lr, r1

2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
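@ r2 is now the true remainder minus 64, but adding 64 back would not
@ change bits 0-5, so the tst instructions below (#32, #16, and later
@ #8/#4/#2/#1) still see the real residual count.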
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}

#else
/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */
	stmfd	sp!, {r4-r8, lr}
	mov	r4, r1
	mov	r5, r1
	mov	r6, r1
	mov	r7, r1
	mov	r8, r1
	mov	lr, r1

	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)
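@ r8 holds the 4..28 bytes needed to reach a 32-byte boundary; the
@ shift parks its bit 4 in the C flag and bit 3 in the N flag, so the
@ two conditional stores below write 16 and 8 bytes, and bit 30 (the
@ old bit 2) selects the final 4-byte store.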
	stmiacs	ip!, {r4, r5, r6, r7}
	stmiami	ip!, {r4, r5}
	tst	r8, #(1 << 30)
	mov	r8, r1
	strne	r1, [ip], #4

3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
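@ Each stmia writes eight registers (32 bytes, a whole line on CPUs
@ with 32-byte cache lines); two per pass fills 64 bytes.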
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}
	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3-r8}
	ldmfd	sp!, {r4-r8, lr}

#endif
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to zero. We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1
	ret	lr

6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
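@ r3 = low two address bits (1..3), so 4 - r3 bytes are stored to
@ reach a word boundary: after cmp r3, #2 the strblt fires only for
@ r3 == 1, strble for r3 <= 2, and the plain strb always stores one.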
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
ENDPROC(memset)