1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright 2010, Google Inc.
5 * Brought in from coreboot uldivmod.S
8 #include <linux/linkage.h>
9 #include <asm/assembler.h>
12 * A, Q = r0 + (r1 << 32)
13 * B, R = r2 + (r3 << 32)
@ unsigned long long __aeabi_uldivmod(unsigned long long A, unsigned long long B)
@ EABI run-time helper: computes A / B and A % B in one call.
@ Per the header comment above, A and the quotient Q share r0 (low) / r1 (high),
@ and B and the remainder R share r2 (low) / r3 (high).
@
@ NOTE(review): this chunk is a non-contiguous excerpt of the original file --
@ the macros aliasing A_0/A_1/B_0/B_1/C_0/C_1/D_0/D_1/Q_0/Q_1/TMP to concrete
@ registers, all local labels, and all branch instructions fall outside this
@ view. Comments below describe only what the visible lines establish; anything
@ depending on the missing lines is marked as an assumption to confirm.
35 .pushsection .text.__aeabi_uldivmod, "ax"
36 ENTRY(__aeabi_uldivmod)
@ Save callee-saved r4-r7 and lr; THUMB() additionally saves the TMP alias,
@ which the Thumb paths below use as scratch for composed 64-bit shifts.
38 stmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) lr}
@ Z flag set iff the full 64-bit divisor B is zero (both halves OR to 0);
@ presumably followed (outside this view) by a branch to the div-by-0 path.
40 orrs ip, B_0, B_1 @ Z set -> B == 0
42 @ Test if B is power of 2: (B & (B - 1)) == 0
48 @ Test if A_1 == B_1 == 0
53 /* CLZ only exists in ARM architecture version 5 and above. */
@ --- Normalization (general case): align B's leading bit with A's ------------
@ D_0/D_1 here appear to hold the clz difference and its 32-complement --
@ TODO confirm against the missing setup lines.
67 @ if clz B - clz A > 0
70 @ B <<= (clz B - clz A)
@ Composed 64-bit left shift by D_0 (< 32 when MI): high half shifts left,
@ then picks up the bits shifted out of the low half (B_0 >> ip).
@ ARM can fold that into one orr with a shifted operand; Thumb needs TMP.
73 movmi B_1, B_1, lsl D_0
74 ARM( orrmi B_1, B_1, B_0, lsr ip )
75 THUMB( lsrmi TMP, B_0, ip )
76 THUMB( orrmi B_1, B_1, TMP )
@ PL case: shift count >= 32, so the whole high half comes from B_0 << D_1.
77 movpl B_1, B_0, lsl D_1
79 @ C = 1 << (clz B - clz A)
@ Same composed-shift pattern applied to C, the current quotient bit.
80 movmi C_1, C_1, lsl D_0
81 ARM( orrmi C_1, C_1, C_0, lsr ip )
82 THUMB( lsrmi TMP, C_0, ip )
83 THUMB( orrmi C_1, C_1, TMP )
84 movpl C_1, C_0, lsl D_1
@ --- Shift-subtract division loop (fragmentary below) ------------------------
89 @ C: current bit; D: result
91 @ C: current bit; D: result
@ 64-bit left shift of B and C by 4 (the lsr #28 carries bits across halves);
@ presumably the non-CLZ normalization loop -- the surrounding mov/branch
@ lines are outside this view.
103 orr B_1, B_1, B_0, lsr #28
107 orr C_1, C_1, C_0, lsr #28
@ Same, by 1 bit (lsr #31 carries the top bit of the low half).
117 orr B_1, B_1, B_0, lsr #31
121 orr C_1, C_1, C_0, lsr #31
@ 64-bit right shift of C and B by 1 per loop iteration; movs sets flags,
@ presumably driving the loop-exit branch (not visible here).
141 movs C_1, C_1, lsr #1
147 movs B_1, B_1, lsr #1
@ --- Return path: quotient/remainder already in place via register aliasing --
151 @ Note: A, B & Q, R are aliases
@ Restore saved regs and return (pc takes saved lr's place).
156 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
159 @ Note: A_0 & r0 are aliases
166 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
@ --- Power-of-2 divisor path (quotient is A >> log2(B)) ----------------------
170 @ Note: A, B and Q, R are aliases
175 @ Note: B must not be 0 here!
@ Shift count >= 32 case: low result comes from the high half alone --
@ TODO confirm, the branch selecting this line is outside this view.
182 mov A_0, A_1, lsr D_0
@ Composed 64-bit right shift of A by D_0 (PL: count < 32): low half shifts
@ right and picks up bits from the high half (A_1 << D_1).
185 movpl A_0, A_0, lsr D_0
186 ARM( orrpl A_0, A_0, A_1, lsl D_1 )
187 THUMB( lslpl TMP, A_1, D_1 )
188 THUMB( orrpl A_0, A_0, TMP )
189 mov A_1, A_1, lsr D_0
193 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
@ --- Power-of-2 path without CLZ: count log2(B) by binary search -------------
195 @ Note: A, B and Q, R are aliases
200 @ Note: B must not be 0 here!
201 @ Count the leading zeroes in B.
204 @ If B is greater than 1 << 31, divide A and B by 1 << 32.
208 @ Count the remaining leading zeroes in B.
@ Classic clz-by-halving: test 16 bits at a time via lsl, conditionally
@ shift B_0 down by 16/8/4/2 (each step's D accumulation is outside this view).
209 movs B_1, B_0, lsl #16
211 moveq B_0, B_0, lsr #16
214 moveq B_0, B_0, lsr #8
217 moveq B_0, B_0, lsr #4
220 moveq B_0, B_0, lsr #2
223 @ Shift A to the right by the appropriate amount.
@ Q = A >> D_0, composed across halves; Thumb's 2-operand forms clobber A_1
@ as scratch, which is fine since A_1 is rewritten on the last line.
225 mov Q_0, A_0, lsr D_0
226 ARM( orr Q_0, Q_0, A_1, lsl D_1 )
227 THUMB( lsl A_1, D_1 )
228 THUMB( orr Q_0, A_1 )
229 mov Q_1, A_1, lsr D_0
233 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
@ --- Division by zero: result is unspecified by the EABI ----------------------
238 @ As wrong as it could be
243 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
244 ENDPROC(__aeabi_uldivmod)