/* UltraSPARC 64 mpn_modexact_1c_odd -- mpn by limb exact style remainder.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
#include "gmp-impl.h"
#include "longlong.h"

#include "mpn/sparc64/sparc64.h"
/*                 64-bit divisor   32-bit divisor
                    cycles/limb      cycles/limb
*/
/* This implementation reduces the number of multiplies done, knowing that
   on ultrasparc 1 and 2 the mulx instruction stalls the whole chip.

   The key idea is to use the fact that the low limb of q*d equals l, this
   being the whole purpose of the q calculated.  It means there's no need to
   calculate the lowest 32x32->64 part of the q*d, instead it can be
   inferred from l and the other three 32x32->64 parts.  See sparc64.h for
   details, and the illustrative sketch following this comment.

   When d is 32-bits, the same applies, but in this case there's only one
   other 32x32->64 part (ie. HIGH(q)*d).

   The net effect is that for a 64-bit divisor each limb is 4 mulx (one
   forming q = l*inverse, three for the product parts), or for a 32-bit
   divisor each is 2 mulx.

   Enhancements:

   No doubt this could be done in assembler, if that helped the scheduling,
   or perhaps guaranteed good code irrespective of the compiler.

   Alternatives:

   It might be possible to use floating point.  The loop is dominated by
   multiply latency, so it's not clear floats would improve that.  One
   possibility would be to take two limbs at a time, with a 128 bit inverse,
   if there's enough registers, which could effectively use float throughput
   to reduce total latency across two limbs.  */
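
/* Illustrative sketch, not part of the original file: a plain C rendition
   of the inference described above, using the LOW32/HIGH32 macros from
   sparc64.h.  The helper name is hypothetical; the real code uses the
   umul_ppmm_lowequal macro from sparc64.h.  Precondition: the low 64 bits
   of q*d equal l, which choosing q = l*inverse guarantees.  */
static mp_limb_t
sketch_high_of_q_times_d (mp_limb_t q, mp_limb_t d, mp_limb_t l)
{
  mp_limb_t  ql = LOW32 (q),  qh = HIGH32 (q);
  mp_limb_t  dl = LOW32 (d),  dh = HIGH32 (d);
  mp_limb_t  ph   = qh * dh;                   /* bits 64..127 of q*d */
  mp_limb_t  pm1  = ql * dh;                   /* bits 32..95 */
  mp_limb_t  pm2  = qh * dl;                   /* bits 32..95 */
  mp_limb_t  pm_l = LOW32 (pm1) + LOW32 (pm2); /* low halves of the middles */

  /* ql*dl is never formed: its low half is already implied by l, and
     whether it carries into the high limb shows up as pm_l<<32 exceeding
     l.  That saves one of the four 32x32->64 products.  */
  return ph + HIGH32 (pm1) + HIGH32 (pm2) + HIGH32 (pm_l)
    + ((pm_l << 32) > l);
}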
#define ASSERT_RETVAL(r)                \
  ASSERT (orig_c < d ? r < d : r <= d)
mp_limb_t
mpn_modexact_1c_odd (mp_srcptr src, mp_size_t size, mp_limb_t d, mp_limb_t orig_c)
{
  mp_limb_t  c = orig_c;
  mp_limb_t  s, l, q, h, inverse;

  ASSERT (size >= 1);
  ASSERT (d & 1);
  ASSERT_MPN (src, size);
  /* udivx is faster than 10 or 12 mulx's for one limb via an inverse */
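  /* A sketch of the shape that one-limb special case takes (reconstructed
     for illustration along the lines of the generic C mode1o.c, not
     verbatim from this file): the `%' compiles to a single udivx, and the
     result is c-s reduced mod d.  */
  if (size == 1)
    {
      s = src[0];
      if (s > orig_c)
        {
          l = s - orig_c;
          h = l % d;            /* one udivx */
          if (h != 0)
            h = d - h;          /* negate mod d, giving c-s mod d */
        }
      else
        {
          l = orig_c - s;
          h = l % d;            /* c-s mod d directly */
        }
      ASSERT_RETVAL (h);
      return h;
    }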
  binvert_limb (inverse, d);    /* inverse of d mod 2^64, d odd so it exists */
      SUBC_LIMB (c, l, s, c);               /* l = s - c, with borrow out */
      q = l * inverse;                      /* so the low limb of q*d is l */
      umul_ppmm_half_lowequal (h, q, d, l); /* h = high limb of q*d */
      /* With high s <= d the final step can be a subtract and addback.
         If c==0 then the addback will restore to l>=0.  If c==d then
         will get l==d if s==0, but that's ok per the function
         definition.  */
      l = c - s;
      l += (l > c ? d : 0);     /* addback d if the subtract borrowed */
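      /* For example, with d==5, c==0 and s==3: l = 0-3 wraps to 2^64-3,
         which is > c, so d is added back, giving l == 2 == c-s mod d.  */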
      /* Can't skip a divide, just do the loop code once more. */
      SUBC_LIMB (c, l, s, c);
      q = l * inverse;
      umul_ppmm_half_lowequal (h, q, d, l);
      mp_limb_t  dl = LOW32 (d);
      mp_limb_t  dh = HIGH32 (d);
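
      /* In this 64-bit divisor loop each limb costs the four mulx noted
         above: one for q = l*inverse and three inside umul_ppmm_lowequal
         (qh*dh, ql*dh, qh*dl); ql*dl is implied by l and never formed.  */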
      SUBC_LIMB (c, l, s, c);                  /* l = s - c, with borrow out */
      q = l * inverse;                         /* so the low limb of q*d is l */
      umul_ppmm_lowequal (h, q, d, dh, dl, l); /* h = high limb of q*d */
      /* With high s <= d the final step can be a subtract and addback.
         If c==0 then the addback will restore to l>=0.  If c==d then
         will get l==d if s==0, but that's ok per the function
         definition.  */
      l = c - s;
      l += (l > c ? d : 0);     /* addback d if the subtract borrowed */
      /* Can't skip a divide, just do the loop code once more. */
      SUBC_LIMB (c, l, s, c);
      q = l * inverse;
      umul_ppmm_lowequal (h, q, d, dh, dl, l);
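
/* Usage sketch, not from this file: the typical internal caller tests
   divisibility by an odd limb d, along the lines of the hypothetical
   call site

       if (mpn_modexact_1c_odd (xp, xn, d, CNST_LIMB(0)) == 0)
         ... {xp,xn} is divisible by d ...

   relying on the return value being 0 exactly when d divides the operand
   and the initial carry is zero.  */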