1 dnl AMD K6-2 mpn_copyd -- copy limb vector, decrementing.
3 dnl Copyright 2001, 2002 Free Software Foundation, Inc.
5 dnl This file is part of the GNU MP Library.
7 dnl The GNU MP Library is free software; you can redistribute it and/or
8 dnl modify it under the terms of the GNU Lesser General Public License as
9 dnl published by the Free Software Foundation; either version 3 of the
10 dnl License, or (at your option) any later version.
12 dnl The GNU MP Library is distributed in the hope that it will be useful,
13 dnl but WITHOUT ANY WARRANTY; without even the implied warranty of
14 dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 dnl Lesser General Public License for more details.
17 dnl You should have received a copy of the GNU Lesser General Public License
18 dnl along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
20 include(`../config.m4')
23 C K6-2: 1.0 cycles/limb
26 C void mpn_copyd (mp_ptr dst, mp_srcptr src, mp_size_t size);
28 C The loop here is no faster than a rep movsl at 1.0 c/l, but it avoids a 30
29 C cycle startup time, which amounts for instance to a 2x speedup at 15
32 C If dst is 4mod8 the loop would be 1.17 c/l, but that's avoided by
33 C processing one limb separately to make it aligned. This and a final odd
34 C limb are handled in a branch-free fashion, ending up re-copying if the
35 C special case isn't needed.
39 C There used to be a big unrolled version of this, running at 0.56 c/l if
40 C the destination was aligned, but that seemed rather excessive for the
41 C relative importance of copyd.
43 C If the destination alignment is ignored and just left to run at 1.17 c/l
44 C some code size and a fixed few cycles can be saved. Considering how few
45 C uses copyd finds perhaps that should be favoured. The current code has
46 C the attraction of being no slower than a basic rep movsl though.
dnl  Stack-frame offsets of the three C arguments, matching the
dnl  C prototype above: mpn_copyd (dst, src, size).  defframe() gives
dnl  each a symbolic name for an offset from the frame base.
defframe(PARAM_SIZE,12)
defframe(PARAM_SRC, 8)
defframe(PARAM_DST, 4)
dnl  re-using parameter space
dnl  SAVE_EBX aliases the PARAM_SIZE stack slot so callee-saved %ebx
dnl  can be stored there without growing the stack.  NOTE(review):
dnl  this is only safe once the size has been copied into a register;
dnl  the load is outside this view -- confirm against the full prologue.
define(SAVE_EBX,`PARAM_SIZE')
C NOTE(review): this is a NON-CONTIGUOUS fragment of the mpn_copyd
C body -- the PROLOGUE, parameter loads, conditional branches, loop
C labels and EPILOGUE fall on lines outside this view.  Presumably
C %eax = src, %edx = dst, %ecx = size (per the PARAM_* frame slots
C above) -- TODO confirm against the full function.
C
C %ecx becomes a decrementing limb index; sub rather than dec is used
C only for instruction-length / code-alignment reasons (same flags
C effect is not relied on here as far as this fragment shows).
subl $1, %ecx C better code alignment than decl
C Assuming %ecx = size-1 and %edx = dst at this point (hidden lines
C may intervene), this computes %ebx = dst + 4*size, i.e. one past the
C top of the destination, presumably feeding the alignment test below.
leal 4(%edx,%ecx,4), %ebx
C Copy the high limb first (decrementing copy).  Zdisp is a GMP m4
C macro forcing an explicit zero displacement byte in the encoding,
C padding the instruction for better code alignment.
Zdisp( movd, 0,(%eax,%ecx,4), %mm0) C high limb
Zdisp( movd, %mm0, 0,(%edx,%ecx,4)) C Zdisp for good code alignment
C Low bit of the (shifted -- shift not visible here) dst address:
C per the header comment, a 4mod8 destination would cost 1.17 c/l,
C so one limb is peeled off to align the movq stores to 8 bytes.
andl $1, %ebx C 1 if dst[size-2] unaligned
C Two limbs per movq: quadword load from src, quadword store to dst,
C offsets chosen relative to the decrementing index in %ecx.  The
C surrounding loop control is on hidden lines.
movq -4(%eax,%ecx,4), %mm0
movq %mm0, 4(%edx,%ecx,4)