+2013-12-06 Adhemerval Zanella <azanella@linux.vnet.ibm.com>
+
+ * sysdeps/powerpc/powerpc64/addmul_1.S: New file: optimized
+ __mpn_addmul_1 for PowerPC64.
+ * sysdeps/powerpc/powerpc64/lshift.S: New file: optimized __mpn_lshift
+ for PowerPC64.
+ * sysdeps/powerpc/powerpc64/mul_1.S: New file: optimized __mpn_mul_1
+ for PowerPC64.
+ * sysdeps/powerpc/powerpc64/submul_1.S: New file: optimized
+ __mpn_submul_1 for PowerPC64.
+
2013-12-06 Fernando J. V. da Silva <fernandojvdasilva@gmail.com>

 [BZ #15089]
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/addmul_1.S
+/* PowerPC64 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+ the result to a second limb vector.
+ Copyright (C) 1999-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#ifdef USE_AS_SUBMUL
+# define FUNC __mpn_submul_1
+# define ADDSUBC subfe
+# define ADDSUB subfc
+#else
+# define FUNC __mpn_addmul_1
+# define ADDSUBC adde
+# define ADDSUB addc
+#endif
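+
+/* ADDSUB/ADDSUBC form the accumulation chain: carry-propagating add
+ (addc/adde) for addmul, borrow-propagating reverse subtract
+ (subfc/subfe) for submul. */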
+
+#define RP r3
+#define UP r4
+#define N r5
+#define VL r6
+
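+/* mp_limb_t FUNC (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v)
+ in the usual GMP mpn convention: multiply up[0..n-1] by the single
+ limb v and add the product to (for __mpn_submul_1, subtract it from)
+ rp[0..n-1], returning the final carry (borrow) limb. The loop is
+ unrolled four ways; the entry code dispatches on n mod 4 so the
+ leftover limbs are handled before falling into the common loop
+ bottom at L(bot). */
+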
+EALIGN(FUNC, 5, 0)
+ std r31, -8(r1)
+ rldicl. r0, N, 0, 62
+ std r30, -16(r1)
+ cmpdi cr6, r0, 2
+ std r29, -24(r1)
+ addi N, N, 3
+ std r28, -32(r1)
+ srdi N, N, 2
+ std r27, -40(r1)
+ mtctr N
+ beq cr0, L(b00)
+ blt cr6, L(b01)
+ beq cr6, L(b10)
+
+L(b11): ld r9, 0(UP)
+ ld r28, 0(RP)
+ mulld r0, r9, VL
+ mulhdu r12, r9, VL
+ ADDSUB r0, r0, r28
+ std r0, 0(RP)
+ addi RP, RP, 8
+ ld r9, 8(UP)
+ ld r27, 16(UP)
+ addi UP, UP, 24
+#ifdef USE_AS_SUBMUL
+ subfe r11, r11, r11
+#endif
+ b L(bot)
+
+ .align 4
+L(b00): ld r9, 0(UP)
+ ld r27, 8(UP)
+ ld r28, 0(RP)
+ ld r29, 8(RP)
+ mulld r0, r9, VL
+ mulhdu N, r9, VL
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ addc r7, r7, N
+ addze r12, r8
+ ADDSUB r0, r0, r28
+ std r0, 0(RP)
+ ADDSUBC r7, r7, r29
+ std r7, 8(RP)
+ addi RP, RP, 16
+ ld r9, 16(UP)
+ ld r27, 24(UP)
+ addi UP, UP, 32
+#ifdef USE_AS_SUBMUL
+ subfe r11, r11, r11
+#endif
+ b L(bot)
+
+ .align 4
+L(b01): bdnz L(gt1)
+ ld r9, 0(UP)
+ ld r11, 0(RP)
+ mulld r0, r9, VL
+ mulhdu r8, r9, VL
+ ADDSUB r0, r0, r11
+ std r0, 0(RP)
+#ifdef USE_AS_SUBMUL
+ subfe r11, r11, r11
+ addic r11, r11, 1
+#endif
+ addze RP, r8
+ blr
+
+L(gt1): ld r9, 0(UP)
+ ld r27, 8(UP)
+ mulld r0, r9, VL
+ mulhdu N, r9, VL
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ ld r9, 16(UP)
+ ld r28, 0(RP)
+ ld r29, 8(RP)
+ ld r30, 16(RP)
+ mulld r11, r9, VL
+ mulhdu r10, r9, VL
+ addc r7, r7, N
+ adde r11, r11, r8
+ addze r12, r10
+ ADDSUB r0, r0, r28
+ std r0, 0(RP)
+ ADDSUBC r7, r7, r29
+ std r7, 8(RP)
+ ADDSUBC r11, r11, r30
+ std r11, 16(RP)
+ addi RP, RP, 24
+ ld r9, 24(UP)
+ ld r27, 32(UP)
+ addi UP, UP, 40
+#ifdef USE_AS_SUBMUL
+ subfe r11, r11, r11
+#endif
+ b L(bot)
+
+L(b10): addic r0, r0, 0
+ li r12, 0
+ ld r9, 0(UP)
+ ld r27, 8(UP)
+ bdz L(end)
+ addi UP, UP, 16
+
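+/* Main loop, four limbs per iteration: fold the carry limb r12 from
+ the previous group into the low product, link the high products
+ together with adde, then add the four results into rp[] (subtract
+ for submul) with the ADDSUB/ADDSUBC chain. */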
+ .align 4
+L(top): mulld r0, r9, VL
+ mulhdu N, r9, VL
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ ld r9, 0(UP)
+ ld r28, 0(RP)
+ ld r27, 8(UP)
+ ld r29, 8(RP)
+ adde r0, r0, r12
+ adde r7, r7, N
+ mulld N, r9, VL
+ mulhdu r10, r9, VL
+ mulld r11, r27, VL
+ mulhdu r12, r27, VL
+ ld r9, 16(UP)
+ ld r30, 16(RP)
+ ld r27, 24(UP)
+ ld r31, 24(RP)
+ adde N, N, r8
+ adde r11, r11, r10
+ addze r12, r12
+ ADDSUB r0, r0, r28
+ std r0, 0(RP)
+ ADDSUBC r7, r7, r29
+ std r7, 8(RP)
+ ADDSUBC N, N, r30
+ std N, 16(RP)
+ ADDSUBC r11, r11, r31
+ std r11, 24(RP)
+ addi UP, UP, 32
+#ifdef USE_AS_SUBMUL
+ subfe r11, r11, r11
+#endif
+ addi RP, RP, 32
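+/* For submul the subtraction borrow cannot survive the adde chain at
+ the loop top, so the subfe above saves it as r11 = CA - 1 and the
+ addic below converts it back into a carry to be folded into the
+ next low product. */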
+L(bot):
+#ifdef USE_AS_SUBMUL
+ addic r11, r11, 1
+#endif
+ bdnz L(top)
+
+L(end): mulld r0, r9, VL
+ mulhdu N, r9, VL
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ ld r28, 0(RP)
+ ld r29, 8(RP)
+ adde r0, r0, r12
+ adde r7, r7, N
+ addze r8, r8
+ ADDSUB r0, r0, r28
+ std r0, 0(RP)
+ ADDSUBC r7, r7, r29
+ std r7, 8(RP)
+#ifdef USE_AS_SUBMUL
+ subfe r11, r11, r11
+ addic r11, r11, 1
+#endif
+ addze RP, r8
+ ld r31, -8(r1)
+ ld r30, -16(r1)
+ ld r29, -24(r1)
+ ld r28, -32(r1)
+ ld r27, -40(r1)
+ blr
+END(FUNC)
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/lshift.S
+/* PowerPC64 mpn_lshift -- rp[] = up[] << cnt
+ Copyright (C) 2003-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#define RP r3
+#define UP r4
+#define N r5
+#define CNT r6
+
+#define TNC r0
+#define U0 r30
+#define U1 r31
+#define RETVAL r5
+
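+/* mp_limb_t __mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n,
+ unsigned int cnt) in the usual GMP mpn convention: shift up[0..n-1]
+ left by cnt bits (1 <= cnt < 64), store the result in rp[], and
+ return the bits shifted out of the most significant limb. The limbs
+ are processed from the high end downward, so the operands may
+ overlap as long as rp >= up. */
+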
+EALIGN(__mpn_lshift, 5, 0)
+ std U1, -8(r1)
+ std U0, -16(r1)
+ subfic TNC, CNT, 64
+ sldi r7, N, 3
+ add UP, UP, r7
+ add RP, RP, r7
+ rldicl. U0, N, 0, 62
+ cmpdi cr6, U0, 2
+ addi U1, N, 3
+ ld r10, -8(UP)
+ srd RETVAL, r10, TNC
+
+ srdi U1, U1, 2
+ mtctr U1
+ beq cr0, L(b00)
+ blt cr6, L(b01)
+ ld r11, -16(UP)
+ beq cr6, L(b10)
+
+ .align 4
+L(b11): sld r8, r10, CNT
+ srd r9, r11, TNC
+ ld U1, -24(UP)
+ addi UP, UP, -24
+ sld r12, r11, CNT
+ srd r7, U1, TNC
+ addi RP, RP, 16
+ bdnz L(gt3)
+
+ or r11, r8, r9
+ sld r8, U1, CNT
+ b L(cj3)
+
+ .align 4
+L(gt3): ld U0, -8(UP)
+ or r11, r8, r9
+ sld r8, U1, CNT
+ srd r9, U0, TNC
+ ld U1, -16(UP)
+ or r10, r12, r7
+ b L(L11)
+
+ .align 5
+L(b10): sld r12, r10, CNT
+ addi RP, RP, 24
+ srd r7, r11, TNC
+ bdnz L(gt2)
+
+ sld r8, r11, CNT
+ or r10, r12, r7
+ b L(cj2)
+
+L(gt2): ld U0, -24(UP)
+ sld r8, r11, CNT
+ srd r9, U0, TNC
+ ld U1, -32(UP)
+ or r10, r12, r7
+ sld r12, U0, CNT
+ srd r7, U1, TNC
+ ld U0, -40(UP)
+ or r11, r8, r9
+ addi UP, UP, -16
+ b L(L10)
+
+ .align 4
+L(b00): ld U1, -16(UP)
+ sld r12, r10, CNT
+ srd r7, U1, TNC
+ ld U0, -24(UP)
+ sld r8, U1, CNT
+ srd r9, U0, TNC
+ ld U1, -32(UP)
+ or r10, r12, r7
+ sld r12, U0, CNT
+ srd r7, U1, TNC
+ addi RP, RP, 8
+ bdz L(cj4)
+
+L(gt4): addi UP, UP, -32
+ ld U0, -8(UP)
+ or r11, r8, r9
+ b L(L00)
+
+ .align 4
+L(b01): bdnz L(gt1)
+ sld r8, r10, CNT
+ std r8, -8(RP)
+ b L(ret)
+
+L(gt1): ld U0, -16(UP)
+ sld r8, r10, CNT
+ srd r9, U0, TNC
+ ld U1, -24(UP)
+ sld r12, U0, CNT
+ srd r7, U1, TNC
+ ld U0, -32(UP)
+ or r11, r8, r9
+ sld r8, U1, CNT
+ srd r9, U0, TNC
+ ld U1, -40(UP)
+ addi UP, UP, -40
+ or r10, r12, r7
+ bdz L(end)
+
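+/* Main loop, four limbs per iteration: each output limb is the OR of
+ one limb shifted left by cnt and its lower neighbour shifted right
+ by 64 - cnt (TNC), with r7-r12 and U0/U1 rotating roles. The inner
+ labels L(L00), L(L11) and L(L10) let the n mod 4 setup code join
+ the unrolled body mid-rotation. */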
+ .align 5
+L(top): sld r12, U0, CNT
+ srd r7, U1, TNC
+ ld U0, -8(UP)
+ std r11, -8(RP)
+ or r11, r8, r9
+L(L00): sld r8, U1, CNT
+ srd r9, U0, TNC
+ ld U1, -16(UP)
+ std r10, -16(RP)
+ or r10, r12, r7
+L(L11): sld r12, U0, CNT
+ srd r7, U1, TNC
+ ld U0, -24(UP)
+ std r11, -24(RP)
+ or r11, r8, r9
+L(L10): sld r8, U1, CNT
+ srd r9, U0, TNC
+ ld U1, -32(UP)
+ addi UP, UP, -32
+ std r10, -32(RP)
+ addi RP, RP, -32
+ or r10, r12, r7
+ bdnz L(top)
+
+ .align 5
+L(end): sld r12, U0, CNT
+ srd r7, U1, TNC
+ std r11, -8(RP)
+L(cj4): or r11, r8, r9
+ sld r8, U1, CNT
+ std r10, -16(RP)
+L(cj3): or r10, r12, r7
+ std r11, -24(RP)
+L(cj2): std r10, -32(RP)
+ std r8, -40(RP)
+
+L(ret): ld U1, -8(r1)
+ ld U0, -16(r1)
+ mr RP, RETVAL
+ blr
+END(__mpn_lshift)
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/mul_1.S
+/* PowerPC64 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+ the result in a second limb vector.
+ Copyright (C) 1999-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#define RP r3
+#define UP r4
+#define N r5
+#define VL r6
+
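+/* mp_limb_t __mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n,
+ mp_limb_t v) in the usual GMP mpn convention: store up[0..n-1]
+ times the single limb v into rp[0..n-1] and return the high limb
+ of the product. Same structure as addmul_1: dispatch on n mod 4,
+ then a four-way unrolled loop. */
+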
+EALIGN(__mpn_mul_1, 5, 0)
+ std r27, -40(r1)
+ std r26, -48(r1)
+ li r12, 0
+ ld r26, 0(UP)
+
+ rldicl. r0, N, 0, 62
+ cmpdi cr6, r0, 2
+ addic N, N, 3
+ srdi N, N, 2
+ mtctr N
+ beq cr0, L(b00)
+ blt cr6, L(b01)
+ beq cr6, L(b10)
+
+L(b11): mr r7, r12
+ mulld r0, r26, VL
+ mulhdu r12, r26, VL
+ addi UP, UP, 8
+ addc r0, r0, r7
+ std r0, 0(RP)
+ addi RP, RP, 8
+ b L(fic)
+
+L(b00): ld r27, 8(UP)
+ addi UP, UP, 16
+ mulld r0, r26, VL
+ mulhdu N, r26, VL
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ addc r0, r0, r12
+ adde r7, r7, N
+ addze r12, r8
+ std r0, 0(RP)
+ std r7, 8(RP)
+ addi RP, RP, 16
+ b L(fic)
+
+ nop
+L(b01): bdnz L(gt1)
+ mulld r0, r26, VL
+ mulhdu r8, r26, VL
+ addc r0, r0, r12
+ std r0, 0(RP)
+ b L(ret)
+L(gt1): ld r27, 8(UP)
+ nop
+ mulld r0, r26, VL
+ mulhdu N, r26, VL
+ ld r26, 16(UP)
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ mulld r9, r26, VL
+ mulhdu r10, r26, VL
+ addc r0, r0, r12
+ adde r7, r7, N
+ adde r9, r9, r8
+ addze r12, r10
+ std r0, 0(RP)
+ std r7, 8(RP)
+ std r9, 16(RP)
+ addi UP, UP, 24
+ addi RP, RP, 24
+ b L(fic)
+
+ nop
+L(fic): ld r26, 0(UP)
+L(b10): ld r27, 8(UP)
+ addi UP, UP, 16
+ bdz L(end)
+
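+/* Main loop, four limbs per iteration: the carry limb travels in r12
+ (plus the CA bit), and each mulhdu result is linked into the next
+ product with adde, so a single carry chain runs through the loop. */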
+L(top): mulld r0, r26, VL
+ mulhdu N, r26, VL
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ ld r26, 0(UP)
+ ld r27, 8(UP)
+ adde r0, r0, r12
+ adde r7, r7, N
+ mulld r9, r26, VL
+ mulhdu r10, r26, VL
+ mulld r11, r27, VL
+ mulhdu r12, r27, VL
+ ld r26, 16(UP)
+ ld r27, 24(UP)
+ std r0, 0(RP)
+ adde r9, r9, r8
+ std r7, 8(RP)
+ adde r11, r11, r10
+ std r9, 16(RP)
+ addi UP, UP, 32
+ std r11, 24(RP)
+
+ addi RP, RP, 32
+ bdnz L(top)
+
+L(end): mulld r0, r26, VL
+ mulhdu N, r26, VL
+ mulld r7, r27, VL
+ mulhdu r8, r27, VL
+ adde r0, r0, r12
+ adde r7, r7, N
+ std r0, 0(RP)
+ std r7, 8(RP)
+L(ret): addze RP, r8
+ ld r27, -40(r1)
+ ld r26, -48(r1)
+ blr
+END(__mpn_mul_1)
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/submul_1.S
+/* PowerPC64 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+ the result from a second limb vector.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
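+/* __mpn_submul_1 reuses the addmul_1 body; USE_AS_SUBMUL swaps the
+ addc/adde accumulation for subfc/subfe and adjusts the returned
+ borrow limb. */
+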
+#define USE_AS_SUBMUL
+#include "addmul_1.S"