--- /dev/null
+/* gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Limb and C type sizes for an LP64 target (64-bit limb and 64-bit
+   `long').  NOTE(review): the Alpha 21064 code that follows in this
+   patch suggests this is the Alpha configuration -- confirm against
+   the file's directory placement.  */
+#define BITS_PER_MP_LIMB 64
+#define BYTES_PER_MP_LIMB 8
+#define BITS_PER_LONGINT 64
+#define BITS_PER_INT 32
+#define BITS_PER_SHORTINT 16
+#define BITS_PER_CHAR 8
--- /dev/null
+ # Alpha 21064 __udiv_qrnnd
+
+ # Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+ # This file is part of the GNU MP Library.
+
+ # The GNU MP Library is free software; you can redistribute it and/or modify
+ # it under the terms of the GNU Library General Public License as published by
+ # the Free Software Foundation; either version 2 of the License, or (at your
+ # option) any later version.
+
+ # The GNU MP Library is distributed in the hope that it will be useful, but
+ # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+ # License for more details.
+
+ # You should have received a copy of the GNU Library General Public License
+ # along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+ # the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+ # __udiv_qrnnd (rem_ptr, n1, n0, d)
+ # Divide the two-limb number {n1,n0} (n1 = high limb) by d with plain
+ # shift-and-subtract; the 21064 has no integer divide instruction.
+ # The quotient is returned in $0, the remainder is stored at 0(rem_ptr).
+ # NOTE(review): as with the generic udiv_qrnnd, this presumably assumes
+ # n1 < d so the quotient fits in one limb -- confirm against callers.
+
+ .set noreorder
+ .set noat
+
+.text
+ .align 3
+ .globl __udiv_qrnnd
+ .ent __udiv_qrnnd 0
+__udiv_qrnnd:
+__udiv_qrnnd..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+#define cnt $2
+#define tmp $3
+#define rem_ptr $16
+#define n1 $17
+#define n0 $18
+#define d $19
+#define qb $20
+
+ ldiq cnt,16
+ blt d,Largedivisor # d has its top bit set: take the halving path
+
+ # Simple case: d < 2^63.  Each 8-instruction group below develops one
+ # quotient bit: shift {n1,n0} left one bit, subtract d from n1 when it
+ # fits (cmpule/cmovne is a branch-free restoring step), and deposit the
+ # quotient bit in the freed low bit of n0.  The group is unrolled four
+ # times and cnt counts 16 passes, for 64 bits in all.
+Loop1: cmplt n0,0,tmp # tmp = top bit of n0
+ addq n1,n1,n1 # shift {n1,n0} left one bit
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb # qb = next quotient bit
+ subq n1,d,tmp
+ cmovne qb,tmp,n1 # restoring subtract, taken only when d fits
+ bis n0,qb,n0 # record the quotient bit
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,Loop1
+ stq n1,0(rem_ptr) # store remainder
+ bis $31,n0,$0 # return quotient in $0
+ ret $31,($26),1
+
+ # d >= 2^63: the shift in the step above would lose n1's top bit.
+ # Instead divide floor({n1,n0}/2) by ceil(d/2) with the same unrolled
+ # scheme, then reconstruct the true remainder and apply at most two
+ # corrections (the "Odd" path handles odd d, where ceil(d/2) != d/2).
+Largedivisor:
+ and n0,1,$4 # $4 = low bit of n0, lost by the halving
+
+ srl n0,1,n0 # {n1,n0} = floor({n1,n0} / 2)
+ sll n1,63,tmp
+ or tmp,n0,n0
+ srl n1,1,n1
+
+ and d,1,$6 # $6 = d & 1
+ srl d,1,$5
+ addq $5,$6,$5 # $5 = ceil(d / 2)
+
+Loop2: cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,Loop2
+
+ addq n1,n1,n1 # remainder = 2*r' ...
+ addq $4,n1,n1 # ... plus the saved low bit of n0
+ bne $6,Odd # odd d needs the corrections below
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+Odd:
+ /* q' in n0. r' in n1. */
+ # For odd d we divided by (d+1)/2, so the true remainder is the
+ # reconstructed value plus q'; fold overflow and one extra d back in.
+ addq n1,n0,n1
+ cmpult n1,n0,tmp # tmp := carry from addq
+ beq tmp,LLp6
+ addq n0,1,n0 # sum wrapped mod 2^64: q++, r -= d
+ subq n1,d,n1
+LLp6: cmpult n1,d,tmp
+ bne tmp,LLp7 # r already < d: done
+ addq n0,1,n0 # final restoring correction
+ subq n1,d,n1
+LLp7:
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+ .end __udiv_qrnnd
--- /dev/null
+/* mc68020 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+ sum in a third limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  __mpn_add_n (res_ptr, s1_ptr, s2_ptr, size)
+  Add the limb vectors at s1_ptr and s2_ptr, both `size' (> 0) limbs
+  long, store the sum at res_ptr, and return the carry-out (0 or 1)
+  in d0.
+
+  INPUT PARAMETERS
+  res_ptr (sp + 4)
+  s1_ptr (sp + 8)
+  s2_ptr (sp + 12)
+  size (sp + 16)
+
+  NOTE(review): this offset list previously read s2_ptr (sp + 16) and
+  size (sp + 12); the loads below (sp+20 and sp+24 after two 4-byte
+  pushes, i.e. caller-frame sp+12 and sp+16) show the layout above is
+  the actual one.
+*/
+
+#include "asm-syntax.h"
+
+ TEXT
+ ALIGN
+ GLOBL ___mpn_add_n
+
+LAB(___mpn_add_n)
+/* Save used registers on the stack. */
+ INSN2(move,l ,MEM_PREDEC(sp),d2)
+ INSN2(move,l ,MEM_PREDEC(sp),a2)
+
+/* Copy the arguments to registers. Better use movem? */
+ INSN2(move,l ,a2,MEM_DISP(sp,12)) /* a2 = res_ptr */
+ INSN2(move,l ,a0,MEM_DISP(sp,16)) /* a0 = s1_ptr */
+ INSN2(move,l ,a1,MEM_DISP(sp,20)) /* a1 = s2_ptr */
+ INSN2(move,l ,d2,MEM_DISP(sp,24)) /* d2 = size */
+
+/* The loop adds two limbs per pass.  eor #1 + lsr #1 halve the count
+   and test its low bit in one go: an odd size enters the loop at L1 so
+   its extra limb is handled first.  dbf counts only 16 bits, so the
+   sub #0x10000 / bcs sequence below extends it past 65535 limbs. */
+ INSN2(eor,w ,d2,#1)
+ INSN2(lsr,l ,d2,#1)
+ bcc L1
+ INSN2(subq,l ,d2,#1) /* clears cy as side effect */
+
+LAB(Loop)
+ INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(addx,l ,d0,d1) /* d0 = *s1++ + *s2++ + X (extended carry) */
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+LAB(L1) INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(addx,l ,d0,d1)
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+
+ dbf d2,Loop /* loop until 16 lsb of %4 == -1 */
+ INSN2(subx,l ,d0,d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
+ INSN2(sub,l ,d2,#0x10000)
+ bcs L2 /* high count word exhausted: done */
+ INSN2(add,l ,d0,d0) /* restore cy */
+ bra Loop
+
+LAB(L2)
+ INSN1(neg,l ,d0) /* return carry as 0 or 1 in d0 */
+
+/* Restore used registers from stack frame. */
+ INSN2(move,l ,a2,MEM_POSTINC(sp))
+ INSN2(move,l ,d2,MEM_POSTINC(sp))
+
+ rts
--- /dev/null
+/* mc68020 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+ the result to a second limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  __mpn_addmul_1 (res_ptr, s1_ptr, size, s2_limb)
+  Multiply the `size'-limb vector at s1_ptr by the single limb s2_limb
+  and add the products into the vector at res_ptr; the carry-out limb
+  is returned in d0.  Requires the mc68020+ 64-bit mulu.l.
+
+  INPUT PARAMETERS
+  res_ptr (sp + 4)
+  s1_ptr (sp + 8)
+  size (sp + 12)
+  s2_limb (sp + 16)
+*/
+
+#include "asm-syntax.h"
+
+ TEXT
+ ALIGN
+ GLOBL ___mpn_addmul_1
+
+LAB(___mpn_addmul_1)
+
+#define res_ptr a0
+#define s1_ptr a1
+#define size d2
+#define s2_limb d4
+
+/* Save used registers on the stack. */
+ INSN2(movem,l ,MEM_PREDEC(sp),d2-d5)
+
+/* Copy the arguments to registers. Better use movem? */
+ INSN2(move,l ,res_ptr,MEM_DISP(sp,20))
+ INSN2(move,l ,s1_ptr,MEM_DISP(sp,24))
+ INSN2(move,l ,size,MEM_DISP(sp,28))
+ INSN2(move,l ,s2_limb,MEM_DISP(sp,32))
+
+/* d5 stays zero throughout, so "addx dN,d5" merely folds the X flag
+   into a high product word.  eor #1 + lsr #1 halve the count and route
+   an odd size into the loop at L1 (two limbs are processed per pass). */
+ INSN2(eor,w ,size,#1)
+ INSN1(clr,l ,d1)
+ INSN1(clr,l ,d5)
+ INSN2(lsr,l ,size,#1)
+ bcc L1
+ INSN2(subq,l ,size,#1)
+ INSN2(sub,l ,d0,d0) /* (d0,cy) <= (0,0) */
+
+/* Software-pipelined: d0 and d1 alternate as the carry limb.  mulu.l
+   leaves the 64-bit product in dX:d3 (high:low); the previous carry
+   limb plus X are added to the low word, and adding that word into the
+   result limb in memory sets X for the next step. */
+LAB(Loop)
+ INSN2(move,l ,d3,MEM_POSTINC(s1_ptr))
+ INSN2(mulu,l ,d1:d3,s2_limb)
+ INSN2(addx,l ,d3,d0)
+ INSN2(addx,l ,d1,d5)
+ INSN2(add,l ,MEM_POSTINC(res_ptr),d3)
+LAB(L1) INSN2(move,l ,d3,MEM_POSTINC(s1_ptr))
+ INSN2(mulu,l ,d0:d3,s2_limb)
+ INSN2(addx,l ,d3,d1)
+ INSN2(addx,l ,d0,d5)
+ INSN2(add,l ,MEM_POSTINC(res_ptr),d3)
+
+ dbf size,Loop
+ INSN2(addx,l ,d0,d5) /* fold the last carry into the return limb */
+ INSN2(sub,l ,size,#0x10000)
+ bcc Loop /* dbf counts 16 bits; resume for size >= 0x10000 */
+
+/* Restore used registers from stack frame. */
+ INSN2(movem,l ,d2-d5,MEM_POSTINC(sp))
+
+ rts
--- /dev/null
+/* asm.h -- Definitions for 68k syntax variations.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Exactly one of MIT_SYNTAX, SONY_SYNTAX or MOTOROLA_SYNTAX is expected
+   to be defined; the m68k .S files are written entirely in terms of the
+   macros below so the same source assembles under any of the three
+   assembler dialects.  Note the macro bodies deliberately contain no
+   space after the parameter list -- the expansion's exact spacing is
+   part of the assembler's input.  */
+
+/* MIT syntax: size suffix glued directly to the mnemonic ("movel"),
+   memory operands written base@ / base@(d) / base@- / base@+,
+   source operand last.  */
+#ifdef MIT_SYNTAX
+#define MEM(base)base@
+#define MEM_DISP(base,displacement)base@(displacement)
+#define MEM_PREDEC(memory_base)memory_base@-
+#define MEM_POSTINC(memory_base)memory_base@+
+/* ## pasting for ANSI preprocessors; the /**/ trick for traditional cpp. */
+#ifdef __STDC__
+#define INSN1(mnemonic,size_suffix,dst)mnemonic##size_suffix dst
+#define INSN2(mnemonic,size_suffix,dst,src)mnemonic##size_suffix src,dst
+#else
+#define INSN1(mnemonic,size_suffix,dst)mnemonic/**/size_suffix dst
+#define INSN2(mnemonic,size_suffix,dst,src)mnemonic/**/size_suffix src,dst
+#endif
+#define LAB(label) label:
+#define TEXT .text
+#define ALIGN .even
+#define GLOBL .globl
+#endif
+
+/* Sony syntax: dot-separated size suffix ("move.l"), parenthesized
+   memory operands, otherwise MIT-like directives.  */
+#ifdef SONY_SYNTAX
+#define MEM(base)(base)
+#define MEM_DISP(base,displacement)(displacement,base)
+#define MEM_PREDEC(memory_base)-(memory_base)
+#define MEM_POSTINC(memory_base)(memory_base)+
+#define INSN1(mnemonic,size_suffix,dst)mnemonic.size_suffix dst
+#ifdef __STDC__
+#define INSN2(mnemonic,size_suffix,dst,src)mnemonic.size_suffix src##,dst
+#else
+#define INSN2(mnemonic,size_suffix,dst,src)mnemonic.size_suffix src/**/,dst
+#endif
+#define LAB(label) label:
+#define TEXT .text
+#define ALIGN .even
+#define GLOBL .globl
+#endif
+
+/* Motorola syntax: uppercase mnemonics and register names, labels in
+   column one without a colon, XDEF for export.  The lowercase names the
+   .S files use are mapped to uppercase with plain defines -- note this
+   makes `l', `w', `d0' ... reserved identifiers in those files, which
+   is why the INSN macros take the size suffix as a separate argument. */
+#ifdef MOTOROLA_SYNTAX
+#define MEM(base)(base)
+#define MEM_DISP(base,displacement)(displacement,base)
+#define MEM_PREDEC(memory_base)-(memory_base)
+#define MEM_POSTINC(memory_base)(memory_base)+
+#define INSN1(mnemonic,size_suffix,dst)mnemonic.size_suffix dst
+#ifdef __STDC__
+#define INSN2(mnemonic,size_suffix,dst,src)mnemonic.size_suffix src##,dst
+#else
+#define INSN2(mnemonic,size_suffix,dst,src)mnemonic.size_suffix src/**/,dst
+#endif
+#define LAB(label) label
+#define TEXT
+#define ALIGN
+#define GLOBL XDEF
+#define l L
+#define w W
+#define move MOVE
+#define eor EOR
+#define lsr LSR
+#define add ADD
+#define addx ADDX
+#define addq ADDQ
+#define sub SUB
+#define subx SUBX
+#define subq SUBQ
+#define neg NEG
+#define bcc BCC
+#define bcs BCS
+#define bra BRA
+#define dbf DBF
+#define rts RTS
+#define d0 D0
+#define d1 D1
+#define d2 D2
+#define d3 D3
+#define d4 D4
+#define d5 D5
+#define d6 D6
+#define d7 D7
+#define a0 A0
+#define a1 A1
+#define a2 A2
+#define a3 A3
+#define a4 A4
+#define a5 A5
+#define a6 A6
+#define a7 A7
+#define sp SP
+#endif
--- /dev/null
+/* mc68020 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+ the result in a second limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  __mpn_mul_1 (res_ptr, s1_ptr, size, s2_limb)
+  Multiply the `size'-limb vector at s1_ptr by the single limb s2_limb,
+  store the product vector at res_ptr, and return the carry-out limb in
+  d0.  Requires the mc68020+ 64-bit mulu.l.
+
+  INPUT PARAMETERS
+  res_ptr (sp + 4)
+  s1_ptr (sp + 8)
+  size (sp + 12)
+  s2_limb (sp + 16)
+*/
+
+#include "asm-syntax.h"
+
+ TEXT
+ ALIGN
+ GLOBL ___mpn_mul_1
+
+LAB(___mpn_mul_1)
+
+#define res_ptr a0
+#define s1_ptr a1
+#define size d2
+#define s2_limb d4
+
+/* Save used registers on the stack. */
+ INSN2(movem,l ,MEM_PREDEC(sp),d2-d4)
+/* Equivalent discrete saves, kept for reference: */
+#if 0
+ INSN2(move,l ,MEM_PREDEC(sp),d2)
+ INSN2(move,l ,MEM_PREDEC(sp),d3)
+ INSN2(move,l ,MEM_PREDEC(sp),d4)
+#endif
+
+/* Copy the arguments to registers. Better use movem? */
+ INSN2(move,l ,res_ptr,MEM_DISP(sp,16))
+ INSN2(move,l ,s1_ptr,MEM_DISP(sp,20))
+ INSN2(move,l ,size,MEM_DISP(sp,24))
+ INSN2(move,l ,s2_limb,MEM_DISP(sp,28))
+
+/* eor #1 + lsr #1 halve the count and route an odd size into the loop
+   at L1; the loop processes two limbs per pass, with d0 and d1
+   alternating as the carry limb (mulu.l leaves the 64-bit product in
+   dX:d3, high:low). */
+ INSN2(eor,w ,size,#1)
+ INSN1(clr,l ,d1)
+ INSN2(lsr,l ,size,#1)
+ bcc L1
+ INSN2(subq,l ,size,#1)
+ INSN2(sub,l ,d0,d0) /* (d0,cy) <= (0,0) */
+
+LAB(Loop)
+ INSN2(move,l ,d3,MEM_POSTINC(s1_ptr))
+ INSN2(mulu,l ,d1:d3,s2_limb)
+ INSN2(addx,l ,d3,d0) /* low word += previous carry limb + X */
+ INSN2(move,l ,MEM_POSTINC(res_ptr),d3)
+LAB(L1) INSN2(move,l ,d3,MEM_POSTINC(s1_ptr))
+ INSN2(mulu,l ,d0:d3,s2_limb)
+ INSN2(addx,l ,d3,d1)
+ INSN2(move,l ,MEM_POSTINC(res_ptr),d3)
+
+ dbf size,Loop
+ INSN1(clr,l ,d3) /* CLR does not affect X, so the last carry survives */
+ INSN2(addx,l ,d0,d3) /* fold it into the return limb */
+ INSN2(sub,l ,size,#0x10000)
+ bcc Loop /* dbf counts 16 bits; resume for size >= 0x10000 */
+
+/* Restore used registers from stack frame. */
+ INSN2(movem,l ,d2-d4,MEM_POSTINC(sp))
+/* Equivalent discrete restores, kept for reference: */
+#if 0
+ INSN2(move,l ,d4,MEM_POSTINC(sp))
+ INSN2(move,l ,d3,MEM_POSTINC(sp))
+ INSN2(move,l ,d2,MEM_POSTINC(sp))
+#endif
+ rts
--- /dev/null
+/* mc68020 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+ store difference in a third limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  __mpn_sub_n (res_ptr, s1_ptr, s2_ptr, size)
+  Subtract the limb vector at s2_ptr from the one at s1_ptr, both
+  `size' (> 0) limbs long, store the difference at res_ptr, and return
+  the borrow-out (0 or 1) in d0.
+
+  INPUT PARAMETERS
+  res_ptr (sp + 4)
+  s1_ptr (sp + 8)
+  s2_ptr (sp + 12)
+  size (sp + 16)
+
+  NOTE(review): this offset list previously read s2_ptr (sp + 16) and
+  size (sp + 12); the loads below (sp+20 and sp+24 after two 4-byte
+  pushes, i.e. caller-frame sp+12 and sp+16) show the layout above is
+  the actual one.
+*/
+
+#include "asm-syntax.h"
+
+ TEXT
+ ALIGN
+ GLOBL ___mpn_sub_n
+
+LAB(___mpn_sub_n)
+/* Save used registers on the stack. */
+ INSN2(move,l ,MEM_PREDEC(sp),d2)
+ INSN2(move,l ,MEM_PREDEC(sp),a2)
+
+/* Copy the arguments to registers. Better use movem? */
+ INSN2(move,l ,a2,MEM_DISP(sp,12)) /* a2 = res_ptr */
+ INSN2(move,l ,a0,MEM_DISP(sp,16)) /* a0 = s1_ptr */
+ INSN2(move,l ,a1,MEM_DISP(sp,20)) /* a1 = s2_ptr */
+ INSN2(move,l ,d2,MEM_DISP(sp,24)) /* d2 = size */
+
+/* The loop subtracts two limbs per pass.  eor #1 + lsr #1 halve the
+   count and test its low bit in one go: an odd size enters the loop at
+   L1 so its extra limb is handled first.  dbf counts only 16 bits, so
+   the sub #0x10000 / bcs sequence below extends it past 65535 limbs. */
+ INSN2(eor,w ,d2,#1)
+ INSN2(lsr,l ,d2,#1)
+ bcc L1
+ INSN2(subq,l ,d2,#1) /* clears cy as side effect */
+
+LAB(Loop)
+ INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(subx,l ,d0,d1) /* d0 = *s1++ - *s2++ - X (extended borrow) */
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+LAB(L1) INSN2(move,l ,d0,MEM_POSTINC(a0))
+ INSN2(move,l ,d1,MEM_POSTINC(a1))
+ INSN2(subx,l ,d0,d1)
+ INSN2(move,l ,MEM_POSTINC(a2),d0)
+
+ dbf d2,Loop /* loop until 16 lsb of %4 == -1 */
+ INSN2(subx,l ,d0,d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
+ INSN2(sub,l ,d2,#0x10000)
+ bcs L2 /* high count word exhausted: done */
+ INSN2(add,l ,d0,d0) /* restore cy */
+ bra Loop
+
+LAB(L2)
+ INSN1(neg,l ,d0) /* return borrow as 0 or 1 in d0 */
+
+/* Restore used registers from stack frame. */
+ INSN2(move,l ,a2,MEM_POSTINC(sp))
+ INSN2(move,l ,d2,MEM_POSTINC(sp))
+
+ rts
--- /dev/null
+/* mc68020 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+ the result from a second limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  __mpn_submul_1 (res_ptr, s1_ptr, size, s2_limb)
+  Multiply the `size'-limb vector at s1_ptr by the single limb s2_limb
+  and subtract the products from the vector at res_ptr; the borrow-out
+  limb is returned in d0.  Requires the mc68020+ 64-bit mulu.l.
+
+  INPUT PARAMETERS
+  res_ptr (sp + 4)
+  s1_ptr (sp + 8)
+  size (sp + 12)
+  s2_limb (sp + 16)
+*/
+
+#include "asm-syntax.h"
+
+ TEXT
+ ALIGN
+ GLOBL ___mpn_submul_1
+
+LAB(___mpn_submul_1)
+
+#define res_ptr a0
+#define s1_ptr a1
+#define size d2
+#define s2_limb d4
+
+/* Save used registers on the stack. */
+ INSN2(movem,l ,MEM_PREDEC(sp),d2-d5)
+
+/* Copy the arguments to registers. Better use movem? */
+ INSN2(move,l ,res_ptr,MEM_DISP(sp,20))
+ INSN2(move,l ,s1_ptr,MEM_DISP(sp,24))
+ INSN2(move,l ,size,MEM_DISP(sp,28))
+ INSN2(move,l ,s2_limb,MEM_DISP(sp,32))
+
+/* d5 stays zero throughout, so "addx dN,d5" merely folds the X flag
+   into a high product word.  eor #1 + lsr #1 halve the count and route
+   an odd size into the loop at L1 (two limbs are processed per pass). */
+ INSN2(eor,w ,size,#1)
+ INSN1(clr,l ,d1)
+ INSN1(clr,l ,d5)
+ INSN2(lsr,l ,size,#1)
+ bcc L1
+ INSN2(subq,l ,size,#1)
+ INSN2(sub,l ,d0,d0) /* (d0,cy) <= (0,0) */
+
+/* Software-pipelined like addmul_1: d0 and d1 alternate as the
+   carry/borrow limb; mulu.l leaves the 64-bit product in dX:d3
+   (high:low), and subtracting the low word from the result limb in
+   memory sets X for the next step. */
+LAB(Loop)
+ INSN2(move,l ,d3,MEM_POSTINC(s1_ptr))
+ INSN2(mulu,l ,d1:d3,s2_limb)
+ INSN2(addx,l ,d3,d0)
+ INSN2(addx,l ,d1,d5)
+ INSN2(sub,l ,MEM_POSTINC(res_ptr),d3)
+LAB(L1) INSN2(move,l ,d3,MEM_POSTINC(s1_ptr))
+ INSN2(mulu,l ,d0:d3,s2_limb)
+ INSN2(addx,l ,d3,d1)
+ INSN2(addx,l ,d0,d5)
+ INSN2(sub,l ,MEM_POSTINC(res_ptr),d3)
+
+ dbf size,Loop
+ INSN2(addx,l ,d0,d5) /* fold the last X into the return limb */
+ INSN2(sub,l ,size,#0x10000)
+ bcc Loop /* dbf counts 16 bits; resume for size >= 0x10000 */
+
+/* Restore used registers from stack frame. */
+ INSN2(movem,l ,d2-d5,MEM_POSTINC(sp))
+
+ rts
--- /dev/null
+; mc88100 __mpn_add -- Add two limb vectors of the same length > 0 and store
+; sum in a third limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 2, or (at your option)
+; any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+
+; You should have received a copy of the GNU General Public License
+; along with the GNU MP Library; see the file COPYING. If not, write to
+; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr r2
+; s1_ptr r3
+; s2_ptr r4
+; size r5
+
+; This code has been optimized to run one instruction per clock, avoiding
+; load stalls and writeback contention. As a result, the instruction
+; order is not always natural.
+
+; The speed is approximately 4.3 clocks/limb + 18 clocks/limb-vector.
+
+; Structure: the limb loop is unrolled 16 deep and entered part-way
+; through via a computed jump (Duff's-device style), so size need not be
+; a multiple of 16:
+;   r10 = size / 16 -- number of full 16-limb passes
+;   r5 = (-size mod 16) * 16 -- byte offset of the entry point within
+;        the loop body (each limb step is 4 instructions = 16 bytes)
+; The pointers are backed up by (-size mod 16) * 4 bytes below so the
+; fixed displacements inside the loop line up with the entry point.
+; The carry is kept in the processor carry bit between addu.cio steps.
+
+#include "sysdep.h"
+
+ENTRY (__mpn_add_n)
+ ld r6,r3,0 ; read first limb from s1_ptr
+ extu r10,r5,4
+ ld r7,r4,0 ; read first limb from s2_ptr
+
+ subu.co r5,r0,r5 ; (clear carry as side effect)
+ mak r5,r5,4<4> ; r5 = (-size mod 16) * 16
+ bcnd eq0,r5,Lzero ; size multiple of 16: enter at the loop top
+
+ or r12,r0,lo16(Lbase)
+ or.u r12,r12,hi16(Lbase)
+ addu r12,r12,r5 ; r12 is address for entering in loop
+
+ extu r5,r5,2 ; divide by 4
+ subu r2,r2,r5 ; adjust res_ptr
+ subu r3,r3,r5 ; adjust s1_ptr
+ subu r4,r4,r5 ; adjust s2_ptr
+
+ or r8,r6,r0 ; prime the r8/r9 half of the software pipeline
+
+ jmp.n r12
+ or r9,r7,r0
+
+Loop: addu r3,r3,64
+ st r8,r2,60
+ addu r4,r4,64
+ ld r6,r3,0
+ addu r2,r2,64
+ ld r7,r4,0
+Lzero: subu r10,r10,1 ; add 0 + 16r limbs (adjust loop counter)
+Lbase: ld r8,r3,4
+ addu.cio r6,r6,r7
+ ld r9,r4,4
+ st r6,r2,0
+ ld r6,r3,8 ; add 15 + 16r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,8
+ st r8,r2,4
+ ld r8,r3,12 ; add 14 + 16r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,12
+ st r6,r2,8
+ ld r6,r3,16 ; add 13 + 16r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,16
+ st r8,r2,12
+ ld r8,r3,20 ; add 12 + 16r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,20
+ st r6,r2,16
+ ld r6,r3,24 ; add 11 + 16r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,24
+ st r8,r2,20
+ ld r8,r3,28 ; add 10 + 16r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,28
+ st r6,r2,24
+ ld r6,r3,32 ; add 9 + 16r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,32
+ st r8,r2,28
+ ld r8,r3,36 ; add 8 + 16r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,36
+ st r6,r2,32
+ ld r6,r3,40 ; add 7 + 16r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,40
+ st r8,r2,36
+ ld r8,r3,44 ; add 6 + 16r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,44
+ st r6,r2,40
+ ld r6,r3,48 ; add 5 + 16r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,48
+ st r8,r2,44
+ ld r8,r3,52 ; add 4 + 16r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,52
+ st r6,r2,48
+ ld r6,r3,56 ; add 3 + 16r limbs
+ addu.cio r8,r8,r9
+ ld r7,r4,56
+ st r8,r2,52
+ ld r8,r3,60 ; add 2 + 16r limbs
+ addu.cio r6,r6,r7
+ ld r9,r4,60
+ st r6,r2,56
+ bcnd.n ne0,r10,Loop ; add 1 + 16r limbs
+ addu.cio r8,r8,r9
+
+ st r8,r2,60 ; store most significant limb
+
+ jmp.n r1
+ addu.ci r2,r0,r0 ; return carry-out from most sign. limb
--- /dev/null
+; mc88100 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+; store the product in a second limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 2, or (at your option)
+; any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+
+; You should have received a copy of the GNU General Public License
+; along with the GNU MP Library; see the file COPYING. If not, write to
+; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr r2
+; s1_ptr r3
+; size r4
+; s2_limb r5
+
+; Common overhead is about 11 cycles/invocation.
+
+; The speed for S2_LIMB >= 0x10000 is approximately 21 cycles/limb. (The
+; pipeline stalls 2 cycles due to WB contention.)
+
+; The speed for S2_LIMB < 0x10000 is approximately 16 cycles/limb. (The
+; pipeline stalls 2 cycles due to WB contention and 1 cycle due to latency.)
+
+; To enhance speed:
+; 1. Unroll main loop 4-8 times.
+; 2. Schedule code to avoid WB contention. It might be tempting to move the
+; ld instruction in the loops down to save 2 cycles (less WB contention),
+; but that loses because the ultimate value will be read from outside
+; the allocated space. But if we handle the ultimate multiplication in
+; the tail, we can do this.
+; 3. Make the multiplication with less instructions. I think the code for
+; (S2_LIMB >= 0x10000) is not minimal.
+; With these techniques the (S2_LIMB >= 0x10000) case would run in 17 or
+; less cycles/limb; the (S2_LIMB < 0x10000) case would run in 11
+; cycles/limb. (Assuming infinite unrolling.)
+
+; Structure: the 88100 mul instruction yields only the low 32 bits of a
+; product, so s2_limb is split into 16-bit halves (r7 = lo, r8 = hi) and
+; the partial products are combined by hand; when hi(s2_limb) == 0 the
+; cheaper Lsmall loop is used.  Pointers index from the vector ends with
+; the negated size in r4, which counts up towards zero.
+
+#include "sysdep.h"
+
+ENTRY (__mpn_mul_1)
+
+ ; Make S1_PTR and RES_PTR point at the end of their blocks
+ ; and negate SIZE.
+ lda r3,r3[r4]
+ lda r6,r2[r4] ; RES_PTR in r6 since r2 is retval
+ subu r4,r0,r4
+
+ addu.co r2,r0,r0 ; r2 = cy = 0
+ ld r9,r3[r4]
+ mask r7,r5,0xffff ; r7 = lo(S2_LIMB)
+ extu r8,r5,16 ; r8 = hi(S2_LIMB)
+ bcnd.n eq0,r8,Lsmall ; jump if (hi(S2_LIMB) == 0)
+ subu r6,r6,4
+
+; General code for any value of S2_LIMB.
+
+ ; Make a stack frame and save r25 and r26
+ subu r31,r31,16
+ st.d r25,r31,8
+
+ ; Enter the loop in the middle
+ br.n L1
+ addu r4,r4,1
+
+Loop:
+ ld r9,r3[r4]
+ st r26,r6[r4]
+; bcnd ne0,r0,0 ; bubble
+ addu r4,r4,1
+L1: mul r26,r9,r5 ; low word of product mul_1 WB ld
+ mask r12,r9,0xffff ; r12 = lo(s1_limb) mask_1
+ mul r11,r12,r7 ; r11 = prod_0 mul_2 WB mask_1
+ mul r10,r12,r8 ; r10 = prod_1a mul_3
+ extu r13,r9,16 ; r13 = hi(s1_limb) extu_1 WB mul_1
+ mul r12,r13,r7 ; r12 = prod_1b mul_4 WB extu_1
+ mul r25,r13,r8 ; r25 = prod_2 mul_5 WB mul_2
+ extu r11,r11,16 ; r11 = hi(prod_0) extu_2 WB mul_3
+ addu r10,r10,r11 ; addu_1 WB extu_2
+; bcnd ne0,r0,0 ; bubble WB addu_1
+ addu.co r10,r10,r12 ; WB mul_4
+ mask.u r10,r10,0xffff ; move the 16 most significant bits...
+ addu.ci r10,r10,r0 ; ...to the low half of the word...
+ rot r10,r10,16 ; ...and put carry in pos 16.
+ addu.co r26,r26,r2 ; add old carry limb
+ bcnd.n ne0,r4,Loop
+ addu.ci r2,r25,r10 ; compute new carry limb
+
+ st r26,r6[r4]
+ ld.d r25,r31,8
+ jmp.n r1
+ addu r31,r31,16
+
+; Fast code for S2_LIMB < 0x10000
+Lsmall:
+ ; Enter the loop in the middle
+ br.n SL1
+ addu r4,r4,1
+
+SLoop:
+ ld r9,r3[r4] ;
+ st r8,r6[r4] ;
+ addu r4,r4,1 ;
+SL1: mul r8,r9,r5 ; low word of product
+ mask r12,r9,0xffff ; r12 = lo(s1_limb)
+ extu r13,r9,16 ; r13 = hi(s1_limb)
+ mul r11,r12,r7 ; r11 = prod_0
+ mul r12,r13,r7 ; r12 = prod_1b
+ addu.cio r8,r8,r2 ; add old carry limb
+ extu r10,r11,16 ; r11 = hi(prod_0)
+ addu r10,r10,r12 ;
+ bcnd.n ne0,r4,SLoop
+ extu r2,r10,16 ; r2 = new carry limb
+
+ jmp.n r1
+ st r8,r6[r4]
--- /dev/null
+; mc88100 __mpn_sub -- Subtract two limb vectors of the same length > 0 and
+; store difference in a third limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 2, or (at your option)
+; any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+
+; You should have received a copy of the GNU General Public License
+; along with the GNU MP Library; see the file COPYING. If not, write to
+; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr r2
+; s1_ptr r3
+; s2_ptr r4
+; size r5
+
+; This code has been optimized to run one instruction per clock, avoiding
+; load stalls and writeback contention. As a result, the instruction
+; order is not always natural.
+
+; The speed is approximately 4.3 clocks/limb + 18 clocks/limb-vector.
+
+; Structure: identical to the m88k __mpn_add_n -- the loop is unrolled
+; 16 limbs deep and entered part-way through via a computed jump
+; (Duff's-device style):
+;   r10 = size / 16 -- number of full 16-limb passes
+;   r5 = (-size mod 16) * 16 -- byte offset of the entry point within
+;        the loop body (each limb step is 4 instructions = 16 bytes)
+; The pointers are backed up by (-size mod 16) * 4 bytes below so the
+; fixed displacements inside the loop line up with the entry point.
+; The borrow is carried between subu.cio steps in the carry bit.
+
+#include "sysdep.h"
+
+ENTRY (__mpn_sub_n)
+ ld r6,r3,0 ; read first limb from s1_ptr
+ extu r10,r5,4
+ ld r7,r4,0 ; read first limb from s2_ptr
+
+ subu.co r5,r0,r5 ; (clear carry as side effect)
+ mak r5,r5,4<4> ; r5 = (-size mod 16) * 16
+ bcnd eq0,r5,Lzero ; size multiple of 16: enter at the loop top
+
+ or r12,r0,lo16(Lbase)
+ or.u r12,r12,hi16(Lbase)
+ addu r12,r12,r5 ; r12 is address for entering in loop
+
+ extu r5,r5,2 ; divide by 4
+ subu r2,r2,r5 ; adjust res_ptr
+ subu r3,r3,r5 ; adjust s1_ptr
+ subu r4,r4,r5 ; adjust s2_ptr
+
+ or r8,r6,r0 ; prime the r8/r9 half of the software pipeline
+
+ jmp.n r12
+ or r9,r7,r0
+
+Loop: addu r3,r3,64
+ st r8,r2,60
+ addu r4,r4,64
+ ld r6,r3,0
+ addu r2,r2,64
+ ld r7,r4,0
+Lzero: subu r10,r10,1 ; subtract 0 + 16r limbs (adjust loop counter)
+Lbase: ld r8,r3,4
+ subu.cio r6,r6,r7
+ ld r9,r4,4
+ st r6,r2,0
+ ld r6,r3,8 ; subtract 15 + 16r limbs
+ subu.cio r8,r8,r9
+ ld r7,r4,8
+ st r8,r2,4
+ ld r8,r3,12 ; subtract 14 + 16r limbs
+ subu.cio r6,r6,r7
+ ld r9,r4,12
+ st r6,r2,8
+ ld r6,r3,16 ; subtract 13 + 16r limbs
+ subu.cio r8,r8,r9
+ ld r7,r4,16
+ st r8,r2,12
+ ld r8,r3,20 ; subtract 12 + 16r limbs
+ subu.cio r6,r6,r7
+ ld r9,r4,20
+ st r6,r2,16
+ ld r6,r3,24 ; subtract 11 + 16r limbs
+ subu.cio r8,r8,r9
+ ld r7,r4,24
+ st r8,r2,20
+ ld r8,r3,28 ; subtract 10 + 16r limbs
+ subu.cio r6,r6,r7
+ ld r9,r4,28
+ st r6,r2,24
+ ld r6,r3,32 ; subtract 9 + 16r limbs
+ subu.cio r8,r8,r9
+ ld r7,r4,32
+ st r8,r2,28
+ ld r8,r3,36 ; subtract 8 + 16r limbs
+ subu.cio r6,r6,r7
+ ld r9,r4,36
+ st r6,r2,32
+ ld r6,r3,40 ; subtract 7 + 16r limbs
+ subu.cio r8,r8,r9
+ ld r7,r4,40
+ st r8,r2,36
+ ld r8,r3,44 ; subtract 6 + 16r limbs
+ subu.cio r6,r6,r7
+ ld r9,r4,44
+ st r6,r2,40
+ ld r6,r3,48 ; subtract 5 + 16r limbs
+ subu.cio r8,r8,r9
+ ld r7,r4,48
+ st r8,r2,44
+ ld r8,r3,52 ; subtract 4 + 16r limbs
+ subu.cio r6,r6,r7
+ ld r9,r4,52
+ st r6,r2,48
+ ld r6,r3,56 ; subtract 3 + 16r limbs
+ subu.cio r8,r8,r9
+ ld r7,r4,56
+ st r8,r2,52
+ ld r8,r3,60 ; subtract 2 + 16r limbs
+ subu.cio r6,r6,r7
+ ld r9,r4,60
+ st r6,r2,56
+ bcnd.n ne0,r10,Loop ; subtract 1 + 16r limbs
+ subu.cio r8,r8,r9
+
+ st r8,r2,60 ; store most significant limb
+
+ addu.ci r2,r0,r0 ; pick up carry bit (m88k: set means no borrow)
+ jmp.n r1
+ xor r2,r2,1 ; invert so 1 is returned on borrow-out
--- /dev/null
+; mc88110 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+; store the product in a second limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 2, or (at your option)
+; any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+
+; You should have received a copy of the GNU General Public License
+; along with the GNU MP Library; see the file COPYING. If not, write to
+; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr r2
+; s1_ptr r3
+; size r4
+; s2_limb r5
+
+; The mc88110 mulu.d produces a full 64-bit product in a register pair
+; (here r10:r11, r10 = high word), so no 16-bit splitting is needed as
+; on the 88100.  r4 holds the negated size and counts up towards zero;
+; r2 doubles as the running carry limb and the return value.
+
+#include "sysdep.h"
+
+ENTRY (__mpn_mul_1)
+ ld r6,r3,0
+ sub r4,r0,r4
+ sub r3,r3,r4 ; r3 is offset s1_ptr
+ sub r2,r2,r4
+ sub r8,r2,8 ; r8 is offset res_ptr
+ mulu.d r10,r6,r5 ; first product, started early to hide latency
+
+ addu r4,r4,1
+ bcnd eq0,r4,Lend ; single-limb vector: skip the loop
+ addu.co r2,r0,0 ; clear cy_limb
+
+Loop: ld r6,r3[r4]
+ addu.cio r9,r11,r2 ; limb = low product word + cy_limb
+ or r2,r10,r0 ; could be avoided if unrolled
+ addu r4,r4,1
+ mulu.d r10,r6,r5
+ bcnd ne0,r4,Loop
+ st r9,r8[r4]
+
+Lend: addu.cio r9,r11,r2
+ st r9,r8,4
+ jmp.n r1
+ addu.ci r2,r10,r0 ; return cy_limb = high word + final carry
+
+; This is the Right Way to do this on '110. 4 cycles / 64-bit limb.
+; ld.d r10,
+; mulu.d
+; addu.cio
+; addu.cio
+; st.d
+; mulu.d ,r11,r5
+; ld.d r12,
+; mulu.d ,r10,r5
+; addu.cio
+; addu.cio
+; st.d
+; mulu.d
+; ld.d r10,
+; mulu.d
+; addu.cio
+; addu.cio
+; st.d
+; mulu.d
+; ld.d r10,
+; mulu.d
+; addu.cio
+; addu.cio
+; st.d
+; mulu.d
--- /dev/null
+/* gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* 64-bit limbs with a 32-bit `long': a CPU with 64-bit registers
+   running a 32-bit ABI.  NOTE(review): the combination looks deliberate
+   (mp_limb held in a 64-bit register type rather than `long'), not a
+   typo for 64 -- confirm against the directory this file configures. */
+#define BITS_PER_MP_LIMB 64
+#define BYTES_PER_MP_LIMB 8
+#define BITS_PER_LONGINT 32
+#define BITS_PER_INT 32
+#define BITS_PER_SHORTINT 16
+#define BITS_PER_CHAR 8
--- /dev/null
+/* gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* 16-bit limb configuration for a target with 16-bit `int' and 32-bit
+   `long'.  NOTE(review): presumably a 16-bit CPU port -- confirm which
+   configuration selects this file. */
+#define BITS_PER_MP_LIMB 16
+#define BYTES_PER_MP_LIMB 2
+#define BITS_PER_LONGINT 32
+#define BITS_PER_INT 16
+#define BITS_PER_SHORTINT 16
+#define BITS_PER_CHAR 8