2005-07-08  Carlos O'Donell  <carlos@systemhalted.org>

author     Roland McGrath <roland@gnu.org>
           Fri, 8 Jul 2005 06:10:13 +0000 (06:10 +0000)
committer  Roland McGrath <roland@gnu.org>
           Fri, 8 Jul 2005 06:10:13 +0000 (06:10 +0000)
* sysdeps/hppa/add_n.s (__mpn_add_n): Use sr0 or r0, not 0.
* sysdeps/hppa/lshift.s (__mpn_lshift): Likewise.
* sysdeps/hppa/rshift.s (__mpn_rshift): Likewise.
* sysdeps/hppa/sub_n.s (__mpn_sub_n): Likewise.
* sysdeps/hppa/udiv_qrnnd.s (__udiv_qrnnd): Likewise.
* sysdeps/hppa/hppa1.1/udiv_qrnnd.s (__udiv_qrnnd): Likewise.
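
The old operand forms spell the space register as a literal 0, as in ldws,ma 4(0,%r25),%r21; the fix either drops it, leaving the default space register sr0 implicit, or (for register operands compared against zero) names general register %r0 explicitly. A minimal sketch of the equivalent spellings, not taken verbatim from any one file (L$target is a placeholder label):

	ldws,ma		4(0,%r25),%r21		;! old: literal 0 in the space register slot
	ldws,ma		4(%r25),%r21		;! new: space register sr0 left implicit
	ldws,ma		4(%sr0,%r25),%r21	;! equivalent: sr0 spelled explicitly
	comb,<		%r23,0,L$target		;! old: literal 0 as a register operand
	comb,<		%r23,%r0,L$target	;! new: general register %r0 (hardwired zero)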

sysdeps/hppa/add_n.s
sysdeps/hppa/hppa1.1/udiv_qrnnd.s
sysdeps/hppa/lshift.s
sysdeps/hppa/rshift.s
sysdeps/hppa/sub_n.s
sysdeps/hppa/udiv_qrnnd.s

diff --git a/sysdeps/hppa/add_n.s b/sysdeps/hppa/add_n.s
index aaabd72..a396b34 100644
--- a/sysdeps/hppa/add_n.s
+++ b/sysdeps/hppa/add_n.s
@@ -38,19 +38,19 @@ __mpn_add_n:
        .callinfo       frame=0,no_calls
        .entry
 
-       ldws,ma         4(0,%r25),%r21
-       ldws,ma         4(0,%r24),%r20
+       ldws,ma         4(%r25),%r21
+       ldws,ma         4(%r24),%r20
 
        addib,=         -1,%r23,L$end   ;! check for (SIZE == 1)
         add            %r21,%r20,%r28  ;! add first limbs ignoring cy
 
-L$loop:        ldws,ma         4(0,%r25),%r21
-       ldws,ma         4(0,%r24),%r20
-       stws,ma         %r28,4(0,%r26)
+L$loop:        ldws,ma         4(%r25),%r21
+       ldws,ma         4(%r24),%r20
+       stws,ma         %r28,4(%r26)
        addib,<>        -1,%r23,L$loop
         addc           %r21,%r20,%r28
 
-L$end: stws            %r28,0(0,%r26)
+L$end: stws            %r28,0(%r26)
        bv              0(%r2)
         addc           %r0,%r0,%r28
 
diff --git a/sysdeps/hppa/hppa1.1/udiv_qrnnd.s b/sysdeps/hppa/hppa1.1/udiv_qrnnd.s
index fdc63e5..7b83619 100644
--- a/sysdeps/hppa/hppa1.1/udiv_qrnnd.s
+++ b/sysdeps/hppa/hppa1.1/udiv_qrnnd.s
@@ -38,20 +38,20 @@ __udiv_qrnnd:
        .entry
        ldo             64(%r30),%r30
 
-       stws            %r25,-16(0,%r30)        ;! n_hi
-       stws            %r24,-12(0,%r30)        ;! n_lo
+       stws            %r25,-16(%r30)  ;! n_hi
+       stws            %r24,-12(%r30)  ;! n_lo
        b,l             L$0,%r1
        ldo             L$0000-L$0(%r1),%r1
 L$0:
-       fldds           -16(0,%r30),%fr5
-       stws            %r23,-12(0,%r30)
+       fldds           -16(%r30),%fr5
+       stws            %r23,-12(%r30)
        comib,<=        0,%r25,L$1
        fcnvxf,dbl,dbl  %fr5,%fr5
-       fldds           0(0,%r1),%fr4
+       fldds           0(%r1),%fr4
        fadd,dbl        %fr4,%fr5,%fr5
 L$1:   
        fcpy,sgl        %fr0,%fr6L
-       fldws           -12(0,%r30),%fr6R
+       fldws           -12(%r30),%fr6R
        fcnvxf,dbl,dbl  %fr6,%fr4
 
        fdiv,dbl        %fr5,%fr4,%fr5
@@ -60,9 +60,9 @@ L$1:
        fstws           %fr4R,-16(%r30)
        xmpyu           %fr4R,%fr6R,%fr6
        ldws            -16(%r30),%r28
-       fstds           %fr6,-16(0,%r30)
-       ldws            -12(0,%r30),%r21
-       ldws            -16(0,%r30),%r20
+       fstds           %fr6,-16(%r30)
+       ldws            -12(%r30),%r21
+       ldws            -16(%r30),%r20
        sub             %r24,%r21,%r22
        subb            %r25,%r20,%r1
        comib,=         0,%r1,L$2
@@ -72,7 +72,7 @@ L$1:
        ldo             -1(%r28),%r28
 L$2:   
        bv              0(%r2)
-       stws            %r22,0(0,%r26)
+       stws            %r22,0(%r26)
 
        .exit
        .procend
diff --git a/sysdeps/hppa/lshift.s b/sysdeps/hppa/lshift.s
index 400fbcf..151b283 100644
--- a/sysdeps/hppa/lshift.s
+++ b/sysdeps/hppa/lshift.s
@@ -35,32 +35,32 @@ __mpn_lshift:
 
        sh2add          %r24,%r25,%r25
        sh2add          %r24,%r26,%r26
-       ldws,mb         -4(0,%r25),%r22
+       ldws,mb         -4(%r25),%r22
        subi            32,%r23,%r1
        mtsar           %r1
        addib,=         -1,%r24,L$0004
        vshd            %r0,%r22,%r28           ;! compute carry out limb
-       ldws,mb         -4(0,%r25),%r29
+       ldws,mb         -4(%r25),%r29
        addib,=         -1,%r24,L$0002
        vshd            %r22,%r29,%r20
 
-L$loop:        ldws,mb         -4(0,%r25),%r22
-       stws,mb         %r20,-4(0,%r26)
+L$loop:        ldws,mb         -4(%r25),%r22
+       stws,mb         %r20,-4(%r26)
        addib,=         -1,%r24,L$0003
        vshd            %r29,%r22,%r20
-       ldws,mb         -4(0,%r25),%r29
-       stws,mb         %r20,-4(0,%r26)
+       ldws,mb         -4(%r25),%r29
+       stws,mb         %r20,-4(%r26)
        addib,<>        -1,%r24,L$loop
        vshd            %r22,%r29,%r20
 
-L$0002:        stws,mb         %r20,-4(0,%r26)
+L$0002:        stws,mb         %r20,-4(%r26)
        vshd            %r29,%r0,%r20
        bv              0(%r2)
-       stw             %r20,-4(0,%r26)
-L$0003:        stws,mb         %r20,-4(0,%r26)
+       stw             %r20,-4(%r26)
+L$0003:        stws,mb         %r20,-4(%r26)
 L$0004:        vshd            %r22,%r0,%r20
        bv              0(%r2)
-       stw             %r20,-4(0,%r26)
+       stw             %r20,-4(%r26)
 
        .exit
        .procend
diff --git a/sysdeps/hppa/rshift.s b/sysdeps/hppa/rshift.s
index acb772f..dff189d 100644
--- a/sysdeps/hppa/rshift.s
+++ b/sysdeps/hppa/rshift.s
@@ -33,31 +33,31 @@ __mpn_rshift:
        .callinfo       frame=64,no_calls
        .entry
 
-       ldws,ma         4(0,%r25),%r22
+       ldws,ma         4(%r25),%r22
        mtsar           %r23
        addib,=         -1,%r24,L$0004
        vshd            %r22,%r0,%r28           ;! compute carry out limb
-       ldws,ma         4(0,%r25),%r29
+       ldws,ma         4(%r25),%r29
        addib,=         -1,%r24,L$0002
        vshd            %r29,%r22,%r20
 
-L$loop:        ldws,ma         4(0,%r25),%r22
-       stws,ma         %r20,4(0,%r26)
+L$loop:        ldws,ma         4(%r25),%r22
+       stws,ma         %r20,4(%r26)
        addib,=         -1,%r24,L$0003
        vshd            %r22,%r29,%r20
-       ldws,ma         4(0,%r25),%r29
-       stws,ma         %r20,4(0,%r26)
+       ldws,ma         4(%r25),%r29
+       stws,ma         %r20,4(%r26)
        addib,<>        -1,%r24,L$loop
        vshd            %r29,%r22,%r20
 
-L$0002:        stws,ma         %r20,4(0,%r26)
+L$0002:        stws,ma         %r20,4(%r26)
        vshd            %r0,%r29,%r20
        bv              0(%r2)
-       stw             %r20,0(0,%r26)
-L$0003:        stws,ma         %r20,4(0,%r26)
+       stw             %r20,0(%r26)
+L$0003:        stws,ma         %r20,4(%r26)
 L$0004:        vshd            %r0,%r22,%r20
        bv              0(%r2)
-       stw             %r20,0(0,%r26)
+       stw             %r20,0(%r26)
 
        .exit
        .procend
diff --git a/sysdeps/hppa/sub_n.s b/sysdeps/hppa/sub_n.s
index 34f1968..7764961 100644
--- a/sysdeps/hppa/sub_n.s
+++ b/sysdeps/hppa/sub_n.s
@@ -38,19 +38,19 @@ __mpn_sub_n:
        .callinfo       frame=0,no_calls
        .entry
 
-       ldws,ma         4(0,%r25),%r21
-       ldws,ma         4(0,%r24),%r20
+       ldws,ma         4(%r25),%r21
+       ldws,ma         4(%r24),%r20
 
        addib,=         -1,%r23,L$end   ;! check for (SIZE == 1)
         sub            %r21,%r20,%r28  ;! subtract first limbs ignoring cy
 
-L$loop:        ldws,ma         4(0,%r25),%r21
-       ldws,ma         4(0,%r24),%r20
-       stws,ma         %r28,4(0,%r26)
+L$loop:        ldws,ma         4(%r25),%r21
+       ldws,ma         4(%r24),%r20
+       stws,ma         %r28,4(%r26)
        addib,<>        -1,%r23,L$loop
         subb           %r21,%r20,%r28
 
-L$end: stws            %r28,0(0,%r26)
+L$end: stws            %r28,0(%r26)
        addc            %r0,%r0,%r28
        bv              0(%r2)
         subi           1,%r28,%r28
diff --git a/sysdeps/hppa/udiv_qrnnd.s b/sysdeps/hppa/udiv_qrnnd.s
index cd2b58d..8e9c07a 100644
--- a/sysdeps/hppa/udiv_qrnnd.s
+++ b/sysdeps/hppa/udiv_qrnnd.s
@@ -38,7 +38,7 @@ __udiv_qrnnd:
        .callinfo       frame=0,no_calls
        .entry
 
-       comb,<          %r23,0,L$largedivisor
+       comb,<          %r23,%r0,L$largedivisor
         sub            %r0,%r23,%r1            ;! clear cy as side-effect
        ds              %r0,%r1,%r0
        addc            %r24,%r24,%r24
@@ -107,7 +107,7 @@ __udiv_qrnnd:
        ds              %r25,%r23,%r25
        comclr,>=       %r25,%r0,%r0
        addl            %r25,%r23,%r25
-       stws            %r25,0(0,%r26)
+       stws            %r25,0(%r26)
        bv              0(%r2)
         addc           %r28,%r28,%r28
 
@@ -186,7 +186,7 @@ L$largedivisor:
        comclr,>=       %r25,%r0,%r0
        addl            %r25,%r22,%r25
        sh1addl         %r25,%r20,%r25
-       stws            %r25,0(0,%r26)
+       stws            %r25,0(%r26)
        bv              0(%r2)
         addc           %r24,%r24,%r28
 
@@ -269,7 +269,7 @@ L$odd:      addib,sv,n      1,%r22,L$FF..           ;! r22 = (d / 2 + 1)
        addc            %r0,%r28,%r28
        sub,<<          %r25,%r23,%r0
        addl            %r25,%r1,%r25
-       stws            %r25,0(0,%r26)
+       stws            %r25,0(%r26)
        bv              0(%r2)
         addc           %r0,%r28,%r28
 
@@ -278,7 +278,7 @@ L$odd:      addib,sv,n      1,%r22,L$FF..           ;! r22 = (d / 2 + 1)
 L$FF..:        add,uv          %r25,%r24,%r24
        sub,<<          %r24,%r23,%r0
        ldo             1(%r24),%r24
-       stws            %r24,0(0,%r26)
+       stws            %r24,0(%r26)
        bv              0(%r2)
         addc           %r0,%r25,%r28