1999-07-25  Jakub Jelinek  <jj@ultra.linux.cz>
+ * sysdeps/sparc/sparc64/add_n.S: Avoid using %g2, %g3, %g7 registers
+ as much as possible. Declare them using .register pseudo-op if they
+ are still used.
+ * sysdeps/sparc/sparc64/lshift.S: Likewise.
+ * sysdeps/sparc/sparc64/memchr.S: Likewise.
+ * sysdeps/sparc/sparc64/memcmp.S: Likewise.
+ * sysdeps/sparc/sparc64/memcpy.S: Likewise.
+ * sysdeps/sparc/sparc64/memset.S: Likewise.
+ * sysdeps/sparc/sparc64/rawmemchr.S: Likewise.
+ * sysdeps/sparc/sparc64/rshift.S: Likewise.
+ * sysdeps/sparc/sparc64/stpcpy.S: Likewise.
+ * sysdeps/sparc/sparc64/stpncpy.S: Likewise.
+ * sysdeps/sparc/sparc64/strcat.S: Likewise.
+ * sysdeps/sparc/sparc64/strchr.S: Likewise.
+ * sysdeps/sparc/sparc64/strcmp.S: Likewise.
+ * sysdeps/sparc/sparc64/strcpy.S: Likewise.
+ * sysdeps/sparc/sparc64/strcspn.S: Likewise.
+ * sysdeps/sparc/sparc64/strlen.S: Likewise.
+ * sysdeps/sparc/sparc64/strncmp.S: Likewise.
+ * sysdeps/sparc/sparc64/strncpy.S: Likewise.
+ * sysdeps/sparc/sparc64/strpbrk.S: Likewise.
+ * sysdeps/sparc/sparc64/strspn.S: Likewise.
+ * sysdeps/sparc/sparc64/sub_n.S: Likewise.
+ * sysdeps/sparc/sparc64/dl-machine.h: Likewise.
+ Optimize trampoline code for .plt4-.plt32767.
+ Fix trampolines for .plt32768+.
+
+1999-07-25  Jakub Jelinek  <jj@ultra.linux.cz>
+
* sysdeps/sparc/sparc32/sparcv8/Makefile: -mv8 is deprecated, use
-mcpu=v8.
* sysdeps/sparc/sparc32/sparcv9/Makefile: Likewise.
/* SPARC v9 __mpn_add_n -- Add two limb vectors of the same length > 0 and
store sum in a third limb vector.
- Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1996, 1997, 1999 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
ENTRY(__mpn_add_n)
- sub %g0,%o3,%g3
+ sub %g0,%o3,%g5
sllx %o3,3,%g1
add %o1,%g1,%o1 ! make s1_ptr point at end
add %o2,%g1,%o2 ! make s2_ptr point at end
add %o0,%g1,%o0 ! make res_ptr point at end
mov 0,%o4 ! clear carry variable
- sllx %g3,3,%o5 ! compute initial address index
+ sllx %g5,3,%o5 ! compute initial address index
1: ldx [%o2+%o5],%g1 ! load s2 limb
- add %g3,1,%g3 ! increment loop count
- ldx [%o1+%o5],%g2 ! load s1 limb
+ add %g5,1,%g5 ! increment loop count
+ ldx [%o1+%o5],%o3 ! load s1 limb
addcc %g1,%o4,%g1 ! add s2 limb and carry variable
movcc %xcc,0,%o4 ! if carry-out, o4 was 1; clear it
- addcc %g1,%g2,%g1 ! add s1 limb to sum
+ addcc %g1,%o3,%g1 ! add s1 limb to sum
stx %g1,[%o0+%o5] ! store result
add %o5,8,%o5 ! increment address index
- brnz,pt %g3,1b
+ brnz,pt %g5,1b
movcs %xcc,1,%o4 ! if s1 add gave carry, record it
retl
return pc - *(Elf64_Addr *)(elf_pic_register + la);
}
-/* We have 3 cases to handle. And we code different code sequences
+/* We have 4 cases to handle. And we code different code sequences
for each one. I love V9 code models... */
static inline void
elf_machine_fixup_plt(struct link_map *map, const Elf64_Rela *reloc,
/* Now move plt_vaddr up to the call instruction. */
plt_vaddr += (2 * 4);
+ /* PLT entries .PLT32768 and above look always the same. */
+ if (__builtin_expect (reloc->r_addend, 0) != 0)
+ {
+ *reloc_addr = value - map->l_addr;
+ }
/* 32-bit Sparc style, the target is in the lower 32-bits of
address space. */
- if ((value >> 32) == 0)
+ else if ((value >> 32) == 0)
{
/* sethi %hi(target), %g1
jmpl %g1 + %lo(target), %g0 */
constant formation code I wrote. -DaveM */
/* sethi %hh(value), %g1
- sethi %lm(value), %g2
- or %g1, %hl(value), %g1
- or %g2, %lo(value), %g2
+ sethi %lm(value), %g5
+ or %g1, %hm(value), %g1
+ or %g5, %lo(value), %g5
sllx %g1, 32, %g1
- jmpl %g1 + %g2, %g0
+ jmpl %g1 + %g5, %g0
nop */
- insns[6] = 0x81c04002;
+ insns[6] = 0x81c04005;
__asm __volatile ("flush %0 + 24" : : "r" (insns));
insns[5] = 0x83287020;
__asm __volatile ("flush %0 + 20" : : "r" (insns));
- insns[4] = 0x8410a000 | (low32 & 0x3ff);
+ insns[4] = 0x8a116000 | (low32 & 0x3ff);
__asm __volatile ("flush %0 + 16" : : "r" (insns));
insns[3] = 0x82106000 | (high32 & 0x3ff);
__asm __volatile ("flush %0 + 12" : : "r" (insns));
- insns[2] = 0x05000000 | (low32 >> 10);
+ insns[2] = 0x0b000000 | (low32 >> 10);
__asm __volatile ("flush %0 + 8" : : "r" (insns));
insns[1] = 0x03000000 | (high32 >> 10);
/* PLT0 looks like:
save %sp, -192, %sp
- sethi %hh(_dl_runtime_{resolve,profile}_0), %g3
- sethi %lm(_dl_runtime_{resolve,profile}_0), %g4
- or %g3, %hm(_dl_runtime_{resolve,profile}_0), %g3
- or %g4, %lo(_dl_runtime_{resolve,profile}_0), %g4
- sllx %g3, 32, %g3
- jmpl %g3 + %g4, %o0
- nop
-
- PLT1 is similar except we jump to _dl_runtime_{resolve,profile}_1. */
+ sethi %hh(_dl_runtime_{resolve,profile}_0), %l0
+ sethi %lm(_dl_runtime_{resolve,profile}_0), %l1
+ or %l0, %hm(_dl_runtime_{resolve,profile}_0), %l0
+ or %l1, %lo(_dl_runtime_{resolve,profile}_0), %l1
+ sllx %l0, 32, %l0
+ jmpl %l0 + %l1, %l6
+ sethi %hi(0xffc00), %l2
+ */
plt[0] = 0x9de3bf40;
- plt[1] = 0x07000000 | (res0_addr >> (64 - 22));
- plt[2] = 0x09000000 | ((res0_addr >> 10) & 0x003fffff);
- plt[3] = 0x8610e000 | ((res0_addr >> 32) & 0x3ff);
- plt[4] = 0x88112000 | (res0_addr & 0x3ff);
- plt[5] = 0x8728f020;
- plt[6] = 0x91c0c004;
- plt[7] = 0x01000000;
+ plt[1] = 0x21000000 | (res0_addr >> (64 - 22));
+ plt[2] = 0x23000000 | ((res0_addr >> 10) & 0x003fffff);
+ plt[3] = 0xa0142000 | ((res0_addr >> 32) & 0x3ff);
+ plt[4] = 0xa2146000 | (res0_addr & 0x3ff);
+ plt[5] = 0xa12c3020;
+ plt[6] = 0xadc40011;
+ plt[7] = 0x250003ff;
+
+ /* PLT1 looks like:
+
+ save %sp, -192, %sp
+ sethi %hh(_dl_runtime_{resolve,profile}_1), %l0
+ sethi %lm(_dl_runtime_{resolve,profile}_1), %l1
+ or %l0, %hm(_dl_runtime_{resolve,profile}_1), %l0
+ or %l1, %lo(_dl_runtime_{resolve,profile}_1), %l1
+ sllx %l0, 32, %l0
+ jmpl %l0 + %l1, %l6
+ srlx %g1, 12, %o1
+ */
plt[8 + 0] = 0x9de3bf40;
- plt[8 + 1] = 0x07000000 | (res1_addr >> (64 - 22));
- plt[8 + 2] = 0x09000000 | ((res1_addr >> 10) & 0x003fffff);
- plt[8 + 3] = 0x8610e000 | ((res1_addr >> 32) & 0x3ff);
- plt[8 + 4] = 0x88112000 | (res1_addr & 0x3ff);
- plt[8 + 5] = 0x8728f020;
- plt[8 + 6] = 0x91c0c004;
- plt[8 + 7] = 0x01000000;
+ plt[8 + 1] = 0x21000000 | (res1_addr >> (64 - 22));
+ plt[8 + 2] = 0x23000000 | ((res1_addr >> 10) & 0x003fffff);
+ plt[8 + 3] = 0xa0142000 | ((res1_addr >> 32) & 0x3ff);
+ plt[8 + 4] = 0xa2146000 | (res1_addr & 0x3ff);
+ plt[8 + 5] = 0xa12c3020;
+ plt[8 + 6] = 0xadc40011;
+ plt[8 + 7] = 0x9330700c;
/* Now put the magic cookie at the beginning of .PLT3
Entry .PLT4 is unused by this implementation. */
.type " #tramp_name "_0, @function
.align 32
" #tramp_name "_0:
- ldx [%o0 + 32 + 8], %l0
- sethi %hi(1048576), %g2
- sub %g1, %o0, %o0
- xor %g2, -20, %g2
- sethi %hi(5120), %g3
- add %o0, %g2, %o0
- sethi %hi(32768), %o2
- udivx %o0, %g3, %g3
- sllx %g3, 2, %g1
- add %g1, %g3, %g1
- sllx %g1, 10, %g2
- sllx %g1, 5, %g1
- sub %o0, %g2, %o0
- udivx %o0, 24, %o0
- add %o0, %o2, %o0
- add %g1, %o0, %g1
- sllx %g1, 1, %o1
- mov %l0, %o0
- add %o1, %g1, %o1
+ ! sethi %hi(1047552), %l2 - Done in .PLT0
+ ldx [%l6 + 32 + 8], %o0
+ sub %g1, %l6, %l0
+ xor %l2, -1016, %l2
+ sethi %hi(5120), %l3
+ add %l0, %l2, %l0
+ sethi %hi(32768), %l4
+ udivx %l0, %l3, %l3
+ sllx %l3, 2, %l1
+ add %l1, %l3, %l1
+ sllx %l1, 10, %l2
+ sllx %l1, 5, %l1
+ sub %l0, %l2, %l0
+ udivx %l0, 24, %l0
+ add %l0, %l4, %l0
+ add %l1, %l0, %l1
+ add %l1, %l1, %l0
+ add %l0, %l1, %l0
mov %i7, %o2
call " #fixup_name "
- sllx %o1, 3, %o1
+ sllx %l0, 3, %o1
jmp %o0
restore
.size " #tramp_name "_0, . - " #tramp_name "_0
.type " #tramp_name "_1, @function
.align 32
" #tramp_name "_1:
- srlx %g1, 15, %o1
- ldx [%o0 + 8], %o0
- sllx %o1, 1, %o3
- add %o1, %o3, %o1
+ ! srlx %g1, 12, %o1 - Done in .PLT1
+ ldx [%l6 + 8], %o0
+ add %o1, %o1, %o3
mov %i7, %o2
call " #fixup_name "
- sllx %o1, 3, %o1
+ add %o1, %o3, %o1
jmp %o0
restore
.size " #tramp_name "_1, . - " #tramp_name "_1
/* Save the user entry point address in %l0. */
mov %o0,%l0
/* Store the highest stack address. */
- sethi %hi(__libc_stack_end), %g2
- or %g2, %lo(__libc_stack_end), %g2
- ldx [%l7 + %g2], %l1
+ sethi %hi(__libc_stack_end), %g5
+ or %g5, %lo(__libc_stack_end), %g5
+ ldx [%l7 + %g5], %l1
add %sp, 6*8, %l2
stx %l2, [%l1]
/* See if we were run as a command with the executable file name as an
extra leading argument. If so, we must shift things around since we
must keep the stack doubleword aligned. */
- sethi %hi(_dl_skip_args), %g2
- or %g2, %lo(_dl_skip_args), %g2
- ldx [%l7+%g2], %i0
+ sethi %hi(_dl_skip_args), %g5
+ or %g5, %lo(_dl_skip_args), %g5
+ ldx [%l7+%g5], %i0
ld [%i0], %i0
brz,pt %i0, 2f
nop
brnz,pt %i3, 13b
add %i1, 16, %i1
/* Load searchlist of the main object to pass to _dl_init_next. */
-2: sethi %hi(_dl_main_searchlist), %g2
- or %g2, %lo(_dl_main_searchlist), %g2
- ldx [%l7+%g2], %g2
- ldx [%g2], %l1
+2: sethi %hi(_dl_main_searchlist), %g5
+ or %g5, %lo(_dl_main_searchlist), %g5
+ ldx [%l7+%g5], %g5
+ ldx [%g5], %l1
/* Call _dl_init_next to return the address of an initializer to run. */
3: call _dl_init_next
mov %l1, %o0
jmpl %o0, %o7
sub %o7, 24, %o7
/* Clear the startup flag. */
-4: sethi %hi(_dl_starting_up), %g2
- or %g2, %lo(_dl_starting_up), %g2
- ldx [%l7+%g2], %g2
- st %g0, [%g2]
+4: sethi %hi(_dl_starting_up), %g5
+ or %g5, %lo(_dl_starting_up), %g5
+ ldx [%l7+%g5], %g5
+ st %g0, [%g5]
/* Pass our finalizer function to the user in %g1. */
sethi %hi(_dl_fini), %g1
or %g1, %lo(_dl_fini), %g1
size %o2
cnt %o3 */
+ .register %g2, #scratch
+ .register %g3, #scratch
+
ENTRY(__mpn_lshift)
sllx %o2,3,%g1
add %o1,%g1,%o1 ! make %o1 point at end of src
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
#endif
/* Normally, this uses
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
#endif
.text
ldxa [%o0] ASI_PNF, %g5 /* Load */
sub %o1, %o0, %o1 /* IEU1 */
- ldxa [%o0 + %o1] ASI_PNF, %g7 /* Load Group */
+ ldxa [%o0 + %o1] ASI_PNF, %g4 /* Load Group */
add %o0, 8, %o0 /* IEU0 */
-11: sllx %g7, %g2, %o4 /* IEU0 Group */
- ldxa [%o0 + %o1] ASI_PNF, %g7 /* Load */
- srlx %g7, %g3, %o5 /* IEU0 Group */
+11: sllx %g4, %g2, %o4 /* IEU0 Group */
+ ldxa [%o0 + %o1] ASI_PNF, %g4 /* Load */
+ srlx %g4, %g3, %o5 /* IEU0 Group */
mov %g5, %o3 /* IEU1 */
ldxa [%o0] ASI_PNF, %g5 /* Load */
#include <asm/asi.h>
#ifndef XCC
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
#endif
#define FPRS_FEF 4
.align 32
ENTRY(memset)
andcc %o1, 0xff, %o1
- mov %o0, %g3
+ mov %o0, %o5
be,a,pt %icc, 50f
#ifndef USE_BPR
srl %o2, 0, %o1
blu,pn %xcc, 9f
andcc %o0, 0x38, %g5
be,pn %icc, 6f
- mov 64, %o5
+ mov 64, %o4
andcc %o0, 8, %g0
be,pn %icc, 1f
- sub %o5, %g5, %o5
+ sub %o4, %g5, %o4
stx %o1, [%o0]
add %o0, 8, %o0
-1: andcc %o5, 16, %g0
+1: andcc %o4, 16, %g0
be,pn %icc, 1f
- sub %o2, %o5, %o2
+ sub %o2, %o4, %o2
stx %o1, [%o0]
stx %o1, [%o0 + 8]
add %o0, 16, %o0
-1: andcc %o5, 32, %g0
+1: andcc %o4, 32, %g0
be,pn %icc, 7f
andncc %o2, 0x3f, %o3
stw %o1, [%o0]
1: bne,a,pn %xcc, 8f
stb %o1, [%o0]
8: retl
- mov %g3, %o0
+ mov %o5, %o0
17: brz,pn %o2, 0f
8: add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %xcc, 8b
stb %o1, [%o0 - 1]
0: retl
- mov %g3, %o0
+ mov %o5, %o0
6: stx %o1, [%o0]
andncc %o2, 0x3f, %o3
#ifndef USE_BPR
srl %o1, 0, %o1
#endif
- mov %o0, %g3
+ mov %o0, %o5
50: cmp %o1, 7
bleu,pn %xcc, 17f
andcc %o0, 3, %o2
2: blu,pn %xcc, 9f
andcc %o0, 0x38, %o2
be,pn %icc, 6f
- mov 64, %o5
+ mov 64, %o4
andcc %o0, 8, %g0
be,pn %icc, 1f
- sub %o5, %o2, %o5
+ sub %o4, %o2, %o4
stx %g0, [%o0]
add %o0, 8, %o0
-1: andcc %o5, 16, %g0
+1: andcc %o4, 16, %g0
be,pn %icc, 1f
- sub %o1, %o5, %o1
+ sub %o1, %o4, %o1
stx %g0, [%o0]
stx %g0, [%o0 + 8]
add %o0, 16, %o0
-1: andcc %o5, 32, %g0
+1: andcc %o4, 32, %g0
be,pn %icc, 7f
andncc %o1, 0x3f, %o3
stx %g0, [%o0]
1: bne,a,pn %xcc, 8f
stb %g0, [%o0]
8: retl
- mov %g3, %o0
+ mov %o5, %o0
17: be,pn %xcc, 13b
orcc %o1, 0, %g0
be,pn %xcc, 0f
bne,pt %xcc, 8b
stb %g0, [%o0 - 1]
0: retl
- mov %g3, %o0
+ mov %o5, %o0
END(__bzero)
weak_alias(__bzero, bzero)
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
#endif
/* Normally, this uses
sub %o3, %g1, %o2 /* IEU0 Group */
#ifdef EIGHTBIT_NOT_RARE
- andn %o2, %o3, %g7 /* IEU0 Group */
+ andn %o2, %o3, %o5 /* IEU0 Group */
ldxa [%o0] ASI_PNF, %o3 /* Load */
- andcc %g7, %g2, %g0 /* IEU1 Group */
+ andcc %o5, %g2, %g0 /* IEU1 Group */
#else
ldxa [%o0] ASI_PNF, %o3 /* Load */
andcc %o2, %g2, %g0 /* IEU1 Group */
size %o2
cnt %o3 */
+ .register %g2, #scratch
+ .register %g3, #scratch
+
ENTRY(__mpn_rshift)
ldx [%o1],%g2 ! load first limb
sub %g0,%o3,%o5 ! negate shift count
#include <sysdep.h>
#include <asm/asi.h>
+#ifndef XCC
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
+#endif
/* Normally, this uses
((xword - 0x0101010101010101) & 0x8080808080808080) test
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
#endif
/* Normally, this uses
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
#endif
/* Normally, this uses
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
#endif
/* Normally, this uses
#include <sysdep.h>
#include <asm/asi.h>
+#ifndef XCC
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
+#endif
/* Normally, this uses
((xword - 0x0101010101010101) & 0x8080808080808080) test
#include <sysdep.h>
#include <asm/asi.h>
+#ifndef XCC
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
+#endif
/* Normally, this uses
((xword - 0x0101010101010101) & 0x8080808080808080) test
#define XCC xcc
#define STACK_SIZE 128
#define STACK_OFFSET 128+0x7ff
+ .register %g2, #scratch
#else
#define STACK_SIZE 64
#define STACK_OFFSET 64
sub %sp, STACK_SIZE+32, %sp /* IEU0 Group */
mov 1, %o4 /* IEU1 */
stx %o4, [%sp + STACK_OFFSET] /* Store Group */
- mov %o0, %g7 /* IEU0 */
+ mov %o0, %g4 /* IEU0 */
stx %g0, [%sp + STACK_OFFSET + 8] /* Store Group */
add %sp, STACK_OFFSET, %o5 /* IEU0 */
ldx [%o0], %o2 /* Load Group */
4: srlx %o2, 59, %o3 /* IEU0 Group */
- srlx %o2, 56, %g3 /* IEU0 Group */
+ srlx %o2, 56, %g5 /* IEU0 Group */
5: and %o3, 0x18, %o3 /* IEU1 */
- andcc %g3, 0x3f, %g3 /* IEU1 Group */
+ andcc %g5, 0x3f, %g5 /* IEU1 Group */
ldx [%o5 + %o3], %g2 /* Load */
srlx %o2, 51, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 48, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 48, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 13f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 43, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 40, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 40, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 14f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 35, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 32, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 32, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 15f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 27, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 24, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 24, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 16f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 19, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 16, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 16, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 17f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 11, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
add %o0, 8, %o0 /* IEU1 */
- srlx %o2, 8, %g3 /* IEU0 Group */
+ srlx %o2, 8, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 18f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
- sllx %o4, %g3, %g1 /* IEU0 */
- mov %o2, %g3 /* IEU1 */
+ sllx %o4, %g5, %g1 /* IEU0 */
+ mov %o2, %g5 /* IEU1 */
srlx %o2, 3, %o3 /* IEU0 Group */
ldxa [%o0] ASI_PNF, %o2 /* Load */
bne,pn %xcc, 19f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
- sllx %o4, %g3, %g1 /* IEU0 */
+ sllx %o4, %g5, %g1 /* IEU0 */
srlx %o2, 59, %o3 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 Group */
be,pt %xcc, 5b /* CTI */
- srlx %o2, 56, %g3 /* IEU0 Group */
+ srlx %o2, 56, %g5 /* IEU0 Group */
sub %o0, 1, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
.align 16
19: sub %o0, 2, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
18: sub %o0, 3, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
17: add %o0, 4, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
16: add %o0, 3, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
15: add %o0, 2, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
14: add %o0, 1, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
13: add %sp, STACK_SIZE+32, %sp /* IEU1 */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
.align 16
12: sub %o0, 1, %o0 /* IEU0 Group */
add %sp, STACK_SIZE+32, %sp /* IEU1 */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
END(strcspn)
or %g1, %lo(0x01010101), %g1 /* IEU0 Group */
mov %o0, %o1 /* IEU1 */
- sllx %g1, 32, %g2 /* IEU0 Group */
+ sllx %g1, 32, %g4 /* IEU0 Group */
andcc %o0, 7, %g0 /* IEU1 */
- or %g1, %g2, %g1 /* IEU0 Group */
+ or %g1, %g4, %g1 /* IEU0 Group */
brz,pn %o3, 13f /* CTI+IEU1 */
- sllx %g1, 7, %g2 /* IEU0 Group */
+ sllx %g1, 7, %g4 /* IEU0 Group */
bne,a,pn %icc, 15f /* CTI */
add %o0, 1, %o0 /* IEU1 */
/* %g1 = 0x0101010101010101 *
- * %g2 = 0x8080808080808080 *
+ * %g4 = 0x8080808080808080 *
* %o0 = string pointer *
* %o1 = start of string */
1: ldx [%o0], %o3 /* Load Group */
add %o0, 8, %o0 /* IEU1 */
2: sub %o3, %g1, %o2 /* IEU0 Group */
#ifdef EIGHTBIT_NOT_RARE
- andn %o2, %o3, %g7 /* IEU0 Group */
+ andn %o2, %o3, %o5 /* IEU0 Group */
ldxa [%o0] ASI_PNF, %o3 /* Load */
- andcc %g7, %g2, %g0 /* IEU1 Group */
+ andcc %o5, %g4, %g0 /* IEU1 Group */
#else
ldxa [%o0] ASI_PNF, %o3 /* Load */
- andcc %o2, %g2, %g0 /* IEU1 Group */
+ andcc %o2, %g4, %g0 /* IEU1 Group */
#endif
be,pt %xcc, 2b /* CTI */
add %o0, 8, %o0 /* IEU0 */
- addcc %o2, %g1, %g3 /* IEU1 Group */
+ addcc %o2, %g1, %g5 /* IEU1 Group */
#ifdef EIGHTBIT_NOT_RARE
- srlx %g7, 32, %g7 /* IEU0 */
+ srlx %o5, 32, %o5 /* IEU0 */
-3: andcc %g7, %g2, %g0 /* IEU1 Group */
+3: andcc %o5, %g4, %g0 /* IEU1 Group */
#else
srlx %o2, 32, %o2 /* IEU0 */
-3: andcc %o2, %g2, %g0 /* IEU1 Group */
+3: andcc %o2, %g4, %g0 /* IEU1 Group */
#endif
be,pn %xcc, 4f /* CTI */
- srlx %g3, 56, %o2 /* IEU0 */
+ srlx %g5, 56, %o2 /* IEU0 */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 12f /* CTI */
- srlx %g3, 48, %o2 /* IEU0 */
+ srlx %g5, 48, %o2 /* IEU0 */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 11f /* CTI */
- srlx %g3, 40, %o2 /* IEU0 */
+ srlx %g5, 40, %o2 /* IEU0 */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 10f /* CTI */
- srlx %g3, 32, %o2 /* IEU0 */
+ srlx %g5, 32, %o2 /* IEU0 */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 9f /* CTI */
-4: srlx %g3, 24, %o2 /* IEU0 */
+4: srlx %g5, 24, %o2 /* IEU0 */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 8f /* CTI */
- srlx %g3, 16, %o2 /* IEU0 */
+ srlx %g5, 16, %o2 /* IEU0 */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 7f /* CTI */
- srlx %g3, 8, %o2 /* IEU0 */
+ srlx %g5, 8, %o2 /* IEU0 */
andcc %o2, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 6f /* CTI */
sub %o3, %g1, %o2 /* IEU0 */
- andcc %g3, 0xff, %g0 /* IEU1 Group */
+ andcc %g5, 0xff, %g0 /* IEU1 Group */
be,pn %icc, 5f /* CTI */
ldxa [%o0] ASI_PNF, %o3 /* Load */
- andcc %o2, %g2, %g0 /* IEU1 Group */
+ andcc %o2, %g4, %g0 /* IEU1 Group */
be,pt %xcc, 2b /* CTI */
add %o0, 8, %o0 /* IEU0 */
- addcc %o2, %g1, %g3 /* IEU1 Group */
+ addcc %o2, %g1, %g5 /* IEU1 Group */
ba,pt %xcc, 3b /* CTI */
srlx %o2, 32, %o2 /* IEU0 */
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
#endif
/* Normally, this uses
#ifndef XCC
#define XCC xcc
#define USE_BPR
+ .register %g2, #scratch
+ .register %g3, #scratch
+ .register %g7, #scratch
#endif
/* Normally, this uses
#define XCC xcc
#define STACK_SIZE 128
#define STACK_OFFSET 128+0x7ff
+ .register %g2, #scratch
#else
#define STACK_SIZE 64
#define STACK_OFFSET 64
ldub [%o0], %o2 /* Load */
ldx [%o0], %o2 /* Load Group */
4: srlx %o2, 59, %o3 /* IEU0 Group */
- srlx %o2, 56, %g3 /* IEU0 Group */
+ srlx %o2, 56, %g4 /* IEU0 Group */
5: and %o3, 0x18, %o3 /* IEU1 */
- andcc %g3, 0x3f, %g3 /* IEU1 Group */
+ andcc %g4, 0x3f, %g4 /* IEU1 Group */
ldx [%o5 + %o3], %g2 /* Load */
srlx %o2, 51, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 48, %g3 /* IEU0 Group */
+ sllx %o4, %g4, %g1 /* IEU0 Group */
+ srlx %o2, 48, %g4 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 13f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g4, 0x3f, %g4 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 43, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 40, %g3 /* IEU0 Group */
+ sllx %o4, %g4, %g1 /* IEU0 Group */
+ srlx %o2, 40, %g4 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 14f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g4, 0x3f, %g4 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 35, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 32, %g3 /* IEU0 Group */
+ sllx %o4, %g4, %g1 /* IEU0 Group */
+ srlx %o2, 32, %g4 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 15f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g4, 0x3f, %g4 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 27, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 24, %g3 /* IEU0 Group */
+ sllx %o4, %g4, %g1 /* IEU0 Group */
+ srlx %o2, 24, %g4 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 16f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g4, 0x3f, %g4 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 19, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 16, %g3 /* IEU0 Group */
+ sllx %o4, %g4, %g1 /* IEU0 Group */
+ srlx %o2, 16, %g4 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 17f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g4, 0x3f, %g4 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 11, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
+ sllx %o4, %g4, %g1 /* IEU0 Group */
add %o0, 8, %o0 /* IEU1 */
- srlx %o2, 8, %g3 /* IEU0 Group */
+ srlx %o2, 8, %g4 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
bne,pn %xcc, 18f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g4, 0x3f, %g4 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
- sllx %o4, %g3, %g1 /* IEU0 */
+ sllx %o4, %g4, %g1 /* IEU0 */
mov %o2, %g5 /* IEU1 */
srlx %o2, 3, %o3 /* IEU0 Group */
ldxa [%o0] ASI_PNF, %o2 /* Load */
andcc %g2, %g1, %g2 /* IEU1 Group */
bne,pn %xcc, 19f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g5, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g4 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
- sllx %o4, %g3, %g1 /* IEU0 */
+ sllx %o4, %g4, %g1 /* IEU0 */
srlx %o2, 59, %o3 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 Group */
be,pt %xcc, 5b /* CTI */
- srlx %o2, 56, %g3 /* IEU0 Group */
+ srlx %o2, 56, %g4 /* IEU0 Group */
sub %o0, 1, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
#define XCC xcc
#define STACK_SIZE 128
#define STACK_OFFSET 128+0x7ff
+ .register %g2, #scratch
#else
#define STACK_SIZE 64
#define STACK_OFFSET 64
sub %sp, STACK_SIZE+32, %sp /* IEU0 Group */
mov 1, %o4 /* IEU1 */
stx %g0, [%sp + STACK_OFFSET] /* Store Group */
- mov %o0, %g7 /* IEU0 */
+ mov %o0, %g4 /* IEU0 */
stx %g0, [%sp + STACK_OFFSET + 8] /* Store Group */
add %sp, STACK_OFFSET, %o5 /* IEU0 */
ldx [%o0], %o2 /* Load Group */
4: srlx %o2, 59, %o3 /* IEU0 Group */
- srlx %o2, 56, %g3 /* IEU0 Group */
+ srlx %o2, 56, %g5 /* IEU0 Group */
5: and %o3, 0x18, %o3 /* IEU1 */
- andcc %g3, 0x3f, %g3 /* IEU1 Group */
+ andcc %g5, 0x3f, %g5 /* IEU1 Group */
ldx [%o5 + %o3], %g2 /* Load */
srlx %o2, 51, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 48, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 48, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
be,pn %xcc, 13f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 43, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 40, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 40, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
be,pn %xcc, 14f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 35, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 32, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 32, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
be,pn %xcc, 15f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 27, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 24, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 24, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
be,pn %xcc, 16f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 19, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
- srlx %o2, 16, %g3 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
+ srlx %o2, 16, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
be,pn %xcc, 17f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
srlx %o2, 11, %o3 /* IEU0 */
- sllx %o4, %g3, %g1 /* IEU0 Group */
+ sllx %o4, %g5, %g1 /* IEU0 Group */
add %o0, 8, %o0 /* IEU1 */
- srlx %o2, 8, %g3 /* IEU0 Group */
+ srlx %o2, 8, %g5 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 */
be,pn %xcc, 18f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
- sllx %o4, %g3, %g1 /* IEU0 */
- mov %o2, %g3 /* IEU1 */
+ sllx %o4, %g5, %g1 /* IEU0 */
+ mov %o2, %g5 /* IEU1 */
srlx %o2, 3, %o3 /* IEU0 Group */
ldxa [%o0] ASI_PNF, %o2 /* Load */
be,pn %xcc, 19f /* CTI */
and %o3, 0x18, %o3 /* IEU0 Group */
- and %g3, 0x3f, %g3 /* IEU1 */
+ and %g5, 0x3f, %g5 /* IEU1 */
ldx [%o5 + %o3], %g2 /* Load Group */
- sllx %o4, %g3, %g1 /* IEU0 */
+ sllx %o4, %g5, %g1 /* IEU0 */
srlx %o2, 59, %o3 /* IEU0 Group */
andcc %g2, %g1, %g2 /* IEU1 Group */
bne,pt %xcc, 5b /* CTI */
- srlx %o2, 56, %g3 /* IEU0 Group */
+ srlx %o2, 56, %g5 /* IEU0 Group */
sub %o0, 1, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
.align 16
19: sub %o0, 2, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
18: sub %o0, 3, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
17: add %o0, 4, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
16: add %o0, 3, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
15: add %o0, 2, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
14: add %o0, 1, %o0 /* IEU1 */
add %sp, STACK_SIZE+32, %sp /* IEU0 Group */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
13: add %sp, STACK_SIZE+32, %sp /* IEU1 */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
.align 16
12: sub %o0, 1, %o0 /* IEU0 Group */
add %sp, STACK_SIZE+32, %sp /* IEU1 */
retl /* CTI+IEU1 Group */
- sub %o0, %g7, %o0 /* IEU0 */
+ sub %o0, %g4, %o0 /* IEU0 */
END(strspn)
/* SPARC v9 __mpn_sub_n -- Subtract two limb vectors of the same length > 0
and store difference in a third limb vector.
- Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1996, 1997, 1999 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
ENTRY(__mpn_sub_n)
- sub %g0,%o3,%g3
+ sub %g0,%o3,%g5
sllx %o3,3,%g1
add %o1,%g1,%o1 ! make s1_ptr point at end
add %o2,%g1,%o2 ! make s2_ptr point at end
add %o0,%g1,%o0 ! make res_ptr point at end
mov 0,%o4 ! clear carry variable
- sllx %g3,3,%o5 ! compute initial address index
+ sllx %g5,3,%o5 ! compute initial address index
1: ldx [%o2+%o5],%g1 ! load s2 limb
- add %g3,1,%g3 ! increment loop count
- ldx [%o1+%o5],%g2 ! load s1 limb
+ add %g5,1,%g5 ! increment loop count
+ ldx [%o1+%o5],%o3 ! load s1 limb
addcc %g1,%o4,%g1 ! add s2 limb and carry variable
movcc %xcc,0,%o4 ! if carry-out, o4 was 1; clear it
- subcc %g2,%g1,%g1 ! subtract s1 limb from sum
+ subcc %o3,%g1,%g1 ! subtract s1 limb from sum
stx %g1,[%o0+%o5] ! store result
add %o5,8,%o5 ! increment address index
- brnz,pt %g3,1b
+ brnz,pt %g5,1b
movcs %xcc,1,%o4 ! if s1 subtract gave carry, record it
retl