Materialize zeros by copying from %g0, which is now marked as constant.
This makes it possible for some common operations (like integer negation) to be
performed in fewer instructions.
This is a continuation of @arichardson's earlier patch, D132561.
Reviewed By: arsenm
Differential Revision: https://reviews.llvm.org/D138887
// Single-instruction patterns.
+// Zero immediate.
+def : Pat<(i64 0), (COPY (i64 G0))>,
+ Requires<[Is64Bit]>;
+
// The ALU instructions want their simm13 operands as i32 immediates.
def as_i32imm : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
//===----------------------------------------------------------------------===//
// Zero immediate.
-def : Pat<(i32 0),
- (ORrr (i32 G0), (i32 G0))>;
+def : Pat<(i32 0), (COPY (i32 G0))>;
// Small immediates.
def : Pat<(i32 simm13:$val),
(ORri (i32 G0), imm:$val)>;
def : Pat<(atomic_store_32 ADDRrr:$dst, i32:$val), (STrr ADDRrr:$dst, $val)>;
def : Pat<(atomic_store_32 ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>;
+// Zero register pair.
+// This forces zeroing of the upper half to be done with ORrr instead of COPY,
+// since otherwise the `COPY G0` would be converted into `COPY G0_G1` later on,
+// which is not what we want in this case.
+def : Pat<(build_vector (i32 0), (i32 0)),
+ (INSERT_SUBREG (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
+ (ORrr (i32 G0), (i32 G0)), sub_even),
+ (i32 G0), sub_odd)>;
+
// extract_vector
def : Pat<(extractelt (v2i32 IntPair:$Rn), 0),
(i32 (EXTRACT_SUBREG IntPair:$Rn, sub_even))>;
def WSTATE : SparcCtrlReg<14, "WSTATE">;
// Integer registers
-def G0 : Ri< 0, "G0">, DwarfRegNum<[0]>;
+def G0 : Ri< 0, "G0">, DwarfRegNum<[0]> {
+ let isConstant = true;
+}
def G1 : Ri< 1, "G1">, DwarfRegNum<[1]>;
def G2 : Ri< 2, "G2">, DwarfRegNum<[2]>;
def G3 : Ri< 3, "G3">, DwarfRegNum<[3]>;
def PRRegs : RegisterClass<"SP", [i64], 64,
(add TPC, TNPC, TSTATE, TT, TICK, TBA, PSTATE, TL, PIL, CWP,
CANSAVE, CANRESTORE, CLEANWIN, OTHERWIN, WSTATE)>;
+
; restore %g0, %g0, %o0
;
; CHECK: ret_imm0
-; CHECK: mov 0, %i0
+; CHECK: mov %g0, %i0
; OPT: ret_imm0
; OPT: retl
-; OPT: mov 0, %o0
+; OPT: mov %g0, %o0
define i64 @ret_imm0() {
ret i64 0
}
}
; CHECK-LABEL: setcc_resultty
-; CHECK-DAG: mov 0, %o0
+; CHECK-DAG: mov %g0, %o0
; CHECK-DAG: mov %i0, %o1
-; CHECK-DAG: mov %o0, %o2
+; CHECK-DAG: mov %g0, %o2
; CHECK-DAG: mov 32, %o3
; CHECK-DAG: call __multi3
; CHECK: cmp
; SPARC: sll %o4, %o1, %o4
; SPARC: and %o0, 255, %o0
; SPARC: sll %o0, %o1, %o0
-; SPARC: andn %g2, %o5, %g2
-; SPARC: mov %g0, %o5
+; SPARC: andn %g2, %o5, %o5
; SPARC: [[LABEL1:\.L.*]]:
-; SPARC: or %g2, %o4, %g3
-; SPARC: or %g2, %o0, %g4
-; SPARC: cas [%o2], %g4, %g3
-; SPARC: cmp %g3, %g4
-; SPARC: mov %o5, %g4
+; SPARC: or %o5, %o4, %g2
+; SPARC: or %o5, %o0, %g3
+; SPARC: cas [%o2], %g3, %g2
+; SPARC: mov %g0, %g4
+; SPARC: cmp %g2, %g3
; SPARC: move %icc, 1, %g4
; SPARC: cmp %g4, 0
; SPARC: bne %icc, [[LABEL2:\.L.*]]
; SPARC: nop
-; SPARC: and %g3, %o3, %g4
-; SPARC: cmp %g2, %g4
+; SPARC: and %g2, %o3, %g3
+; SPARC: cmp %o5, %g3
; SPARC: bne %icc, [[LABEL1]]
-; SPARC: mov %g4, %g2
+; SPARC: mov %g3, %o5
; SPARC: [[LABEL2]]:
; SPARC: retl
-; SPARC: srl %g3, %o1, %o0
+; SPARC: srl %g2, %o1, %o0
; SPARC64-LABEL: test_cmpxchg_i8
; SPARC64: and %o1, -4, %o2
; SPARC64: mov 3, %o3
; SPARC64: sll %o4, %o1, %o4
; SPARC64: and %o0, 255, %o0
; SPARC64: sll %o0, %o1, %o0
-; SPARC64: andn %g2, %o5, %g2
-; SPARC64: mov %g0, %o5
+; SPARC64: andn %g2, %o5, %o5
; SPARC64: [[LABEL1:\.L.*]]:
-; SPARC64: or %g2, %o4, %g3
-; SPARC64: or %g2, %o0, %g4
-; SPARC64: cas [%o2], %g4, %g3
-; SPARC64: cmp %g3, %g4
-; SPARC64: mov %o5, %g4
+; SPARC64: or %o5, %o4, %g2
+; SPARC64: or %o5, %o0, %g3
+; SPARC64: cas [%o2], %g3, %g2
+; SPARC64: mov %g0, %g4
+; SPARC64: cmp %g2, %g3
; SPARC64: move %icc, 1, %g4
; SPARC64: cmp %g4, 0
; SPARC64: bne %icc, [[LABEL2:\.L.*]]
; SPARC64: nop
-; SPARC64: and %g3, %o3, %g4
-; SPARC64: cmp %g2, %g4
+; SPARC64: and %g2, %o3, %g3
+; SPARC64: cmp %o5, %g3
; SPARC64: bne %icc, [[LABEL1]]
-; SPARC64: mov %g4, %g2
+; SPARC64: mov %g3, %o5
; SPARC64: [[LABEL2]]:
; SPARC64: retl
-; SPARC64: srl %g3, %o1, %o0
+; SPARC64: srl %g2, %o1, %o0
define i8 @test_cmpxchg_i8(i8 %a, i8* %ptr) {
entry:
%pair = cmpxchg i8* %ptr, i8 %a, i8 123 monotonic monotonic
; SPARC: mov 123, %o0
; SPARC: sll %o0, %o1, %o0
; SPARC: sll %o4, %o1, %o4
-; SPARC: andn %g2, %o5, %g2
-; SPARC: mov %g0, %o5
+; SPARC: andn %g2, %o5, %o5
; SPARC: [[LABEL1:\.L.*]]:
-; SPARC: or %g2, %o0, %g3
-; SPARC: or %g2, %o4, %g4
-; SPARC: cas [%o2], %g4, %g3
-; SPARC: cmp %g3, %g4
-; SPARC: mov %o5, %g4
+; SPARC: or %o5, %o0, %g2
+; SPARC: or %o5, %o4, %g3
+; SPARC: cas [%o2], %g3, %g2
+; SPARC: mov %g0, %g4
+; SPARC: cmp %g2, %g3
; SPARC: move %icc, 1, %g4
; SPARC: cmp %g4, 0
; SPARC: bne %icc, [[LABEL2:\.L.*]]
; SPARC: nop
-; SPARC: and %g3, %o3, %g4
-; SPARC: cmp %g2, %g4
+; SPARC: and %g2, %o3, %g3
+; SPARC: cmp %o5, %g3
; SPARC: bne %icc, [[LABEL1]]
-; SPARC: mov %g4, %g2
+; SPARC: mov %g3, %o5
; SPARC: [[LABEL2]]:
; SPARC: retl
-; SPARC: srl %g3, %o1, %o0
+; SPARC: srl %g2, %o1, %o0
; SPARC64: and %o1, -4, %o2
; SPARC64: and %o1, 3, %o1
; SPARC64: xor %o1, 2, %o1
; SPARC64: mov 123, %o0
; SPARC64: sll %o0, %o1, %o0
; SPARC64: sll %o4, %o1, %o4
-; SPARC64: andn %g2, %o5, %g2
-; SPARC64: mov %g0, %o5
+; SPARC64: andn %g2, %o5, %o5
; SPARC64: [[LABEL1:\.L.*]]:
-; SPARC64: or %g2, %o0, %g3
-; SPARC64: or %g2, %o4, %g4
-; SPARC64: cas [%o2], %g4, %g3
-; SPARC64: cmp %g3, %g4
-; SPARC64: mov %o5, %g4
+; SPARC64: or %o5, %o0, %g2
+; SPARC64: or %o5, %o4, %g3
+; SPARC64: cas [%o2], %g3, %g2
+; SPARC64: mov %g0, %g4
+; SPARC64: cmp %g2, %g3
; SPARC64: move %icc, 1, %g4
; SPARC64: cmp %g4, 0
; SPARC64: bne %icc, [[LABEL2:\.L.*]]
; SPARC64: nop
-; SPARC64: and %g3, %o3, %g4
-; SPARC64: cmp %g2, %g4
+; SPARC64: and %g2, %o3, %g3
+; SPARC64: cmp %o5, %g3
; SPARC64: bne %icc, [[LABEL1]]
-; SPARC64: mov %g4, %g2
+; SPARC64: mov %g3, %o5
; SPARC64: [[LABEL2]]:
; SPARC64: retl
-; SPARC64: srl %g3, %o1, %o0
+; SPARC64: srl %g2, %o1, %o0
define i16 @test_cmpxchg_i16(i16 %a, i16* %ptr) {
entry:
%pair = cmpxchg i16* %ptr, i16 %a, i16 123 monotonic monotonic
; SPARC-LABEL: test_load_add_i32
; SPARC: membar
-; SPARC: mov %g0
; SPARC: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
; SPARC: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
; SPARC: cas [%o0], [[V]], [[V2]]
; SPARC: membar
; SPARC64-LABEL: test_load_add_i32
; SPARC64: membar
-; SPARC64: mov %g0
; SPARC64: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
; SPARC64: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
; SPARC64: cas [%o0], [[V]], [[V2]]
; SPARC-NEXT: .cfi_register %o7, %i7
; SPARC-NEXT: mov %g0, %i4
; SPARC-NEXT: ldd [%i2], %i0
-; SPARC-NEXT: mov %i4, %i5
+; SPARC-NEXT: mov %g0, %i5
; SPARC-NEXT: std %i4, [%i2]
; SPARC-NEXT: ldd [%i3], %i2
; SPARC-NEXT: restore
; CHECK-LABEL: f:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0: ! %entry
-; CHECK-NEXT: mov %g0, %o1
-; CHECK-NEXT: sub %o1, %o0, %o1
+; CHECK-NEXT: sub %g0, %o0, %o1
; CHECK-NEXT: and %o0, %o1, %o1
; CHECK-NEXT: sethi 122669, %o2
; CHECK-NEXT: or %o2, 305, %o2
; CHECK-LABEL: g:
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0: ! %entry
-; CHECK-NEXT: mov %g0, %o2
-; CHECK-NEXT: sub %o2, %o1, %o3
-; CHECK-NEXT: and %o1, %o3, %o3
-; CHECK-NEXT: sethi 122669, %o4
-; CHECK-NEXT: or %o4, 305, %o4
-; CHECK-NEXT: smul %o3, %o4, %o3
-; CHECK-NEXT: sethi %hi(.LCPI1_0), %o5
-; CHECK-NEXT: add %o5, %lo(.LCPI1_0), %o5
-; CHECK-NEXT: sub %o2, %o0, %g2
-; CHECK-NEXT: and %o0, %g2, %g2
-; CHECK-NEXT: smul %g2, %o4, %o4
-; CHECK-NEXT: srl %o4, 27, %o4
-; CHECK-NEXT: ldub [%o5+%o4], %o4
+; CHECK-NEXT: sub %g0, %o1, %o2
+; CHECK-NEXT: and %o1, %o2, %o2
+; CHECK-NEXT: sethi 122669, %o3
+; CHECK-NEXT: or %o3, 305, %o3
+; CHECK-NEXT: smul %o2, %o3, %o2
+; CHECK-NEXT: sethi %hi(.LCPI1_0), %o4
+; CHECK-NEXT: add %o4, %lo(.LCPI1_0), %o4
+; CHECK-NEXT: sub %g0, %o0, %o5
+; CHECK-NEXT: and %o0, %o5, %o5
+; CHECK-NEXT: smul %o5, %o3, %o3
; CHECK-NEXT: srl %o3, 27, %o3
-; CHECK-NEXT: ldub [%o5+%o3], %o5
-; CHECK-NEXT: add %o4, 32, %o3
+; CHECK-NEXT: ldub [%o4+%o3], %o3
+; CHECK-NEXT: srl %o2, 27, %o2
+; CHECK-NEXT: ldub [%o4+%o2], %o4
+; CHECK-NEXT: add %o3, 32, %o2
; CHECK-NEXT: cmp %o1, 0
-; CHECK-NEXT: movne %icc, %o5, %o3
+; CHECK-NEXT: movne %icc, %o4, %o2
; CHECK-NEXT: or %o1, %o0, %o0
; CHECK-NEXT: cmp %o0, 0
-; CHECK-NEXT: move %icc, 0, %o3
-; CHECK-NEXT: mov %o2, %o0
+; CHECK-NEXT: move %icc, 0, %o2
+; CHECK-NEXT: mov %g0, %o0
; CHECK-NEXT: retl
-; CHECK-NEXT: mov %o3, %o1
+; CHECK-NEXT: mov %o2, %o1
entry:
%0 = call i64 @llvm.cttz.i64(i64 %x, i1 true)
%1 = icmp eq i64 %x, 0
; CHECK: sethi 1049856, %o0
; CHECK: mov %g0, %o1
; CHECK-LE: mov %g0, %o0
-; CHECK-LE: sethi 1049856, %o1
+; CHECK-LE: sethi 1049856, %o2
define <2 x i32> @bitcast() {
%1 = bitcast double 5.0 to <2 x i32>
ret <2 x i32> %1
; CHECK-LABEL: test_call
; CHECK: sethi 1049856, %o0
; CHECK: mov %g0, %o1
-; CHECK-LE: mov %g0, %o0
; CHECK-LE: sethi 1049856, %o1
+; CHECK-LE: mov %g0, %o0
declare void @a(double)
define void @test_call() {
call void @a(double 5.0)
; CHECK: sethi 1048576, %o0
; CHECK: mov %g0, %o1
; CHECK: mov %o0, %o2
-; CHECK: mov %o1, %o3
-; CHECK-LE: mov %g0, %o0
+; CHECK: mov %g0, %o3
; CHECK-LE: sethi 1048576, %o1
+; CHECK-LE: mov %g0, %o0
declare double @llvm.pow.f64(double, double)
define double @test_intrins_call() {
%1 = call double @llvm.pow.f64(double 2.0, double 2.0)
; CHECK: .cfi_startproc
; CHECK-NEXT: ! %bb.0: ! %bb
; CHECK-NEXT: retl
-; CHECK-NEXT: mov 0, %o0
+; CHECK-NEXT: mov %g0, %o0
bb:
%i = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> undef, <4 x i64> undef, <4 x i64> <i64 57, i64 27, i64 12, i64 33>)
%i1 = add <4 x i64> %i, zeroinitializer
;; Ensures that tied in and out gets allocated properly.
; CHECK-LABEL: test_i64_inout:
-; CHECK: mov %g0, %i2
; CHECK: mov 5, %i3
+; CHECK: mov %g0, %i2
; CHECK: xor %i2, %g0, %i2
; CHECK: mov %i2, %i0
; CHECK: ret
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.1: ! %cond.false
; CHECK-NEXT: .LBB0_2: ! %targetblock
-; CHECK-NEXT: mov %g0, %o0
-; CHECK-NEXT: cmp %o0, 0
+; CHECK-NEXT: cmp %g0, 0
; CHECK-NEXT: bne %icc, .LBB0_4
; CHECK-NEXT: nop
; CHECK-NEXT: ! %bb.3: ! %cond.false.i83
; SPARC-NEXT: .cfi_def_cfa_register %fp
; SPARC-NEXT: .cfi_window_save
; SPARC-NEXT: .cfi_register %o7, %i7
-; SPARC-NEXT: ld [%fp+92], %l2
-; SPARC-NEXT: ld [%fp+96], %l3
-; SPARC-NEXT: mov %i3, %l7
-; SPARC-NEXT: mov %i2, %l5
-; SPARC-NEXT: mov %i1, %l4
-; SPARC-NEXT: mov %i0, %l6
+; SPARC-NEXT: ld [%fp+92], %l1
+; SPARC-NEXT: ld [%fp+96], %l2
+; SPARC-NEXT: mov %i3, %l6
+; SPARC-NEXT: mov %i2, %l4
+; SPARC-NEXT: mov %i1, %i3
+; SPARC-NEXT: mov %i0, %l3
; SPARC-NEXT: sra %i0, 31, %o4
; SPARC-NEXT: st %o4, [%sp+96]
; SPARC-NEXT: st %o4, [%sp+92]
; SPARC-NEXT: mov %i4, %o0
; SPARC-NEXT: mov %i5, %o1
-; SPARC-NEXT: mov %l2, %o2
-; SPARC-NEXT: mov %l3, %o3
+; SPARC-NEXT: mov %l1, %o2
+; SPARC-NEXT: mov %l2, %o3
; SPARC-NEXT: call __multi3
; SPARC-NEXT: mov %o4, %o5
; SPARC-NEXT: st %o0, [%fp+-12] ! 4-byte Folded Spill
; SPARC-NEXT: st %o2, [%fp+-20] ! 4-byte Folded Spill
; SPARC-NEXT: st %o3, [%fp+-24] ! 4-byte Folded Spill
; SPARC-NEXT: st %i5, [%sp+96]
-; SPARC-NEXT: mov %g0, %l0
; SPARC-NEXT: st %i4, [%sp+92]
-; SPARC-NEXT: mov %l0, %o0
-; SPARC-NEXT: mov %l0, %o1
+; SPARC-NEXT: mov %g0, %o0
+; SPARC-NEXT: mov %g0, %o1
; SPARC-NEXT: mov %i2, %o2
-; SPARC-NEXT: mov %i3, %o3
-; SPARC-NEXT: mov %l0, %o4
+; SPARC-NEXT: mov %l6, %o3
+; SPARC-NEXT: mov %g0, %o4
; SPARC-NEXT: call __multi3
-; SPARC-NEXT: mov %l0, %o5
+; SPARC-NEXT: mov %g0, %o5
; SPARC-NEXT: st %o0, [%fp+-28] ! 4-byte Folded Spill
; SPARC-NEXT: st %o1, [%fp+-32] ! 4-byte Folded Spill
-; SPARC-NEXT: st %o2, [%fp+-36] ! 4-byte Folded Spill
-; SPARC-NEXT: mov %o3, %i3
-; SPARC-NEXT: st %l3, [%sp+96]
-; SPARC-NEXT: st %l2, [%sp+92]
-; SPARC-NEXT: mov %l0, %o0
-; SPARC-NEXT: mov %l0, %o1
-; SPARC-NEXT: mov %i2, %o2
-; SPARC-NEXT: mov %l7, %o3
-; SPARC-NEXT: mov %l0, %o4
+; SPARC-NEXT: mov %o2, %l0
+; SPARC-NEXT: mov %o3, %i2
+; SPARC-NEXT: st %l2, [%sp+96]
+; SPARC-NEXT: st %l1, [%sp+92]
+; SPARC-NEXT: mov %g0, %o0
+; SPARC-NEXT: mov %g0, %o1
+; SPARC-NEXT: mov %l4, %o2
+; SPARC-NEXT: mov %l6, %o3
+; SPARC-NEXT: mov %g0, %o4
; SPARC-NEXT: call __multi3
-; SPARC-NEXT: mov %l0, %o5
-; SPARC-NEXT: mov %o0, %i0
-; SPARC-NEXT: mov %o1, %i1
+; SPARC-NEXT: mov %g0, %o5
+; SPARC-NEXT: mov %o0, %i1
+; SPARC-NEXT: mov %o1, %i0
; SPARC-NEXT: st %o2, [%fp+-4] ! 4-byte Folded Spill
; SPARC-NEXT: st %o3, [%fp+-8] ! 4-byte Folded Spill
-; SPARC-NEXT: st %l3, [%sp+96]
-; SPARC-NEXT: st %l2, [%sp+92]
-; SPARC-NEXT: mov %l0, %o0
-; SPARC-NEXT: mov %l0, %o1
-; SPARC-NEXT: mov %l6, %o2
-; SPARC-NEXT: mov %l4, %o3
-; SPARC-NEXT: mov %l0, %o4
+; SPARC-NEXT: st %l2, [%sp+96]
+; SPARC-NEXT: st %l1, [%sp+92]
+; SPARC-NEXT: mov %g0, %o0
+; SPARC-NEXT: mov %g0, %o1
+; SPARC-NEXT: mov %l3, %o2
+; SPARC-NEXT: mov %i3, %o3
+; SPARC-NEXT: mov %g0, %o4
; SPARC-NEXT: call __multi3
-; SPARC-NEXT: mov %l0, %o5
-; SPARC-NEXT: mov %o0, %l2
-; SPARC-NEXT: mov %o1, %l3
-; SPARC-NEXT: mov %o2, %l1
-; SPARC-NEXT: mov %o3, %i2
-; SPARC-NEXT: st %l7, [%sp+96]
+; SPARC-NEXT: mov %g0, %o5
+; SPARC-NEXT: mov %o0, %l1
+; SPARC-NEXT: mov %o1, %l2
+; SPARC-NEXT: mov %o2, %l5
+; SPARC-NEXT: mov %o3, %l7
+; SPARC-NEXT: st %l6, [%sp+96]
; SPARC-NEXT: sra %i4, 31, %o0
-; SPARC-NEXT: st %l5, [%sp+92]
+; SPARC-NEXT: st %l4, [%sp+92]
; SPARC-NEXT: mov %o0, %o1
; SPARC-NEXT: mov %o0, %o2
; SPARC-NEXT: mov %o0, %o3
-; SPARC-NEXT: mov %l6, %o4
+; SPARC-NEXT: mov %l3, %o4
; SPARC-NEXT: call __multi3
-; SPARC-NEXT: mov %l4, %o5
+; SPARC-NEXT: mov %i3, %o5
; SPARC-NEXT: st %i5, [%sp+96]
; SPARC-NEXT: st %i4, [%sp+92]
; SPARC-NEXT: ld [%fp+-24], %i4 ! 4-byte Folded Reload
; SPARC-NEXT: ld [%fp+-20], %i5 ! 4-byte Folded Reload
; SPARC-NEXT: addxcc %o2, %i5, %i5
; SPARC-NEXT: ld [%fp+-16], %g2 ! 4-byte Folded Reload
-; SPARC-NEXT: addxcc %o1, %g2, %l5
+; SPARC-NEXT: addxcc %o1, %g2, %l4
; SPARC-NEXT: ld [%fp+-12], %g2 ! 4-byte Folded Reload
-; SPARC-NEXT: addxcc %o0, %g2, %l7
-; SPARC-NEXT: addcc %i2, %i1, %i1
-; SPARC-NEXT: addxcc %l1, %i0, %i0
-; SPARC-NEXT: addxcc %l3, 0, %i2
-; SPARC-NEXT: addxcc %l2, 0, %g2
-; SPARC-NEXT: addcc %i3, %i1, %i1
-; SPARC-NEXT: ld [%fp+-36], %i3 ! 4-byte Folded Reload
-; SPARC-NEXT: addxcc %i3, %i0, %i0
-; SPARC-NEXT: ld [%fp+-32], %i3 ! 4-byte Folded Reload
-; SPARC-NEXT: addxcc %i3, 0, %i3
-; SPARC-NEXT: ld [%fp+-28], %g3 ! 4-byte Folded Reload
-; SPARC-NEXT: addxcc %g3, 0, %g3
-; SPARC-NEXT: addcc %i2, %i3, %i2
-; SPARC-NEXT: addxcc %g2, %g3, %i3
-; SPARC-NEXT: addxcc %l0, 0, %l1
-; SPARC-NEXT: addxcc %l0, 0, %l2
-; SPARC-NEXT: mov %l0, %o0
-; SPARC-NEXT: mov %l0, %o1
-; SPARC-NEXT: mov %l6, %o2
-; SPARC-NEXT: mov %l4, %o3
-; SPARC-NEXT: mov %l0, %o4
+; SPARC-NEXT: addxcc %o0, %g2, %l6
+; SPARC-NEXT: addcc %l7, %i0, %i0
+; SPARC-NEXT: addxcc %l5, %i1, %g2
+; SPARC-NEXT: addxcc %l2, 0, %g3
+; SPARC-NEXT: addxcc %l1, 0, %g4
+; SPARC-NEXT: addcc %i2, %i0, %i1
+; SPARC-NEXT: addxcc %l0, %g2, %i0
+; SPARC-NEXT: ld [%fp+-32], %i2 ! 4-byte Folded Reload
+; SPARC-NEXT: addxcc %i2, 0, %i2
+; SPARC-NEXT: ld [%fp+-28], %g2 ! 4-byte Folded Reload
+; SPARC-NEXT: addxcc %g2, 0, %g2
+; SPARC-NEXT: addcc %g3, %i2, %i2
+; SPARC-NEXT: addxcc %g4, %g2, %l0
+; SPARC-NEXT: addxcc %g0, 0, %l1
+; SPARC-NEXT: addxcc %g0, 0, %l2
+; SPARC-NEXT: mov %g0, %o0
+; SPARC-NEXT: mov %g0, %o1
+; SPARC-NEXT: mov %l3, %o2
+; SPARC-NEXT: mov %i3, %o3
+; SPARC-NEXT: mov %g0, %o4
; SPARC-NEXT: call __multi3
-; SPARC-NEXT: mov %l0, %o5
+; SPARC-NEXT: mov %g0, %o5
; SPARC-NEXT: addcc %o3, %i2, %i2
-; SPARC-NEXT: addxcc %o2, %i3, %i3
+; SPARC-NEXT: addxcc %o2, %l0, %i3
; SPARC-NEXT: addxcc %o1, %l1, %g2
; SPARC-NEXT: addxcc %o0, %l2, %g3
; SPARC-NEXT: addcc %i2, %i4, %i2
; SPARC-NEXT: addxcc %i3, %i5, %i3
-; SPARC-NEXT: addxcc %g2, %l5, %i4
-; SPARC-NEXT: addxcc %g3, %l7, %i5
+; SPARC-NEXT: addxcc %g2, %l4, %i4
+; SPARC-NEXT: addxcc %g3, %l6, %i5
; SPARC-NEXT: sra %i0, 31, %g2
; SPARC-NEXT: xor %i5, %g2, %i5
; SPARC-NEXT: xor %i3, %g2, %i3
; SPARC-NEXT: or %i2, %i4, %i2
; SPARC-NEXT: or %i2, %i3, %i2
; SPARC-NEXT: cmp %i2, 0
-; SPARC-NEXT: be .LBB0_2
+; SPARC-NEXT: bne .LBB0_1
; SPARC-NEXT: nop
-; SPARC-NEXT: ! %bb.1:
-; SPARC-NEXT: mov 1, %l0
-; SPARC-NEXT: .LBB0_2: ! %start
+; SPARC-NEXT: ! %bb.2: ! %start
+; SPARC-NEXT: ba .LBB0_3
+; SPARC-NEXT: mov %g0, %i4
+; SPARC-NEXT: .LBB0_1:
+; SPARC-NEXT: mov 1, %i4
+; SPARC-NEXT: .LBB0_3: ! %start
; SPARC-NEXT: ld [%fp+-4], %i2 ! 4-byte Folded Reload
; SPARC-NEXT: ld [%fp+-8], %i3 ! 4-byte Folded Reload
; SPARC-NEXT: ret
-; SPARC-NEXT: restore %g0, %l0, %o4
+; SPARC-NEXT: restore
;
; SPARC64-LABEL: muloti_test:
; SPARC64: .cfi_startproc
; SPARC64-NEXT: .register %g2, #scratch
; SPARC64-NEXT: .register %g3, #scratch
; SPARC64-NEXT: ! %bb.0: ! %start
-; SPARC64-NEXT: save %sp, -192, %sp
+; SPARC64-NEXT: save %sp, -176, %sp
; SPARC64-NEXT: .cfi_def_cfa_register %fp
; SPARC64-NEXT: .cfi_window_save
; SPARC64-NEXT: .cfi_register %o7, %i7
; SPARC64-NEXT: srlx %o1, 32, %g3
; SPARC64-NEXT: srlx %i1, 32, %g4
; SPARC64-NEXT: srlx %o0, 32, %g5
-; SPARC64-NEXT: addcc %o1, %i5, %i5
-; SPARC64-NEXT: st %i5, [%fp+2043] ! 4-byte Folded Spill
-; SPARC64-NEXT: addxcc %g3, %g2, %l2
-; SPARC64-NEXT: addxcc %o0, %i1, %l3
-; SPARC64-NEXT: addxcc %g5, %g4, %l4
-; SPARC64-NEXT: mov 0, %i5
-; SPARC64-NEXT: mov %i5, %o0
+; SPARC64-NEXT: addcc %o1, %i5, %l0
+; SPARC64-NEXT: addxcc %g3, %g2, %l1
+; SPARC64-NEXT: addxcc %o0, %i1, %l2
+; SPARC64-NEXT: addxcc %g5, %g4, %l3
+; SPARC64-NEXT: mov %g0, %o0
; SPARC64-NEXT: mov %i3, %o1
-; SPARC64-NEXT: mov %i5, %o2
+; SPARC64-NEXT: mov %g0, %o2
; SPARC64-NEXT: call __multi3
; SPARC64-NEXT: mov %i4, %o3
-; SPARC64-NEXT: mov %o0, %l0
+; SPARC64-NEXT: mov %o0, %i5
; SPARC64-NEXT: mov %o1, %i1
-; SPARC64-NEXT: mov %i5, %o0
+; SPARC64-NEXT: mov %g0, %o0
; SPARC64-NEXT: mov %i0, %o1
-; SPARC64-NEXT: mov %i5, %o2
+; SPARC64-NEXT: mov %g0, %o2
; SPARC64-NEXT: call __multi3
; SPARC64-NEXT: mov %i4, %o3
-; SPARC64-NEXT: srlx %l0, 32, %i4
+; SPARC64-NEXT: srlx %i5, 32, %i4
; SPARC64-NEXT: srlx %o1, 32, %g2
; SPARC64-NEXT: srlx %o0, 32, %g3
-; SPARC64-NEXT: addcc %o1, %l0, %l0
+; SPARC64-NEXT: addcc %o1, %i5, %i5
; SPARC64-NEXT: addxcc %g2, %i4, %i4
-; SPARC64-NEXT: addxcc %o0, 0, %l5
-; SPARC64-NEXT: addxcc %g3, 0, %l6
-; SPARC64-NEXT: mov %i5, %o0
+; SPARC64-NEXT: addxcc %o0, 0, %l4
+; SPARC64-NEXT: addxcc %g3, 0, %l5
+; SPARC64-NEXT: mov %g0, %o0
; SPARC64-NEXT: mov %i3, %o1
-; SPARC64-NEXT: mov %i5, %o2
+; SPARC64-NEXT: mov %g0, %o2
; SPARC64-NEXT: call __multi3
; SPARC64-NEXT: mov %i2, %o3
-; SPARC64-NEXT: srlx %o1, 32, %i3
-; SPARC64-NEXT: srlx %o0, 32, %g2
-; SPARC64-NEXT: addcc %o1, %l0, %l0
-; SPARC64-NEXT: addxcc %i3, %i4, %i3
-; SPARC64-NEXT: addxcc %o0, 0, %i4
-; SPARC64-NEXT: addxcc %g2, 0, %g2
-; SPARC64-NEXT: mov %g0, %l7
-; SPARC64-NEXT: addcc %l5, %i4, %i4
-; SPARC64-NEXT: addxcc %l6, %g2, %l5
-; SPARC64-NEXT: addxcc %l7, 0, %l6
-; SPARC64-NEXT: addxcc %l7, 0, %l1
-; SPARC64-NEXT: mov %i5, %o0
+; SPARC64-NEXT: srlx %o1, 32, %g2
+; SPARC64-NEXT: srlx %o0, 32, %g3
+; SPARC64-NEXT: addcc %o1, %i5, %i3
+; SPARC64-NEXT: addxcc %g2, %i4, %i4
+; SPARC64-NEXT: addxcc %o0, 0, %i5
+; SPARC64-NEXT: addxcc %g3, 0, %g2
+; SPARC64-NEXT: addcc %l4, %i5, %i5
+; SPARC64-NEXT: addxcc %l5, %g2, %l4
+; SPARC64-NEXT: addxcc %g0, 0, %l5
+; SPARC64-NEXT: addxcc %g0, 0, %l6
+; SPARC64-NEXT: mov %g0, %o0
; SPARC64-NEXT: mov %i0, %o1
-; SPARC64-NEXT: mov %i5, %o2
+; SPARC64-NEXT: mov %g0, %o2
; SPARC64-NEXT: call __multi3
; SPARC64-NEXT: mov %i2, %o3
+; SPARC64-NEXT: mov %g0, %i2
; SPARC64-NEXT: srlx %o1, 32, %i0
-; SPARC64-NEXT: srlx %o0, 32, %i2
-; SPARC64-NEXT: addcc %o1, %i4, %i4
-; SPARC64-NEXT: addxcc %i0, %l5, %i0
-; SPARC64-NEXT: addxcc %o0, %l6, %i5
-; SPARC64-NEXT: addxcc %i2, %l1, %i2
-; SPARC64-NEXT: ld [%fp+2043], %g2 ! 4-byte Folded Reload
-; SPARC64-NEXT: addcc %i4, %g2, %i4
-; SPARC64-NEXT: addxcc %i0, %l2, %i0
-; SPARC64-NEXT: addxcc %i5, %l3, %i5
-; SPARC64-NEXT: addxcc %i2, %l4, %i2
+; SPARC64-NEXT: addcc %o1, %i5, %i5
+; SPARC64-NEXT: srlx %o0, 32, %g2
+; SPARC64-NEXT: addxcc %i0, %l4, %i0
+; SPARC64-NEXT: addxcc %o0, %l5, %g3
+; SPARC64-NEXT: addxcc %g2, %l6, %g2
+; SPARC64-NEXT: addcc %i5, %l0, %i5
+; SPARC64-NEXT: addxcc %i0, %l1, %i0
+; SPARC64-NEXT: addxcc %g3, %l2, %g3
+; SPARC64-NEXT: addxcc %g2, %l3, %g2
+; SPARC64-NEXT: srl %g3, 0, %g3
+; SPARC64-NEXT: sllx %g2, 32, %g2
+; SPARC64-NEXT: or %g2, %g3, %g2
+; SPARC64-NEXT: sllx %i4, 32, %i4
+; SPARC64-NEXT: srax %i4, 63, %g3
+; SPARC64-NEXT: xor %g2, %g3, %g2
; SPARC64-NEXT: srl %i5, 0, %i5
-; SPARC64-NEXT: sllx %i2, 32, %i2
-; SPARC64-NEXT: or %i2, %i5, %i2
-; SPARC64-NEXT: sllx %i3, 32, %i3
-; SPARC64-NEXT: srax %i3, 63, %i5
-; SPARC64-NEXT: xor %i2, %i5, %i2
-; SPARC64-NEXT: srl %i4, 0, %i4
; SPARC64-NEXT: sllx %i0, 32, %i0
-; SPARC64-NEXT: or %i0, %i4, %i0
-; SPARC64-NEXT: xor %i0, %i5, %i0
-; SPARC64-NEXT: or %i0, %i2, %i0
+; SPARC64-NEXT: or %i0, %i5, %i0
+; SPARC64-NEXT: xor %i0, %g3, %i0
+; SPARC64-NEXT: or %i0, %g2, %i0
; SPARC64-NEXT: cmp %i0, 0
-; SPARC64-NEXT: movne %xcc, 1, %l7
-; SPARC64-NEXT: srl %l0, 0, %i0
-; SPARC64-NEXT: or %i3, %i0, %i0
-; SPARC64-NEXT: srl %l7, 0, %i2
+; SPARC64-NEXT: movne %xcc, 1, %i2
+; SPARC64-NEXT: srl %i3, 0, %i0
+; SPARC64-NEXT: or %i4, %i0, %i0
+; SPARC64-NEXT: srl %i2, 0, %i2
; SPARC64-NEXT: ret
; SPARC64-NEXT: restore
start:
; V8: ! %bb.0: ! %entry
; V8-NEXT: save %sp, -104, %sp
; V8-NEXT: mov 6, %i0
-; V8-NEXT: mov %g0, %o0
; V8-NEXT: mov 1, %o1
; V8-NEXT: mov 2, %o2
; V8-NEXT: mov 3, %o3
; V8-NEXT: mov 4, %o4
; V8-NEXT: mov 5, %o5
-; V8-NEXT: call foo7
; V8-NEXT: st %i0, [%sp+92]
+; V8-NEXT: call foo7
+; V8-NEXT: mov %g0, %o0
; V8-NEXT: ret
; V8-NEXT: restore %g0, %o0, %o0
;
; V9: ! %bb.0: ! %entry
; V9-NEXT: save %sp, -192, %sp
; V9-NEXT: mov 6, %i0
-; V9-NEXT: mov 0, %o0
; V9-NEXT: mov 1, %o1
; V9-NEXT: mov 2, %o2
; V9-NEXT: mov 3, %o3
; V9-NEXT: mov 4, %o4
; V9-NEXT: mov 5, %o5
-; V9-NEXT: call foo7
; V9-NEXT: stx %i0, [%sp+2223]
+; V9-NEXT: call foo7
+; V9-NEXT: mov %g0, %o0
; V9-NEXT: ret
; V9-NEXT: restore %g0, %o0, %o0
entry:
; SPARC-LABEL: muloti_test:
; SPARC: .cfi_startproc
; SPARC-NEXT: ! %bb.0: ! %start
-; SPARC-NEXT: save %sp, -120, %sp
+; SPARC-NEXT: save %sp, -112, %sp
; SPARC-NEXT: .cfi_def_cfa_register %fp
; SPARC-NEXT: .cfi_window_save
; SPARC-NEXT: .cfi_register %o7, %i7
; SPARC-NEXT: rd %y, %g4
; SPARC-NEXT: st %g4, [%fp+-12] ! 4-byte Folded Spill
; SPARC-NEXT: umul %i4, %i3, %g4
-; SPARC-NEXT: rd %y, %l0
-; SPARC-NEXT: st %l0, [%fp+-16] ! 4-byte Folded Spill
+; SPARC-NEXT: rd %y, %l5
; SPARC-NEXT: st %g2, [%sp+96]
-; SPARC-NEXT: umul %i5, %i3, %l0
+; SPARC-NEXT: umul %i5, %i3, %l1
; SPARC-NEXT: rd %y, %l6
; SPARC-NEXT: st %l4, [%sp+92]
; SPARC-NEXT: umul %l4, %i1, %l2
-; SPARC-NEXT: rd %y, %l1
-; SPARC-NEXT: st %l1, [%fp+-4] ! 4-byte Folded Spill
+; SPARC-NEXT: rd %y, %l0
+; SPARC-NEXT: st %l0, [%fp+-4] ! 4-byte Folded Spill
; SPARC-NEXT: add %g4, %g3, %g3
; SPARC-NEXT: umul %i0, %g2, %g4
-; SPARC-NEXT: rd %y, %l1
-; SPARC-NEXT: st %l1, [%fp+-8] ! 4-byte Folded Spill
+; SPARC-NEXT: rd %y, %l0
+; SPARC-NEXT: st %l0, [%fp+-8] ! 4-byte Folded Spill
; SPARC-NEXT: add %l6, %g3, %l3
; SPARC-NEXT: umul %i1, %g2, %g2
-; SPARC-NEXT: rd %y, %l1
+; SPARC-NEXT: rd %y, %l0
; SPARC-NEXT: add %g4, %l2, %g3
-; SPARC-NEXT: add %l1, %g3, %l2
-; SPARC-NEXT: addcc %g2, %l0, %l7
-; SPARC-NEXT: mov %g0, %l0
-; SPARC-NEXT: addxcc %l2, %l3, %l5
-; SPARC-NEXT: mov %l0, %o0
-; SPARC-NEXT: mov %l0, %o1
+; SPARC-NEXT: add %l0, %g3, %l2
+; SPARC-NEXT: addcc %g2, %l1, %l1
+; SPARC-NEXT: addxcc %l2, %l3, %l7
+; SPARC-NEXT: mov %g0, %o0
+; SPARC-NEXT: mov %g0, %o1
; SPARC-NEXT: mov %i2, %o2
; SPARC-NEXT: mov %i3, %o3
-; SPARC-NEXT: mov %l0, %o4
+; SPARC-NEXT: mov %g0, %o4
; SPARC-NEXT: call __multi3
-; SPARC-NEXT: mov %l0, %o5
-; SPARC-NEXT: addcc %o1, %l7, %i3
-; SPARC-NEXT: addxcc %o0, %l5, %g2
-; SPARC-NEXT: mov 1, %g3
+; SPARC-NEXT: mov %g0, %o5
+; SPARC-NEXT: addcc %o1, %l1, %g3
+; SPARC-NEXT: addxcc %o0, %l7, %g2
+; SPARC-NEXT: mov 1, %g4
; SPARC-NEXT: cmp %g2, %o0
+; SPARC-NEXT: mov %o3, %i3
; SPARC-NEXT: bcs .LBB0_2
-; SPARC-NEXT: mov %g3, %o4
+; SPARC-NEXT: mov %g4, %o3
; SPARC-NEXT: ! %bb.1: ! %start
-; SPARC-NEXT: mov %l0, %o4
+; SPARC-NEXT: mov %g0, %o3
; SPARC-NEXT: .LBB0_2: ! %start
-; SPARC-NEXT: cmp %i3, %o1
+; SPARC-NEXT: cmp %g3, %o1
; SPARC-NEXT: bcs .LBB0_4
-; SPARC-NEXT: mov %g3, %g4
+; SPARC-NEXT: mov %g4, %l1
; SPARC-NEXT: ! %bb.3: ! %start
-; SPARC-NEXT: mov %l0, %g4
+; SPARC-NEXT: mov %g0, %l1
; SPARC-NEXT: .LBB0_4: ! %start
; SPARC-NEXT: cmp %g2, %o0
; SPARC-NEXT: be .LBB0_6
; SPARC-NEXT: nop
; SPARC-NEXT: ! %bb.5: ! %start
-; SPARC-NEXT: mov %o4, %g4
+; SPARC-NEXT: mov %o3, %l1
; SPARC-NEXT: .LBB0_6: ! %start
; SPARC-NEXT: cmp %i2, 0
; SPARC-NEXT: bne .LBB0_8
-; SPARC-NEXT: mov %g3, %i2
+; SPARC-NEXT: mov %g4, %i2
; SPARC-NEXT: ! %bb.7: ! %start
-; SPARC-NEXT: mov %l0, %i2
+; SPARC-NEXT: mov %g0, %i2
; SPARC-NEXT: .LBB0_8: ! %start
; SPARC-NEXT: cmp %i4, 0
; SPARC-NEXT: bne .LBB0_10
-; SPARC-NEXT: mov %g3, %o1
+; SPARC-NEXT: mov %g4, %o1
; SPARC-NEXT: ! %bb.9: ! %start
-; SPARC-NEXT: mov %l0, %o1
+; SPARC-NEXT: mov %g0, %o1
; SPARC-NEXT: .LBB0_10: ! %start
-; SPARC-NEXT: ld [%fp+-16], %l5 ! 4-byte Folded Reload
; SPARC-NEXT: cmp %l5, 0
; SPARC-NEXT: bne .LBB0_12
-; SPARC-NEXT: mov %g3, %o0
+; SPARC-NEXT: mov %g4, %o0
; SPARC-NEXT: ! %bb.11: ! %start
-; SPARC-NEXT: mov %l0, %o0
+; SPARC-NEXT: mov %g0, %o0
; SPARC-NEXT: .LBB0_12: ! %start
; SPARC-NEXT: ld [%fp+-12], %l5 ! 4-byte Folded Reload
; SPARC-NEXT: cmp %l5, 0
; SPARC-NEXT: bne .LBB0_14
-; SPARC-NEXT: mov %g3, %l5
+; SPARC-NEXT: mov %g4, %l5
; SPARC-NEXT: ! %bb.13: ! %start
-; SPARC-NEXT: mov %l0, %l5
+; SPARC-NEXT: mov %g0, %l5
; SPARC-NEXT: .LBB0_14: ! %start
; SPARC-NEXT: cmp %l3, %l6
; SPARC-NEXT: bcs .LBB0_16
-; SPARC-NEXT: mov %g3, %l3
+; SPARC-NEXT: mov %g4, %l3
; SPARC-NEXT: ! %bb.15: ! %start
-; SPARC-NEXT: mov %l0, %l3
+; SPARC-NEXT: mov %g0, %l3
; SPARC-NEXT: .LBB0_16: ! %start
; SPARC-NEXT: cmp %l4, 0
; SPARC-NEXT: bne .LBB0_18
-; SPARC-NEXT: mov %g3, %l4
+; SPARC-NEXT: mov %g4, %l4
; SPARC-NEXT: ! %bb.17: ! %start
-; SPARC-NEXT: mov %l0, %l4
+; SPARC-NEXT: mov %g0, %l4
; SPARC-NEXT: .LBB0_18: ! %start
; SPARC-NEXT: cmp %i0, 0
; SPARC-NEXT: bne .LBB0_20
-; SPARC-NEXT: mov %g3, %l7
+; SPARC-NEXT: mov %g4, %l7
; SPARC-NEXT: ! %bb.19: ! %start
-; SPARC-NEXT: mov %l0, %l7
+; SPARC-NEXT: mov %g0, %l7
; SPARC-NEXT: .LBB0_20: ! %start
; SPARC-NEXT: ld [%fp+-8], %l6 ! 4-byte Folded Reload
; SPARC-NEXT: cmp %l6, 0
; SPARC-NEXT: bne .LBB0_22
-; SPARC-NEXT: mov %g3, %l6
+; SPARC-NEXT: mov %g4, %l6
; SPARC-NEXT: ! %bb.21: ! %start
-; SPARC-NEXT: mov %l0, %l6
+; SPARC-NEXT: mov %g0, %l6
; SPARC-NEXT: .LBB0_22: ! %start
; SPARC-NEXT: and %o1, %i2, %i2
; SPARC-NEXT: ld [%fp+-4], %o1 ! 4-byte Folded Reload
; SPARC-NEXT: cmp %o1, 0
; SPARC-NEXT: and %l7, %l4, %o1
; SPARC-NEXT: bne .LBB0_24
-; SPARC-NEXT: mov %g3, %l4
+; SPARC-NEXT: mov %g4, %l4
; SPARC-NEXT: ! %bb.23: ! %start
-; SPARC-NEXT: mov %l0, %l4
+; SPARC-NEXT: mov %g0, %l4
; SPARC-NEXT: .LBB0_24: ! %start
; SPARC-NEXT: or %i2, %o0, %l7
-; SPARC-NEXT: cmp %l2, %l1
+; SPARC-NEXT: cmp %l2, %l0
; SPARC-NEXT: or %o1, %l6, %l2
; SPARC-NEXT: bcs .LBB0_26
-; SPARC-NEXT: mov %g3, %i2
+; SPARC-NEXT: mov %g4, %i2
; SPARC-NEXT: ! %bb.25: ! %start
-; SPARC-NEXT: mov %l0, %i2
+; SPARC-NEXT: mov %g0, %i2
; SPARC-NEXT: .LBB0_26: ! %start
-; SPARC-NEXT: or %l7, %l5, %l1
+; SPARC-NEXT: or %l7, %l5, %l0
; SPARC-NEXT: or %i5, %i4, %i4
; SPARC-NEXT: cmp %i4, 0
; SPARC-NEXT: or %l2, %l4, %l2
; SPARC-NEXT: bne .LBB0_28
-; SPARC-NEXT: mov %g3, %i4
+; SPARC-NEXT: mov %g4, %i4
; SPARC-NEXT: ! %bb.27: ! %start
-; SPARC-NEXT: mov %l0, %i4
+; SPARC-NEXT: mov %g0, %i4
; SPARC-NEXT: .LBB0_28: ! %start
-; SPARC-NEXT: or %l1, %l3, %i5
+; SPARC-NEXT: or %l0, %l3, %i5
; SPARC-NEXT: or %i1, %i0, %i0
; SPARC-NEXT: cmp %i0, 0
; SPARC-NEXT: bne .LBB0_30
; SPARC-NEXT: or %l2, %i2, %i0
; SPARC-NEXT: ! %bb.29: ! %start
-; SPARC-NEXT: mov %l0, %g3
+; SPARC-NEXT: mov %g0, %g4
; SPARC-NEXT: .LBB0_30: ! %start
-; SPARC-NEXT: and %g3, %i4, %i1
+; SPARC-NEXT: and %g4, %i4, %i1
; SPARC-NEXT: or %i1, %i0, %i0
; SPARC-NEXT: or %i0, %i5, %i0
-; SPARC-NEXT: or %i0, %g4, %i0
+; SPARC-NEXT: or %i0, %l1, %i0
; SPARC-NEXT: and %i0, 1, %i4
; SPARC-NEXT: mov %g2, %i0
-; SPARC-NEXT: mov %i3, %i1
-; SPARC-NEXT: mov %o2, %i2
+; SPARC-NEXT: mov %g3, %i1
; SPARC-NEXT: ret
-; SPARC-NEXT: restore %g0, %o3, %o3
+; SPARC-NEXT: restore %g0, %o2, %o2
;
; SPARC64-LABEL: muloti_test:
; SPARC64: .cfi_startproc
; SPARC64-NEXT: .register %g2, #scratch
+; SPARC64-NEXT: .register %g3, #scratch
; SPARC64-NEXT: ! %bb.0: ! %start
; SPARC64-NEXT: save %sp, -176, %sp
; SPARC64-NEXT: .cfi_def_cfa_register %fp
; SPARC64-NEXT: .cfi_window_save
; SPARC64-NEXT: .cfi_register %o7, %i7
-; SPARC64-NEXT: mov 0, %i4
-; SPARC64-NEXT: mov %i4, %o0
+; SPARC64-NEXT: mov %g0, %o0
; SPARC64-NEXT: mov %i2, %o1
-; SPARC64-NEXT: mov %i4, %o2
+; SPARC64-NEXT: mov %g0, %o2
; SPARC64-NEXT: call __multi3
; SPARC64-NEXT: mov %i1, %o3
-; SPARC64-NEXT: mov %o0, %i5
-; SPARC64-NEXT: mov %o1, %l0
-; SPARC64-NEXT: mov %i4, %o0
+; SPARC64-NEXT: mov %o0, %i4
+; SPARC64-NEXT: mov %o1, %i5
+; SPARC64-NEXT: mov %g0, %o0
; SPARC64-NEXT: mov %i0, %o1
-; SPARC64-NEXT: mov %i4, %o2
+; SPARC64-NEXT: mov %g0, %o2
; SPARC64-NEXT: call __multi3
; SPARC64-NEXT: mov %i3, %o3
-; SPARC64-NEXT: mov %o0, %l1
-; SPARC64-NEXT: add %o1, %l0, %l0
-; SPARC64-NEXT: mov %i4, %o0
+; SPARC64-NEXT: mov %o0, %l0
+; SPARC64-NEXT: add %o1, %i5, %i5
+; SPARC64-NEXT: mov %g0, %o0
; SPARC64-NEXT: mov %i1, %o1
-; SPARC64-NEXT: mov %i4, %o2
+; SPARC64-NEXT: mov %g0, %o2
; SPARC64-NEXT: call __multi3
; SPARC64-NEXT: mov %i3, %o3
-; SPARC64-NEXT: add %o0, %l0, %i1
; SPARC64-NEXT: mov %g0, %i3
+; SPARC64-NEXT: mov %g0, %g2
+; SPARC64-NEXT: mov %g0, %g3
+; SPARC64-NEXT: mov %g0, %g4
+; SPARC64-NEXT: mov %g0, %g5
+; SPARC64-NEXT: add %o0, %i5, %i1
; SPARC64-NEXT: cmp %i1, %o0
-; SPARC64-NEXT: mov %i3, %i4
-; SPARC64-NEXT: movcs %xcc, 1, %i4
-; SPARC64-NEXT: cmp %l1, 0
-; SPARC64-NEXT: mov %i3, %g2
+; SPARC64-NEXT: movcs %xcc, 1, %i3
+; SPARC64-NEXT: cmp %l0, 0
; SPARC64-NEXT: movne %xcc, 1, %g2
; SPARC64-NEXT: cmp %i2, 0
-; SPARC64-NEXT: mov %i3, %i2
-; SPARC64-NEXT: movne %xcc, 1, %i2
+; SPARC64-NEXT: movne %xcc, 1, %g3
; SPARC64-NEXT: cmp %i0, 0
-; SPARC64-NEXT: mov %i3, %i0
-; SPARC64-NEXT: movne %xcc, 1, %i0
-; SPARC64-NEXT: and %i0, %i2, %i0
+; SPARC64-NEXT: movne %xcc, 1, %g4
+; SPARC64-NEXT: and %g4, %g3, %i0
; SPARC64-NEXT: or %i0, %g2, %i0
-; SPARC64-NEXT: cmp %i5, 0
-; SPARC64-NEXT: movne %xcc, 1, %i3
+; SPARC64-NEXT: cmp %i4, 0
+; SPARC64-NEXT: movne %xcc, 1, %g5
+; SPARC64-NEXT: or %i0, %g5, %i0
; SPARC64-NEXT: or %i0, %i3, %i0
-; SPARC64-NEXT: or %i0, %i4, %i0
; SPARC64-NEXT: srl %i0, 0, %i2
; SPARC64-NEXT: mov %i1, %i0
; SPARC64-NEXT: ret
; CHECK-NEXT: .cfi_window_save
; CHECK-NEXT: .cfi_register %o7, %i7
; CHECK-NEXT: st %g0, [%fp+-4]
-; CHECK-NEXT: mov %g0, %i0
-; CHECK-NEXT: cmp %i0, 0
+; CHECK-NEXT: cmp %g0, 0
; CHECK-NEXT: st %g0, [%fp+-8]
; CHECK-NEXT: be .LBB0_1
; CHECK-NEXT: mov 1, %i0
; CHECK-NEXT: st %i1, [%fp+-8]
; CHECK-NEXT: st %i2, [%fp+-12]
; CHECK-NEXT: st %i3, [%fp+-16]
-; CHECK-NEXT: mov %g0, %i0
; CHECK-NEXT: st %i4, [%fp+-20]
; CHECK-NEXT: ret
-; CHECK-NEXT: restore
+; CHECK-NEXT: restore %g0, %g0, %o0
; CHECK-NEXT: .cfi_window_save
; CHECK-NEXT: .cfi_register %o7, %i7
; CHECK-NEXT: st %g0, [%fp+-4]
-; CHECK-NEXT: mov %g0, %i0
-; CHECK-NEXT: cmp %i0, 0
+; CHECK-NEXT: cmp %g0, 0
; CHECK-NEXT: st %g0, [%fp+-8]
; CHECK-NEXT: be .LBB0_1
; CHECK-NEXT: mov 1, %i0
; CHECK-NEXT: st %i1, [%fp+-8]
; CHECK-NEXT: st %i2, [%fp+-12]
; CHECK-NEXT: st %i3, [%fp+-16]
-; CHECK-NEXT: mov %g0, %i0
; CHECK-NEXT: st %i4, [%fp+-20]
; CHECK-NEXT: ret
-; CHECK-NEXT: restore
+; CHECK-NEXT: restore %g0, %g0, %o0
%1 = alloca i32, align 4
%2 = alloca i32, align 4
%3 = alloca i32, align 4