void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
void markValueUsed(LiveInterval*, VNInfo*);
+ bool canGuaranteeAssignmentAfterRemat(unsigned VReg, MachineInstr &MI);
bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
void reMaterializeAll();
} while (!WorkList.empty());
}
+bool InlineSpiller::canGuaranteeAssignmentAfterRemat(unsigned VReg,
+                                                     MachineInstr &MI) {
+  // Here's a quick explanation of the problem we're trying to handle:
+  // * There are some pseudo instructions with more vreg uses than there are
+  //   physical registers on the machine.
+  // * This is normally handled by spilling the vreg, and folding the reload
+  //   into the user instruction. (Thus decreasing the number of used vregs
+  //   until the remainder can be assigned to physregs.)
+  // * However, since we may try to spill vregs in any order, we can end up
+  //   trying to spill each operand to the instruction, and then rematting it
+  //   instead. When that happens, the new live intervals (for the remats) are
+  //   expected to be trivially assignable (i.e. RS_Done). However, since we
+  //   may have more remats than physregs, we're guaranteed to fail to assign
+  //   one.
+  // At the moment, we only handle this for STATEPOINTs since they're the only
+  // pseudo op where we've seen this. If we start seeing other instructions
+  // with the same problem, we need to revisit this.
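+  // Illustrative numbers only: x86-64 has just 16 general purpose registers,
+  // while the statepoint tests in this patch keep 26 values (%a..%z) live at
+  // the call, so at most 16 of the rematted intervals could ever be assigned.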
+  return (MI.getOpcode() != TargetOpcode::STATEPOINT);
+}
+
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
// Analyze instruction
return true;
}
+  // If we can't guarantee that we'll be able to actually assign the new vreg,
+  // we can't remat.
+  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg, MI))
+    return false;
+
// Allocate a new register for the remat.
unsigned NewVReg = Edit->createFrom(Original);
ret void
}
+; A test where values are not directly foldable from stack slots.
+define void @test7(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n, i32 %o, i32 %p, i32 %q, i32 %r, i32 %s, i32 %t, i32 %u, i32 %v, i32 %w, i32 %x, i32 %y, i32 %z) gc "statepoint-example" {
+; The code for this is terrible; for the moment, simply check for correctness.
+; CHECK-LABEL: test7:
+; CHECK: callq _bar
+entry:
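+ ; Zero-extending each argument produces a fresh value, so the statepoint
+ ; operands no longer live in the incoming argument stack slots and cannot
+ ; be folded directly from them.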
+ %a64 = zext i32 %a to i64
+ %b64 = zext i32 %b to i64
+ %c64 = zext i32 %c to i64
+ %d64 = zext i32 %d to i64
+ %e64 = zext i32 %e to i64
+ %f64 = zext i32 %f to i64
+ %g64 = zext i32 %g to i64
+ %h64 = zext i32 %h to i64
+ %i64 = zext i32 %i to i64
+ %j64 = zext i32 %j to i64
+ %k64 = zext i32 %k to i64
+ %l64 = zext i32 %l to i64
+ %m64 = zext i32 %m to i64
+ %n64 = zext i32 %n to i64
+ %o64 = zext i32 %o to i64
+ %p64 = zext i32 %p to i64
+ %q64 = zext i32 %q to i64
+ %r64 = zext i32 %r to i64
+ %s64 = zext i32 %s to i64
+ %t64 = zext i32 %t to i64
+ %u64 = zext i32 %u to i64
+ %v64 = zext i32 %v to i64
+ %w64 = zext i32 %w to i64
+ %x64 = zext i32 %x to i64
+ %y64 = zext i32 %y to i64
+ %z64 = zext i32 %z to i64
+ %statepoint_token1 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* @bar, i32 0, i32 2, i32 0, i32 26, i64 %a64, i64 %b64, i64 %c64, i64 %d64, i64 %e64, i64 %f64, i64 %g64, i64 %h64, i64 %i64, i64 %j64, i64 %k64, i64 %l64, i64 %m64, i64 %n64, i64 %o64, i64 %p64, i64 %q64, i64 %r64, i64 %s64, i64 %t64, i64 %u64, i64 %v64, i64 %w64, i64 %x64, i64 %y64, i64 %z64)
+ ret void
+}
+
+; A variant of test7 with mixed types chosen to exercise register aliases.
+define void @test8(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n, i32 %o, i32 %p, i32 %q, i32 %r, i32 %s, i32 %t, i32 %u, i32 %v, i32 %w, i32 %x, i32 %y, i32 %z) gc "statepoint-example" {
+; The code for this is terrible; for the moment, simply check for correctness.
+; CHECK-LABEL: test8:
+; CHECK: callq _bar
+entry:
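+ ; The i8/i16 truncates and i64 extends force operands into sub-registers
+ ; that alias wider registers (e.g. AL/AX/RAX on x86), stressing alias
+ ; handling during assignment.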
+ %a8 = trunc i32 %a to i8
+ %b8 = trunc i32 %b to i8
+ %c8 = trunc i32 %c to i8
+ %d8 = trunc i32 %d to i8
+ %e16 = trunc i32 %e to i16
+ %f16 = trunc i32 %f to i16
+ %g16 = trunc i32 %g to i16
+ %h16 = trunc i32 %h to i16
+ %i64 = zext i32 %i to i64
+ %j64 = zext i32 %j to i64
+ %k64 = zext i32 %k to i64
+ %l64 = zext i32 %l to i64
+ %m64 = zext i32 %m to i64
+ %n64 = zext i32 %n to i64
+ %o64 = zext i32 %o to i64
+ %p64 = zext i32 %p to i64
+ %q64 = zext i32 %q to i64
+ %r64 = zext i32 %r to i64
+ %s64 = zext i32 %s to i64
+ %t64 = zext i32 %t to i64
+ %u64 = zext i32 %u to i64
+ %v64 = zext i32 %v to i64
+ %w64 = zext i32 %w to i64
+ %x64 = zext i32 %x to i64
+ %y64 = zext i32 %y to i64
+ %z64 = zext i32 %z to i64
+ %statepoint_token1 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* @bar, i32 0, i32 2, i32 0, i32 26, i8 %a8, i8 %b8, i8 %c8, i8 %d8, i16 %e16, i16 %f16, i16 %g16, i16 %h16, i64 %i64, i64 %j64, i64 %k64, i64 %l64, i64 %m64, i64 %n64, i64 %o64, i64 %p64, i64 %q64, i64 %r64, i64 %s64, i64 %t64, i64 %u64, i64 %v64, i64 %w64, i64 %x64, i64 %y64, i64 %z64)
+ ret void
+}
; CHECK: Ltmp0-_test1
; CHECK: .byte 1