           continue;
         }
       } else {
-        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
-            (TII->get(Use->getMachineOpcode()).TSFlags &
-            R600_InstFlag::VECTOR)) {
-          continue;
-        }
-
-        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
-                                        AMDGPU::OpName::literal);
-        if (ImmIdx == -1) {
-          continue;
-        }
-
-        if (TII->getOperandIdx(Use->getMachineOpcode(),
-                               AMDGPU::OpName::dst) != -1) {
-          // subtract one from ImmIdx, because the DST operand is usually index
-          // 0 for MachineInstrs, but we have no DST in the Ops vector.
-          ImmIdx--;
+        switch(Use->getMachineOpcode()) {
+        case AMDGPU::REG_SEQUENCE: break;
+        default:
+          if (!TII->isALUInstr(Use->getMachineOpcode()) ||
+              (TII->get(Use->getMachineOpcode()).TSFlags &
+              R600_InstFlag::VECTOR)) {
+            continue;
+          }
+        }
         }
         // Check that we aren't already using an immediate.
         // XXX: It's possible for an instruction to have more than one
         // immediate operand, but this is not supported yet.
         if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+          int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
+                                          AMDGPU::OpName::literal);
+          if (ImmIdx == -1) {
+            continue;
+          }
+
+          if (TII->getOperandIdx(Use->getMachineOpcode(),
+                                 AMDGPU::OpName::dst) != -1) {
+            // subtract one from ImmIdx, because the DST operand is usually index
+            // 0 for MachineInstrs, but we have no DST in the Ops vector.
+            ImmIdx--;
+          }
           ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
           assert(C);
   store float %0, float addrspace(1)* %out
   ret void
 }
+
+; Make sure inline literals are folded into REG_SEQUENCE instructions.
+; CHECK: @inline_literal_reg_sequence
+; CHECK: MOV T[[GPR:[0-9]]].X, 0.0
+; CHECK-NEXT: MOV T[[GPR]].Y, 0.0
+; CHECK-NEXT: MOV T[[GPR]].Z, 0.0
+; CHECK-NEXT: MOV * T[[GPR]].W, 0.0
+
+define void @inline_literal_reg_sequence(<4 x i32> addrspace(1)* %out) {
+entry:
+  store <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> addrspace(1)* %out
+  ret void
+}