} while (0)
-#define ASSEMBLE_BRANCH_TO(target) \
- do { \
- bool fallthrough = IsNextInAssemblyOrder(target); \
- if (!fallthrough) __ B(GetLabel(target)); \
- } while (0)
-
-
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
i.InputInt8(2));
break;
- case kArm64Tbz:
- __ Tbz(i.InputRegister64(0), i.InputInt6(1), GetLabel(i.InputRpo(2)));
- ASSEMBLE_BRANCH_TO(i.InputRpo(3));
- break;
- case kArm64Tbz32:
- __ Tbz(i.InputRegister32(0), i.InputInt5(1), GetLabel(i.InputRpo(2)));
- ASSEMBLE_BRANCH_TO(i.InputRpo(3));
- break;
- case kArm64Tbnz:
- __ Tbnz(i.InputRegister64(0), i.InputInt6(1), GetLabel(i.InputRpo(2)));
- ASSEMBLE_BRANCH_TO(i.InputRpo(3));
+ case kArm64TestAndBranch32:
+ case kArm64TestAndBranch:
+ // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
break;
- case kArm64Tbnz32:
- __ Tbnz(i.InputRegister32(0), i.InputInt5(1), GetLabel(i.InputRpo(2)));
- ASSEMBLE_BRANCH_TO(i.InputRpo(3));
- break;
- case kArm64Cbz32:
- __ Cbz(i.InputRegister32(0), GetLabel(i.InputRpo(1)));
- ASSEMBLE_BRANCH_TO(i.InputRpo(2));
- break;
- case kArm64Cbnz32:
- __ Cbnz(i.InputRegister32(0), GetLabel(i.InputRpo(1)));
- ASSEMBLE_BRANCH_TO(i.InputRpo(2));
+ case kArm64CompareAndBranch32:
+ // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64Claim: {
int words = MiscField::decode(instr->opcode());
Arm64OperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- switch (branch->condition) {
- case kUnorderedEqual:
- __ B(vs, flabel);
- // Fall through.
- case kEqual:
- __ B(eq, tlabel);
- break;
- case kUnorderedNotEqual:
- __ B(vs, tlabel);
- // Fall through.
- case kNotEqual:
- __ B(ne, tlabel);
- break;
- case kSignedLessThan:
- __ B(lt, tlabel);
- break;
- case kSignedGreaterThanOrEqual:
- __ B(ge, tlabel);
- break;
- case kSignedLessThanOrEqual:
- __ B(le, tlabel);
- break;
- case kSignedGreaterThan:
- __ B(gt, tlabel);
- break;
- case kUnorderedLessThan:
- __ B(vs, flabel);
- // Fall through.
- case kUnsignedLessThan:
- __ B(lo, tlabel);
- break;
- case kUnorderedGreaterThanOrEqual:
- __ B(vs, tlabel);
- // Fall through.
- case kUnsignedGreaterThanOrEqual:
- __ B(hs, tlabel);
- break;
- case kUnorderedLessThanOrEqual:
- __ B(vs, flabel);
- // Fall through.
- case kUnsignedLessThanOrEqual:
- __ B(ls, tlabel);
- break;
- case kUnorderedGreaterThan:
- __ B(vs, tlabel);
- // Fall through.
- case kUnsignedGreaterThan:
- __ B(hi, tlabel);
- break;
- case kOverflow:
- __ B(vs, tlabel);
- break;
- case kNotOverflow:
- __ B(vc, tlabel);
- break;
+ FlagsCondition condition = branch->condition;
+ ArchOpcode opcode = instr->arch_opcode();
+
+ if (opcode == kArm64CompareAndBranch32) {
+ switch (condition) {
+ case kEqual:
+ __ Cbz(i.InputRegister32(0), tlabel);
+ break;
+ case kNotEqual:
+ __ Cbnz(i.InputRegister32(0), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (opcode == kArm64TestAndBranch32) {
+ switch (condition) {
+ case kEqual:
+ __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+ break;
+ case kNotEqual:
+ __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (opcode == kArm64TestAndBranch) {
+ switch (condition) {
+ case kEqual:
+ __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+ break;
+ case kNotEqual:
+ __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (condition) {
+ case kUnorderedEqual:
+ __ B(vs, flabel);
+ // Fall through.
+ case kEqual:
+ __ B(eq, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ B(ne, tlabel);
+ break;
+ case kSignedLessThan:
+ __ B(lt, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ B(ge, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ B(le, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ B(gt, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ B(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ B(lo, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ B(hs, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ B(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ B(ls, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ B(hi, tlabel);
+ break;
+ case kOverflow:
+ __ B(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ B(vc, tlabel);
+ break;
+ }
}
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
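
For context, a sketch (not part of the patch) of how a branch on `(x & 0x04) != 0` flows through the new path; the w0 register and bit index are illustrative:

    // Selector: Emit(cont.Encode(kArm64TestAndBranch32), ...) with condition
    // kNotEqual and inputs {value register, bit index 2, true block, false block}.
    // AssembleArchBranch then emits a single test-and-branch instruction:
    __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);  // tbnz w0, #2, <true block>
    // followed by "__ B(flabel)" only when the false block is not the
    // fallthrough, which is what replaces the removed ASSEMBLE_BRANCH_TO macro.
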
V(Arm64Sxtw) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
- V(Arm64Tbz) \
- V(Arm64Tbz32) \
- V(Arm64Tbnz) \
- V(Arm64Tbnz32) \
- V(Arm64Cbz32) \
- V(Arm64Cbnz32) \
+ V(Arm64TestAndBranch32) \
+ V(Arm64TestAndBranch) \
+ V(Arm64CompareAndBranch32) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePairZero) \
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
- ArchOpcode opcode =
- (cont.condition() == kEqual) ? kArm64Tbz32 : kArm64Tbnz32;
- Emit(opcode, NULL, g.UseRegister(m.left().node()),
+ Emit(cont.Encode(kArm64TestAndBranch32), NULL,
+ g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros32(m.right().Value())),
g.Label(cont.true_block()),
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
- ArchOpcode opcode =
- (cont.condition() == kEqual) ? kArm64Tbz : kArm64Tbnz;
- Emit(opcode, NULL, g.UseRegister(m.left().node()),
+ Emit(cont.Encode(kArm64TestAndBranch), NULL,
+ g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros64(m.right().Value())),
g.Label(cont.true_block()),
}
  // Branch could not be combined with a compare; compare against 0 and branch.
- DCHECK((cont.condition() == kEqual) || (cont.condition() == kNotEqual));
- ArchOpcode opcode = (cont.condition() == kEqual) ? kArm64Cbz32 : kArm64Cbnz32;
- Emit(opcode, NULL, g.UseRegister(value), g.Label(cont.true_block()),
+ Emit(cont.Encode(kArm64CompareAndBranch32), NULL, g.UseRegister(value),
+ g.Label(cont.true_block()),
g.Label(cont.false_block()))->MarkAsControl();
}
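
cont.Encode() is what lets the selector drop the explicit kEqual/kNotEqual opcode choice above: it packs the condition into spare bits of the opcode word, so AssembleArchBranch can recover it as branch->condition. A minimal sketch, assuming the FlagsContinuation helper from src/compiler/instruction-selector.h of this vintage (field names from memory; check against the tree):

    InstructionCode FlagsContinuation::Encode(InstructionCode opcode) {
      opcode |= FlagsModeField::encode(mode_);  // kFlags_branch / kFlags_set / kFlags_none
      if (mode_ != kFlags_none) {
        opcode |= FlagsConditionField::encode(condition_);  // e.g. kNotEqual
      }
      return opcode;
    }
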
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Cbnz32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
}
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Cbz32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
}
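
The new flags_condition() expectations in these tests pass because the condition round-trips through the opcode word. A sketch of the accessor, assuming the Instruction interface of this period:

    // Decodes the FlagsCondition that Encode() packed into the opcode.
    FlagsCondition Instruction::flags_condition() const {
      return FlagsConditionField::decode(opcode());
    }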