case kUnsignedGreaterThan:
__ b(hi, tlabel);
break;
+ case kOverflow:
+ __ b(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ b(vc, tlabel);
+ break;
}
if (!fallthru) __ b(flabel); // no fallthru to flabel.
__ bind(&done);
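Note: the vs/vc conditions test the V flag, so the arithmetic instruction assembled before this switch must update the flags. On ARM that is the S-suffixed form; a minimal macro-assembler sketch (r0-r2 and tlabel are placeholders):

    __ add(r0, r1, Operand(r2), SetCC);  // "adds": also updates N, Z, C, V
    __ b(vs, tlabel);                    // taken iff V is set (signed overflow)
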
ArmOperandConverter i(this, instr);
Label done;
- // Materialize a full 32-bit 1 or 0 value.
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
switch (condition) {
case kUnorderedEqual:
case kUnsignedGreaterThan:
cc = hi;
break;
+ case kOverflow:
+ cc = vs;
+ break;
+ case kNotOverflow:
+ cc = vc;
+ break;
}
__ bind(&check);
__ mov(reg, Operand(0));
Int32BinopMatcher m(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
+ InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
+ const size_t output_count = ARRAY_SIZE(outputs);
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
- InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
- const size_t output_count = ARRAY_SIZE(outputs);
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
+
+
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ InstructionCode reverse_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[3];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
+ m.left().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseRegister(m.right().node());
+ }
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineAsRegister(projections[0]);
+ }
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ outputs[output_count++] = g.DefineAsRegister(projections[1]);
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+ ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
+
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
}
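VisitBinopWithOverflow mirrors VisitBinop's operand matching: an immediate or shift operand can be folded from either side because add commutes (reverse_opcode handles the swapped form; for add both opcodes are kArmAdd). Roughly, using the addressing-mode names that appear elsewhere in this patch:

    x + imm       -> kArmAdd, kMode_Operand2_I      (immediate folded)
    x + (y << z)  -> kArmAdd, a shift addressing mode (shift folded)
    x + y         -> kArmAdd, kMode_Operand2_R      (plain register form)
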
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kArmAdd, kArmAdd);
+}
+
+
void InstructionSelector::VisitInt32Sub(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
-
- switch (ArchOpcodeField::decode(instr->opcode())) {
+ InstructionCode opcode = instr->opcode();
+ switch (ArchOpcodeField::decode(opcode)) {
case kArchJmp:
__ B(code_->GetLabel(i.InputBlock(0)));
break;
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kArm64Add32:
- __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+ i.InputOperand32(1));
+ } else {
+ __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ }
break;
case kArm64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
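Unlike ARM, the ARM64 macro assembler has no flag-update argument; the flag-setting add is the separate Adds mnemonic, hence the FlagsModeField check in kArm64Add32 above. A minimal sketch (w0-w2 and tlabel are placeholders):

    __ Adds(w0, w1, Operand(w2));  // 32-bit add that also sets NZCV
    __ B(vs, tlabel);              // branch on signed overflow
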
case kUnsignedGreaterThan:
__ B(hi, tlabel);
break;
+ case kOverflow:
+ __ B(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ B(vc, tlabel);
+ break;
}
if (!fallthru) __ B(flabel); // no fallthru to flabel.
__ Bind(&done);
Arm64OperandConverter i(this, instr);
Label done;
- // Materialize a full 64-bit 1 or 0 value.
+ // Materialize a full 64-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = nv;
switch (condition) {
case kUnorderedEqual:
case kUnsignedGreaterThan:
cc = hi;
break;
+ case kOverflow:
+ cc = vs;
+ break;
+ case kNotOverflow:
+ cc = vc;
+ break;
}
__ bind(&check);
__ Cset(reg, cc);
}
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ Arm64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[2];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseRegister(m.right().node());
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineAsRegister(projections[0]);
+ }
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ outputs[output_count++] = g.DefineAsRegister(projections[1]);
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
+
+
void InstructionSelector::VisitLoad(Node* node) {
MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
Arm64OperandGenerator g(this);
}
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kArm64Add32);
+}
+
+
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop(this, node, kArm64Add, kArithimeticImm, true);
}
return gen_->schedule()->GetBlockById(block_id);
}
- Register OutputRegister() { return ToRegister(instr_->Output()); }
+ Register OutputRegister(int index = 0) {
+ return ToRegister(instr_->OutputAt(index));
+ }
DoubleRegister OutputDoubleRegister() {
return ToDoubleRegister(instr_->Output());
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
}
if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
__ bind(&done);
IA32OperandConverter i(this, instr);
Label done;
- // Materialize a full 32-bit 1 or 0 value.
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
case kUnsignedGreaterThan:
cc = above;
break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
}
__ bind(&check);
if (reg.is_byte_register()) {
// Shared routine for multiple binary operations.
-static inline void VisitBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
+ Int32BinopMatcher m(node);
// TODO(turbofan): match complex addressing modes.
// TODO(turbofan): if commutative, pick the non-live-in operand as the left,
// as this might be the last use and therefore its register can be reused.
- if (g.CanBeImmediate(right)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
- g.UseImmediate(right));
- } else if (g.CanBeImmediate(left) &&
- node->op()->HasProperty(Operator::kCommutative)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
- g.UseImmediate(left));
+ if (g.CanBeImmediate(m.right().node())) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(m.left().node()),
+ g.UseImmediate(m.right().node()));
} else {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.Use(m.right().node()));
+ }
+}
+
+
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[2];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+ // TODO(turbofan): if commutative, pick the non-live-in operand as the left,
+ // as this might be the last use and therefore its register can be reused.
+ if (g.CanBeImmediate(m.right().node())) {
+ inputs[input_count++] = g.Use(m.left().node());
+ inputs[input_count++] = g.UseImmediate(m.right().node());
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.Use(m.right().node());
+ }
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
}
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ // TODO(turbofan): Use byte register here.
+ outputs[output_count++] =
+ (projections[0] ? g.DefineAsRegister(projections[1])
+ : g.DefineSameAsFirst(projections[1]));
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
}
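Because IA32 add is two-address, a live value projection is defined same-as-first so it can reuse the left operand's register; the overflow projection then needs a register of its own (the same-as-input policy is only legal for the first output, see the register-allocator change below). The node pattern being matched looks like this (a sketch; graph, machine, common, a and b are placeholders):

    Node* add = graph->NewNode(machine->Int32AddWithOverflow(), a, b);
    Node* val = graph->NewNode(common->Projection(0), add);  // the 32-bit sum
    Node* ovf = graph->NewNode(common->Projection(1), add);  // 1 iff overflow
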
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kIA32Add);
+}
+
+
void InstructionSelector::VisitInt32Sub(Node* node) {
IA32OperandGenerator g(this);
Int32BinopMatcher m(node);
kUnorderedLessThan,
kUnorderedGreaterThanOrEqual,
kUnorderedLessThanOrEqual,
- kUnorderedGreaterThan
+ kUnorderedGreaterThan,
+ kOverflow,
+ kNotOverflow
};
OStream& operator<<(OStream& os, const FlagsCondition& fc);
typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
typedef BitField<AddressingMode, 7, 4> AddressingModeField;
typedef BitField<FlagsMode, 11, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 13, 4> FlagsConditionField;
+typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
typedef BitField<int, 13, 19> MiscField;
} // namespace compiler
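FlagsCondition no longer fits in 4 bits once kOverflow and kNotOverflow are added, hence the widening to 5. For reference, this is how a selector packs a single InstructionCode word (mirroring VisitBinopWithOverflow above):

    InstructionCode opcode = kArmAdd;                         // bits 0-6
    opcode |= AddressingModeField::encode(kMode_Operand2_R);  // bits 7-10
    opcode |= FlagsModeField::encode(kFlags_set);             // bits 11-12
    opcode |= FlagsConditionField::encode(kOverflow);         // bits 13-17
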
switch (condition_) {
case kEqual:
case kNotEqual:
+ case kOverflow:
+ case kNotOverflow:
return;
case kSignedLessThan:
condition_ = kSignedGreaterThan;
if (buffer->descriptor->ReturnCount() == 1) {
buffer->output_nodes[0] = call;
} else {
- // Iterate over all uses of {call} and collect the projections into the
- // {result} buffer.
- for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
- if ((*i)->opcode() == IrOpcode::kProjection) {
- int index = OpParameter<int32_t>(*i);
- ASSERT_GE(index, 0);
- ASSERT_LT(index, buffer->descriptor->ReturnCount());
- ASSERT_EQ(NULL, buffer->output_nodes[index]);
- buffer->output_nodes[index] = *i;
- }
- }
+ call->CollectProjections(buffer->descriptor->ReturnCount(),
+ buffer->output_nodes);
}
// Filter out the outputs that aren't live because no projection uses them.
case IrOpcode::kIfFalse:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
- case IrOpcode::kProjection:
case IrOpcode::kLazyDeoptimization:
case IrOpcode::kContinuation:
// No code needed for these graph artifacts.
return;
- case IrOpcode::kPhi:
- return VisitPhi(node);
case IrOpcode::kParameter: {
int index = OpParameter<int>(node);
MachineRepresentation rep = linkage()
MarkAsRepresentation(rep, node);
return VisitParameter(node);
}
+ case IrOpcode::kPhi:
+ return VisitPhi(node);
+ case IrOpcode::kProjection:
+ return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kExternalConstant:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
return VisitInt32Add(node);
+ case IrOpcode::kInt32AddWithOverflow:
+ return VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
return VisitInt32Sub(node);
case IrOpcode::kInt32Mul:
#endif // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_TARGET
+void InstructionSelector::VisitParameter(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
+ OpParameter<int>(node))));
+}
+
+
void InstructionSelector::VisitPhi(Node* node) {
// TODO(bmeurer): Emit a PhiInstruction here.
for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
}
-void InstructionSelector::VisitParameter(Node* node) {
- OperandGenerator g(this);
- Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
- OpParameter<int>(node))));
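+// A projection itself generates no code: the node that produced it defines
+// one output per live projection (see VisitBinopWithOverflow). Just mark the
+// inputs as used so that the defining node gets selected.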
+void InstructionSelector::VisitProjection(Node* node) {
+ for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+ MarkAsUsed(*i);
+ }
}
void VisitWord64Compare(Node* node, FlagsContinuation* cont);
void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
- void VisitPhi(Node* node);
void VisitParameter(Node* node);
+ void VisitPhi(Node* node);
+ void VisitProjection(Node* node);
void VisitConstant(Node* node);
void VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization);
return os << "unordered less than or equal";
case kUnorderedGreaterThan:
return os << "unordered greater than";
+ case kOverflow:
+ return os << "overflow";
+ case kNotOverflow:
+ return os << "not overflow";
}
UNREACHABLE();
return os;
Node* Int32Add(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
}
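+  // Either out-parameter may be NULL when only one of the two results is
+  // needed; the instruction selectors then omit the dead output. E.g. a
+  // branch-free overflow check (a sketch, m being an assembler with this
+  // mixin):
+  //   Node* ovf;
+  //   m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(1), NULL, &ovf);
+  //   m.Return(ovf);  // 1 iff the addition overflowed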
+ void Int32AddWithOverflow(Node* a, Node* b, Node** val_return,
+ Node** ovf_return) {
+ Node* add = NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
+ if (val_return) *val_return = NEW_NODE_1(COMMON()->Projection(0), add);
+ if (ovf_return) *ovf_return = NEW_NODE_1(COMMON()->Projection(1), add);
+ }
Node* Int32Sub(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
}
SIMPLE(name, \
Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
1)
+#define BINOP_ACO(name) \
+ SIMPLE(name, \
+ Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
+ 2)
#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
#define WORD_SIZE(x) return is64() ? Word64##x() : Word32##x()
Operator* Word64Equal() { BINOP_C(Word64Equal); }
Operator* Int32Add() { BINOP_AC(Int32Add); }
+ Operator* Int32AddWithOverflow() { BINOP_ACO(Int32AddWithOverflow); }
Operator* Int32Sub() { BINOP(Int32Sub); }
Operator* Int32Mul() { BINOP_AC(Int32Mul); }
Operator* Int32Div() { BINOP(Int32Div); }
#include "src/compiler/node.h"
+#include "src/compiler/generic-node-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
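+// Fills {projections} with this node's kProjection uses, indexed by each
+// projection's parameter; slots without a use are set to NULL. Typical use
+// from the instruction selectors (sketch):
+//   Node* projections[2];
+//   node->CollectProjections(ARRAY_SIZE(projections), projections);
+//   // projections[0]: value use, projections[1]: overflow use (or NULL).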
+void Node::CollectProjections(int projection_count, Node** projections) {
+ for (int i = 0; i < projection_count; ++i) projections[i] = NULL;
+ for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+ if ((*i)->opcode() != IrOpcode::kProjection) continue;
+ int32_t index = OpParameter<int32_t>(*i);
+ ASSERT_GE(index, 0);
+ ASSERT_LT(index, projection_count);
+ ASSERT_EQ(NULL, projections[index]);
+ projections[index] = *i;
+ }
+}
+
+
OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
}
return os;
}
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
: GenericNode<NodeData, Node>(graph, input_count) {}
void Initialize(Operator* op) { set_op(op); }
+
+ void CollectProjections(int projection_count, Node** projections);
};
OStream& operator<<(OStream& os, const Node& n);
V(Word64Sar) \
V(Word64Equal) \
V(Int32Add) \
+ V(Int32AddWithOverflow) \
V(Int32Sub) \
V(Int32Mul) \
V(Int32Div) \
// Handle "output same as input" for second instruction.
for (size_t i = 0; i < second->OutputCount(); i++) {
- InstructionOperand* output = second->Output();
+ InstructionOperand* output = second->OutputAt(i);
if (!output->IsUnallocated()) continue;
UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
if (second_output->HasSameAsInputPolicy()) {
- ASSERT(second->OutputCount() == 1); // Only valid for one output.
+ ASSERT(i == 0); // Only valid for first output.
UnallocatedOperand* cur_input =
UnallocatedOperand::cast(second->InputAt(0));
int output_vreg = second_output->virtual_register();
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
}
if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
__ bind(&done);
X64OperandConverter i(this, instr);
Label done;
- // Materialize a full 32-bit 1 or 0 value.
+ // Materialize a full 64-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
case kUnsignedGreaterThan:
cc = above;
break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
}
__ bind(&check);
__ setcc(cc, reg);
}
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[2];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+ // TODO(turbofan): if commutative, pick the non-live-in operand as the left,
+ // as this might be the last use and therefore its register can be reused.
+ if (g.CanBeImmediate(m.right().node())) {
+ inputs[input_count++] = g.Use(m.left().node());
+ inputs[input_count++] = g.UseImmediate(m.right().node());
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.Use(m.right().node());
+ }
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
+ }
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ outputs[output_count++] =
+ (projections[0] ? g.DefineAsRegister(projections[1])
+ : g.DefineSameAsFirst(projections[1]));
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
+
+
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kX64And32, true);
}
}
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kX64Add32);
+}
+
+
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop(this, node, kX64Add, true);
}
}
+TEST(InstructionSelectorInt32AddWithOverflowP) {
+ {
+ InstructionSelectorTester m;
+ Node* ovf;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Parameter(1), NULL, &ovf);
+ m.Return(ovf);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Parameter(1), &val, NULL);
+ m.Return(val);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Parameter(1), &val, &ovf);
+ m.Return(m.Word32Equal(val, ovf));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+}
+
+
+TEST(InstructionSelectorInt32AddWithOverflowImm) {
+ Immediates immediates;
+ for (Immediates::const_iterator i = immediates.begin(); i != immediates.end();
+ ++i) {
+ int32_t imm = *i;
+ {
+ InstructionSelectorTester m;
+ Node* ovf;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(imm), NULL, &ovf);
+ m.Return(ovf);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* ovf;
+ m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0), NULL, &ovf);
+ m.Return(ovf);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(imm), &val, NULL);
+ m.Return(val);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val;
+ m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0), &val, NULL);
+ m.Return(val);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(imm), &val, &ovf);
+ m.Return(m.Word32Equal(val, ovf));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0), &val, &ovf);
+ m.Return(m.Word32Equal(val, ovf));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST(InstructionSelectorInt32AddWithOverflowAndShiftP) {
+ Shifts shifts;
+ for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+ Shift shift = *i;
+ {
+ InstructionSelectorTester m;
+ Node* ovf;
+ m.Int32AddWithOverflow(
+ m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
+ NULL, &ovf);
+ m.Return(ovf);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* ovf;
+ m.Int32AddWithOverflow(
+ m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2),
+ NULL, &ovf);
+ m.Return(ovf);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val;
+ m.Int32AddWithOverflow(
+ m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
+ &val, NULL);
+ m.Return(val);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val;
+ m.Int32AddWithOverflow(
+ m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2),
+ &val, NULL);
+ m.Return(val);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(
+ m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
+ &val, &ovf);
+ m.Return(m.Word32Equal(val, ovf));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(
+ m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2),
+ &val, &ovf);
+ m.Return(m.Word32Equal(val, ovf));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST(InstructionSelectorInt32AddWithOverflowAndShiftImm) {
+ Shifts shifts;
+ for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+ Shift shift = *i;
+ for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
+ {
+ InstructionSelectorTester m;
+ Node* ovf;
+ m.Int32AddWithOverflow(
+ m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)), NULL,
+ &ovf);
+ m.Return(ovf);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* ovf;
+ m.Int32AddWithOverflow(
+ m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1), NULL, &ovf);
+ m.Return(ovf);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val;
+ m.Int32AddWithOverflow(
+ m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)), &val,
+ NULL);
+ m.Return(val);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val;
+ m.Int32AddWithOverflow(
+ m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1), &val, NULL);
+ m.Return(val);
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(
+ m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)), &val,
+ &ovf);
+ m.Return(m.Word32Equal(val, ovf));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(
+ m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
+ m.Parameter(1), &val, &ovf);
+ m.Return(m.Word32Equal(val, ovf));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ }
+ }
+}
+
+
TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1P) {
{
InstructionSelectorTester m;
#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-#endif
+
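+// Computes x + y in uint32_t to avoid signed-overflow UB, then applies the
+// sign rule: signed addition overflows iff both operands share a sign and
+// the result's sign differs, i.e. (v ^ x) and (v ^ y) both have the sign bit
+// set. Quick sanity check (a sketch; kMaxInt/kMinInt are V8's int32 limits):
+//   int32_t val = 0;
+//   CHECK(sadd_overflow(kMaxInt, 1, &val));  // wraps: val == kMinInt
+//   CHECK(!sadd_overflow(-1, 1, &val));      // val == 0, no overflow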
+static bool sadd_overflow(int32_t x, int32_t y, int32_t* val) {
+ int32_t v =
+ static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
+ *val = v;
+ return (((v ^ x) & (v ^ y)) >> 31) & 1;
+}
+
+
+TEST(RunInt32AddWithOverflowP) {
+ int32_t actual_val = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ bt.AddReturn(ovf);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected_val;
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt32AddWithOverflowImm) {
+ int32_t actual_val = -1, expected_val = 0;
+ FOR_INT32_INPUTS(i) {
+ {
+ RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val,
+ &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt32AddWithOverflowInBranchP) {
+ MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Word32Not(val));
+ m.Bind(&blockb);
+ bt.AddReturn(val);
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ int32_t expected;
+ if (sadd_overflow(*i, *j, &expected)) expected = ~expected;
+ CHECK_EQ(expected, bt.call(*i, *j));
+ }
+ }
+}
+
+#endif // V8_TURBOFAN_TARGET