}
+void MacroAssembler::Umull(const Register& rd, const Register& rn,
+ const Register& rm) {
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
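+ // UMULL Xd, Wn, Wm is an alias of UMADDL Xd, Wn, Wm, XZR, so emit the base
+ // instruction with the zero register as the accumulator.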
+ umaddl(rd, rn, rm, xzr);
+}
+
+
void MacroAssembler::Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst) {
inline void Smulh(const Register& rd,
const Register& rn,
const Register& rm);
+ inline void Umull(const Register& rd, const Register& rn, const Register& rm);
inline void Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst);
i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmUmull:
+ __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.OutputSBit());
+ break;
case kArmSdiv: {
CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
V(ArmMls) \
V(ArmSmmul) \
V(ArmSmmla) \
+ V(ArmUmull) \
V(ArmSdiv) \
V(ArmUdiv) \
V(ArmMov) \
case kArmMls:
case kArmSmmul:
case kArmSmmla:
+ case kArmUmull:
case kArmSdiv:
case kArmUdiv:
case kArmBfc:
}
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ ArmOperandGenerator g(this);
+ InstructionOperand* outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
+ InstructionOperand* inputs[] = {g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1))};
+ Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+
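For readers unfamiliar with the two-output UMULL pattern selected above, a small reference sketch (not part of the patch) of what the node computes: the instruction writes the full 64-bit product into a register pair, and only the high word, defined here as the node's own register, carries the Uint32MulHigh value; the low word merely occupies a temp.

  #include <cstdint>

  uint32_t Uint32MulHighReference(uint32_t a, uint32_t b) {
    uint64_t product = static_cast<uint64_t>(a) * b;
    uint32_t lo = static_cast<uint32_t>(product);        // output 0: temp (RdLo)
    uint32_t hi = static_cast<uint32_t>(product >> 32);  // output 1: the node (RdHi)
    (void)lo;  // the low half is not used by Uint32MulHigh
    return hi;
  }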
static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
InstructionOperand* result_operand,
case kArm64Smull:
__ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
break;
+ case kArm64Umull:
+ __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
case kArm64Madd:
__ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
V(Arm64Mul) \
V(Arm64Mul32) \
V(Arm64Smull) \
+ V(Arm64Umull) \
V(Arm64Madd) \
V(Arm64Madd32) \
V(Arm64Msub) \
}
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ // TODO(arm64): Can we do better here?
+ Arm64OperandGenerator g(this);
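+ // Umull leaves the full 64-bit product in a single X register, so the high
+ // word of the result is recovered with a 64-bit logical shift right by 32.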
+ InstructionOperand* const smull_operand = g.TempRegister();
+ Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
+}
+
+
void InstructionSelector::VisitInt32Div(Node* node) {
VisitRRR(this, kArm64Idiv32, node);
}
Node* ChangeLowering::SmiMaxValueConstant() {
const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
: SmiTagging<8>::SmiValueSize();
- return jsgraph()->IntPtrConstant(
+ return jsgraph()->Int32Constant(
-(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
}
case kIA32ImulHigh:
__ imul(i.InputRegister(1));
break;
+ case kIA32UmulHigh:
+ __ mul(i.InputRegister(1));
+ break;
case kIA32Idiv:
__ cdq();
__ idiv(i.InputOperand(1));
V(IA32Sub) \
V(IA32Imul) \
V(IA32ImulHigh) \
+ V(IA32UmulHigh) \
V(IA32Idiv) \
V(IA32Udiv) \
V(IA32Not) \
}
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32ImulHigh, g.DefineAsFixed(node, edx),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUniqueRegister(node->InputAt(1)));
+namespace {
+
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
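+ // The one-operand imul/mul forms implicitly read eax and write the high half
+ // of the product to edx, so both the input and the result are pinned to
+ // those registers.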
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUniqueRegister(node->InputAt(1)));
}
-static inline void VisitDiv(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand* temps[] = {g.TempRegister(edx)};
- size_t temp_count = arraysize(temps);
selector->Emit(opcode, g.DefineAsFixed(node, eax),
g.UseFixed(node->InputAt(0), eax),
- g.UseUnique(node->InputAt(1)), temp_count, temps);
+ g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)));
+}
+
+} // namespace
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitMulHigh(this, node, kIA32ImulHigh);
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitMulHigh(this, node, kIA32UmulHigh);
}
}
-static inline void VisitMod(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- IA32OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsFixed(node, edx),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUnique(node->InputAt(1)));
-}
-
-
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitMod(this, node, kIA32Idiv);
}
return VisitUint32LessThanOrEqual(node);
case IrOpcode::kUint32Mod:
return VisitUint32Mod(node);
+ case IrOpcode::kUint32MulHigh:
+ return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
return VisitInt64Add(node);
case IrOpcode::kInt64Sub:
Node* MachineOperatorReducer::Word32Sar(Node* lhs, uint32_t rhs) {
+ if (rhs == 0) return lhs;
return graph()->NewNode(machine()->Word32Sar(), lhs, Uint32Constant(rhs));
}
Node* MachineOperatorReducer::Word32Shr(Node* lhs, uint32_t rhs) {
+ if (rhs == 0) return lhs;
return graph()->NewNode(machine()->Word32Shr(), lhs, Uint32Constant(rhs));
}
}
-Node* MachineOperatorReducer::TruncatingDiv(Node* dividend, int32_t divisor) {
+Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
+ DCHECK_NE(0, divisor);
DCHECK_NE(std::numeric_limits<int32_t>::min(), divisor);
base::MagicNumbersForDivision<uint32_t> const mag =
base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
} else if (divisor < 0 && bit_cast<int32_t>(mag.multiplier) > 0) {
quotient = Int32Sub(quotient, dividend);
}
- if (mag.shift) {
- quotient = Word32Sar(quotient, mag.shift);
+ return Int32Add(Word32Sar(quotient, mag.shift), Word32Shr(dividend, 31));
+}
+
+
+Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
+ DCHECK_LT(0, divisor);
+ base::MagicNumbersForDivision<uint32_t> const mag =
+ base::UnsignedDivisionByConstant(bit_cast<uint32_t>(divisor));
+ Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend,
+ Uint32Constant(mag.multiplier));
+ if (mag.add) {
+ DCHECK_LE(1, mag.shift);
+ quotient = Word32Shr(
+ Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient),
+ mag.shift - 1);
+ } else {
+ quotient = Word32Shr(quotient, mag.shift);
}
- return Int32Add(quotient, Word32Shr(dividend, 31));
+ return quotient;
}
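To make the shape of the generated code concrete, here is a small standalone sketch (not from the patch) of the two branches Uint32Div can take, using the textbook magic constants for the divisors 3 (mag.add false, shift 1) and 7 (mag.add true, shift 3); the constants base::UnsignedDivisionByConstant actually returns are assumed to match these well-known values.

  #include <cassert>
  #include <cstdint>

  // Uint32MulHigh: the high 32 bits of the unsigned 64-bit product.
  static uint32_t MulHigh(uint32_t a, uint32_t b) {
    return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
  }

  // mag.add == false: quotient = MulHigh(n, M) >> shift.
  static uint32_t DivBy3(uint32_t n) { return MulHigh(n, 0xAAAAAAABu) >> 1; }

  // mag.add == true: quotient = (((n - q) >> 1) + q) >> (shift - 1).
  static uint32_t DivBy7(uint32_t n) {
    uint32_t q = MulHigh(n, 0x24924925u);
    return (((n - q) >> 1) + q) >> (3 - 1);
  }

  int main() {
    for (uint64_t i = 0; i <= 0xFFFFFFFFu; i += 99991) {
      uint32_t n = static_cast<uint32_t>(i);
      assert(DivBy3(n) == n / 3);
      assert(DivBy7(n) == n / 7);
    }
  }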
quotient = Int32Add(Word32Shr(quotient, 32u - shift), dividend);
quotient = Word32Sar(quotient, shift);
} else {
- quotient = TruncatingDiv(quotient, Abs(divisor));
+ quotient = Int32Div(quotient, Abs(divisor));
}
if (divisor < 0) {
node->set_op(machine()->Int32Sub());
Node* const zero = Int32Constant(0);
return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
}
- if (m.right().IsPowerOf2()) { // x / 2^n => x >> n
- node->TrimInputCount(2);
- node->set_op(machine()->Word32Shr());
- node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
- return Changed(node);
+ if (m.right().HasValue()) {
+ Node* const dividend = m.left().node();
+ uint32_t const divisor = m.right().Value();
+ if (base::bits::IsPowerOfTwo32(divisor)) { // x / 2^n => x >> n
+ node->set_op(machine()->Word32Shr());
+ node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
+ node->TrimInputCount(2);
+ return Changed(node);
+ } else {
+ return Replace(Uint32Div(dividend, divisor));
+ }
}
return NoChange();
}
Node* pos = Word32And(dividend, mask);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachInt32, 2), neg, pos, merge);
- return Replace(phi);
+
+ DCHECK_EQ(3, node->InputCount());
+ node->set_op(common()->Phi(kMachInt32, 2));
+ node->ReplaceInput(0, neg);
+ node->ReplaceInput(1, pos);
+ node->ReplaceInput(2, merge);
} else {
- Node* quotient = TruncatingDiv(dividend, divisor);
+ Node* quotient = Int32Div(dividend, divisor);
node->set_op(machine()->Int32Sub());
DCHECK_EQ(dividend, node->InputAt(0));
node->ReplaceInput(1, Int32Mul(quotient, Int32Constant(divisor)));
node->TrimInputCount(2);
- return Changed(node);
}
+ return Changed(node);
}
return NoChange();
}
return ReplaceUint32(
base::bits::UnsignedMod32(m.left().Value(), m.right().Value()));
}
- if (m.right().IsPowerOf2()) { // x % 2^n => x & 2^n-1
+ if (m.right().HasValue()) {
+ Node* const dividend = m.left().node();
+ uint32_t const divisor = m.right().Value();
+ if (base::bits::IsPowerOfTwo32(divisor)) { // x % 2^n => x & 2^n-1
+ node->set_op(machine()->Word32And());
+ node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+ } else {
+ Node* quotient = Uint32Div(dividend, divisor);
+ node->set_op(machine()->Int32Sub());
+ DCHECK_EQ(dividend, node->InputAt(0));
+ node->ReplaceInput(1, Int32Mul(quotient, Uint32Constant(divisor)));
+ }
node->TrimInputCount(2);
- node->set_op(machine()->Word32And());
- node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
return Changed(node);
}
return NoChange();
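A compact reference for the Uint32Mod reduction above (a sketch, not part of the patch), assuming a non-zero constant divisor; the fully-constant and power-of-two cases mirror the branches in the reducer:

  #include <cstdint>

  uint32_t Uint32ModReference(uint32_t dividend, uint32_t divisor) {
    if ((divisor & (divisor - 1)) == 0) {      // x % 2^n  =>  x & (2^n - 1)
      return dividend & (divisor - 1);
    }
    uint32_t quotient = dividend / divisor;    // strength-reduced via Uint32Div
    return dividend - quotient * divisor;      // x % c  =>  x - (x / c) * c
  }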
Node* Int32Add(Node* lhs, Node* rhs);
Node* Int32Sub(Node* lhs, Node* rhs);
Node* Int32Mul(Node* lhs, Node* rhs);
-
- Node* TruncatingDiv(Node* dividend, int32_t divisor);
+ Node* Int32Div(Node* dividend, int32_t divisor);
+ Node* Uint32Div(Node* dividend, uint32_t divisor);
Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
Reduction ReplaceFloat32(volatile float value) {
V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
const Operator* Uint32LessThan();
const Operator* Uint32LessThanOrEqual();
const Operator* Uint32Mod();
+ const Operator* Uint32MulHigh();
bool Int32DivIsSafe() const { return flags_ & kInt32DivIsSafe; }
bool Int32ModIsSafe() const { return flags_ & kInt32ModIsSafe; }
bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
V(Uint32LessThan) \
V(Uint32LessThanOrEqual) \
V(Uint32Mod) \
+ V(Uint32MulHigh) \
V(Int64Add) \
V(Int64Sub) \
V(Int64Mul) \
return Load(rep, base, Int32Constant(0));
}
Node* Load(MachineType rep, Node* base, Node* index) {
- return NewNode(machine()->Load(rep), base, index);
+ return NewNode(machine()->Load(rep), base, index, graph()->start(),
+ graph()->start());
}
void Store(MachineType rep, Node* base, Node* value) {
Store(rep, base, Int32Constant(0), value);
}
void Store(MachineType rep, Node* base, Node* index, Node* value) {
NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
- index, value);
+ index, value, graph()->start(), graph()->start());
}
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return NewNode(machine()->Int32MulHigh(), a, b);
}
Node* Int32Div(Node* a, Node* b) {
- return NewNode(machine()->Int32Div(), a, b);
+ return NewNode(machine()->Int32Div(), a, b, graph()->start());
}
Node* Int32Mod(Node* a, Node* b) {
- return NewNode(machine()->Int32Mod(), a, b);
+ return NewNode(machine()->Int32Mod(), a, b, graph()->start());
}
Node* Int32LessThan(Node* a, Node* b) {
return NewNode(machine()->Int32LessThan(), a, b);
return NewNode(machine()->Int32LessThanOrEqual(), a, b);
}
Node* Uint32Div(Node* a, Node* b) {
- return NewNode(machine()->Uint32Div(), a, b);
+ return NewNode(machine()->Uint32Div(), a, b, graph()->start());
}
Node* Uint32LessThan(Node* a, Node* b) {
return NewNode(machine()->Uint32LessThan(), a, b);
return NewNode(machine()->Uint32LessThanOrEqual(), a, b);
}
Node* Uint32Mod(Node* a, Node* b) {
- return NewNode(machine()->Uint32Mod(), a, b);
+ return NewNode(machine()->Uint32Mod(), a, b, graph()->start());
+ }
+ Node* Uint32MulHigh(Node* a, Node* b) {
+ return NewNode(machine()->Uint32MulHigh(), a, b);
}
Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
case IrOpcode::kInt32Mul:
+ case IrOpcode::kInt32MulHigh:
case IrOpcode::kInt32Div:
case IrOpcode::kInt32Mod:
return VisitInt32Binop(node);
case IrOpcode::kUint32Div:
case IrOpcode::kUint32Mod:
+ case IrOpcode::kUint32MulHigh:
return VisitUint32Binop(node);
case IrOpcode::kInt32LessThan:
case IrOpcode::kInt32LessThanOrEqual:
Bounds Typer::Visitor::TypeInt32MulHigh(Node* node) {
- return Bounds(Type::Integral32());
+ return Bounds(Type::Signed32());
}
}
+Bounds Typer::Visitor::TypeUint32MulHigh(Node* node) {
+ return Bounds(Type::Unsigned32());
+}
+
+
Bounds Typer::Visitor::TypeInt64Add(Node* node) {
return Bounds(Type::Internal());
}
case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kUint32Div:
case IrOpcode::kUint32Mod:
+ case IrOpcode::kUint32MulHigh:
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kInt64Add:
ASSEMBLE_MULT(imulq);
break;
case kX64ImulHigh32:
- __ imull(i.InputRegister(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ imull(i.InputRegister(1));
+ } else {
+ __ imull(i.InputOperand(1));
+ }
+ break;
+ case kX64UmulHigh32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ mull(i.InputRegister(1));
+ } else {
+ __ mull(i.InputOperand(1));
+ }
break;
case kX64Idiv32:
__ cdq();
V(X64Imul) \
V(X64Imul32) \
V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
V(X64Idiv) \
V(X64Idiv32) \
V(X64Udiv) \
}
}
+
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (selector->IsLive(left) && !selector->IsLive(right)) {
+ std::swap(left, right);
+ }
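+ // mul/imul clobber rax with the low half of the product, so prefer to pin
+ // the operand that is no longer live to rax.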
+ selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
+ g.UseUnique(right));
+}
+
+
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ InstructionOperand* temps[] = {g.TempRegister(rdx)};
+ selector->Emit(
+ opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsFixed(node, rdx),
+ g.UseFixed(node->InputAt(0), rax),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
} // namespace
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64ImulHigh32, g.DefineAsFixed(node, rdx),
- g.UseFixed(node->InputAt(0), rax),
- g.UseUniqueRegister(node->InputAt(1)));
-}
-
-
-static void VisitDiv(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- InstructionOperand* temps[] = {g.TempRegister(rdx)};
- selector->Emit(
- opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+ VisitMulHigh(this, node, kX64ImulHigh32);
}
}
-static void VisitMod(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsFixed(node, rdx),
- g.UseFixed(node->InputAt(0), rax),
- g.UseUniqueRegister(node->InputAt(1)));
-}
-
-
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitMod(this, node, kX64Idiv32);
}
}
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitMulHigh(this, node, kX64UmulHigh32);
+}
+
+
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
X64OperandGenerator g(this);
Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
case IrOpcode::kUint32Div:
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
- case IrOpcode::kUint32Mod: {
+ case IrOpcode::kUint32Mod:
+ case IrOpcode::kUint32MulHigh: {
// These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
// zero-extension is a no-op.
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
}
+void Assembler::emit_imul(const Operand& src, int size) {
+ EnsureSpace ensure_space(this);
+ emit_rex(src, size);
+ emit(0xF7);
+ emit_operand(0x5, src);
+}
+
+
void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
}
-void Assembler::mul(Register src) {
+void Assembler::mull(Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src);
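+ // MUL r/m32 is encoded as F7 /4 (IMUL r/m32 would be /5).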
+ emit(0xF7);
+ emit_modrm(0x4, src);
+}
+
+
+void Assembler::mull(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src);
+ emit(0xF7);
+ emit_operand(0x4, src);
+}
+
+
+void Assembler::mulq(Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src);
emit(0xF7);
// Sign-extends eax into edx:eax.
void cdq();
+ // Multiply eax by src, put the result in edx:eax.
+ void mull(Register src);
+ void mull(const Operand& src);
// Multiply rax by src, put the result in rdx:rax.
- void mul(Register src);
+ void mulq(Register src);
#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
void instruction##p(Register dst, Immediate imm8) { \
// Signed multiply instructions.
// rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
void emit_imul(Register src, int size);
+ void emit_imul(const Operand& src, int size);
void emit_imul(Register dst, Register src, int size);
void emit_imul(Register dst, const Operand& src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
RawMachineAssemblerTester<void> m;
const Operator* kOps[] = {
- m.machine()->Word32And(), m.machine()->Word32Or(),
- m.machine()->Word32Xor(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr(), m.machine()->Word32Sar(),
- m.machine()->Word32Equal(), m.machine()->Int32Add(),
- m.machine()->Int32Sub(), m.machine()->Int32Mul(),
- m.machine()->Int32MulHigh(), m.machine()->Int32Div(),
- m.machine()->Uint32Div(), m.machine()->Int32Mod(),
- m.machine()->Uint32Mod(), m.machine()->Int32LessThan(),
- m.machine()->Int32LessThanOrEqual(), m.machine()->Uint32LessThan(),
- m.machine()->Uint32LessThanOrEqual()};
+ m.machine()->Word32And(), m.machine()->Word32Or(),
+ m.machine()->Word32Xor(), m.machine()->Word32Shl(),
+ m.machine()->Word32Shr(), m.machine()->Word32Sar(),
+ m.machine()->Word32Equal(), m.machine()->Int32Add(),
+ m.machine()->Int32Sub(), m.machine()->Int32Mul(),
+ m.machine()->Int32MulHigh(), m.machine()->Int32Div(),
+ m.machine()->Uint32Div(), m.machine()->Int32Mod(),
+ m.machine()->Uint32Mod(), m.machine()->Uint32MulHigh(),
+ m.machine()->Int32LessThan(), m.machine()->Int32LessThanOrEqual(),
+ m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual()};
for (size_t i = 0; i < arraysize(kOps); ++i) {
for (int j = 0; j < 8; j++) {
}
+TEST(RunUint32MulHighP) {
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ bt.AddReturn(m.Uint32MulHigh(bt.param0, bt.param1));
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ int32_t expected = bit_cast<int32_t>(static_cast<uint32_t>(
+ (static_cast<uint64_t>(*i) * static_cast<uint64_t>(*j)) >> 32));
+ CHECK_EQ(expected, bt.call(bit_cast<int32_t>(*i), bit_cast<int32_t>(*j)));
+ }
+ }
+}
+
+
TEST(RunInt32DivP) {
{
RawMachineAssemblerTester<int32_t> m;
RawMachineAssemblerTester<int32_t> m;
const Operator* kOps[] = {
- m.machine()->Word32And(), m.machine()->Word32Or(),
- m.machine()->Word32Xor(), m.machine()->Word32Shl(),
- m.machine()->Word32Shr(), m.machine()->Word32Sar(),
- m.machine()->Word32Ror(), m.machine()->Word32Equal(),
- m.machine()->Int32Add(), m.machine()->Int32Sub(),
- m.machine()->Int32Mul(), m.machine()->Int32MulHigh(),
- m.machine()->Int32Div(), m.machine()->Uint32Div(),
- m.machine()->Int32Mod(), m.machine()->Uint32Mod(),
- m.machine()->Int32LessThan(), m.machine()->Int32LessThanOrEqual(),
- m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual()};
+ m.machine()->Word32And(), m.machine()->Word32Or(),
+ m.machine()->Word32Xor(), m.machine()->Word32Shl(),
+ m.machine()->Word32Shr(), m.machine()->Word32Sar(),
+ m.machine()->Word32Ror(), m.machine()->Word32Equal(),
+ m.machine()->Int32Add(), m.machine()->Int32Sub(),
+ m.machine()->Int32Mul(), m.machine()->Int32MulHigh(),
+ m.machine()->Int32Div(), m.machine()->Uint32Div(),
+ m.machine()->Int32Mod(), m.machine()->Uint32Mod(),
+ m.machine()->Uint32MulHigh(), m.machine()->Int32LessThan(),
+ m.machine()->Int32LessThanOrEqual(), m.machine()->Uint32LessThan(),
+ m.machine()->Uint32LessThanOrEqual()};
for (size_t i = 0; i < arraysize(kOps); ++i) {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
__ nop();
__ idivq(rdx);
- __ mul(rdx);
+ __ mull(rdx);
+ __ mulq(rdx);
__ negq(rdx);
__ notq(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+function Uint32Div(divisor) {
+ var name = "div_";
+ name += divisor;
+ var m = eval("function Module(stdlib, foreign, heap) {\n"
+ + " \"use asm\";\n"
+ + " function " + name + "(dividend) {\n"
+ + " return ((dividend >>> 0) / " + divisor + ") >>> 0;\n"
+ + " }\n"
+ + " return { f: " + name + "}\n"
+ + "}; Module");
+ return m(stdlib, foreign, heap).f;
+}
+
+var divisors = [0, 1, 3, 4, 10, 42, 64, 100, 1024, 2147483647, 4294967295];
+for (var i in divisors) {
+ var divisor = divisors[i];
+ var div = Uint32Div(divisor);
+ for (var dividend = 0; dividend < 4294967296; dividend += 3999773) {
+ assertEquals((dividend / divisor) >>> 0, div(dividend));
+ }
+}
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+function Uint32Mod(divisor) {
+ var name = "mod_";
+ name += divisor;
+ var m = eval("function Module(stdlib, foreign, heap) {\n"
+ + " \"use asm\";\n"
+ + " function " + name + "(dividend) {\n"
+ + " return ((dividend >>> 0) % " + divisor + ") >>> 0;\n"
+ + " }\n"
+ + " return { f: " + name + "}\n"
+ + "}; Module");
+ return m(stdlib, foreign, heap).f;
+}
+
+var divisors = [0, 1, 3, 4, 10, 42, 64, 100, 1024, 2147483647, 4294967295];
+for (var i in divisors) {
+ var divisor = divisors[i];
+ var mod = Uint32Mod(divisor);
+ for (var dividend = 0; dividend < 4294967296; dividend += 3999773) {
+ assertEquals((dividend % divisor) >>> 0, mod(dividend));
+ }
+}
}
+TEST_F(InstructionSelectorTest, Uint32MulHighWithParameters) {
+ StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Uint32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUmull, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(2U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->OutputAt(1)));
+}
+
+
TEST_F(InstructionSelectorTest, Uint32DivWithParameters) {
StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
m.Return(m.Uint32Div(m.Parameter(0), m.Parameter(1)));
IsMerge(
IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsUint32LessThanOrEqual(
- val, IsInt64Constant(SmiMaxValue())),
+ val, IsInt32Constant(SmiMaxValue())),
graph()->start()))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
}
TEST_F(MachineOperatorReducerTest, Int32DivWithConstant) {
Node* const p0 = Parameter(0);
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(0)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(0), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(1)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(1), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement(), p0);
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(-1)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(-1), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Sub(IsInt32Constant(0), p0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(2)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(2), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsInt32Constant(1)));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(-2)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(-2), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsInt32Constant(1))));
}
TRACED_FORRANGE(int32_t, shift, 2, 30) {
- Reduction const r = Reduce(
- graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(1 << shift)));
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Div(), p0,
+ Int32Constant(1 << shift), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
TRACED_FORRANGE(int32_t, shift, 2, 31) {
Reduction const r = Reduce(graph()->NewNode(
machine()->Int32Div(), p0,
- Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift)));
+ Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift),
+ graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
TRACED_FOREACH(int32_t, divisor, kInt32Values) {
if (divisor < 0) {
if (base::bits::IsPowerOfTwo32(-divisor)) continue;
- Reduction const r = Reduce(
- graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(divisor)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(divisor), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Sub(IsInt32Constant(0),
IsTruncatingDiv(p0, -divisor)));
} else if (divisor > 0) {
if (base::bits::IsPowerOfTwo32(divisor)) continue;
- Reduction const r = Reduce(
- graph()->NewNode(machine()->Int32Div(), p0, Int32Constant(divisor)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Div(), p0, Int32Constant(divisor), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTruncatingDiv(p0, divisor));
}
TEST_F(MachineOperatorReducerTest, Int32DivWithParameters) {
Node* const p0 = Parameter(0);
- Reduction const r = Reduce(graph()->NewNode(machine()->Int32Div(), p0, p0));
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Div(), p0, p0, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
TEST_F(MachineOperatorReducerTest, Uint32DivWithConstant) {
Node* const p0 = Parameter(0);
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Uint32Div(), Int32Constant(0), p0));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Div(), Int32Constant(0), p0, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Uint32Div(), p0, Int32Constant(0)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Div(), p0, Int32Constant(0), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Uint32Div(), p0, Int32Constant(1)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Div(), p0, Int32Constant(1), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement(), p0);
}
TRACED_FOREACH(uint32_t, dividend, kUint32Values) {
TRACED_FOREACH(uint32_t, divisor, kUint32Values) {
- Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Div(),
- Uint32Constant(dividend),
- Uint32Constant(divisor)));
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Div(), Uint32Constant(dividend),
+ Uint32Constant(divisor), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(bit_cast<int32_t>(
}
}
TRACED_FORRANGE(uint32_t, shift, 1, 31) {
- Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Div(), p0,
- Uint32Constant(1u << shift)));
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Div(), p0,
+ Uint32Constant(1u << shift), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsWord32Shr(p0, IsInt32Constant(bit_cast<int32_t>(shift))));
TEST_F(MachineOperatorReducerTest, Uint32DivWithParameters) {
Node* const p0 = Parameter(0);
- Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Div(), p0, p0));
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Div(), p0, p0, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
Node* const p0 = Parameter(0);
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Mod(), Int32Constant(0), p0));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), Int32Constant(0), p0, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(0)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(0), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(1)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(1), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(-1)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(-1), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
TRACED_FOREACH(int32_t, dividend, kInt32Values) {
TRACED_FOREACH(int32_t, divisor, kInt32Values) {
- Reduction const r = Reduce(graph()->NewNode(machine()->Int32Mod(),
- Int32Constant(dividend),
- Int32Constant(divisor)));
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Int32Mod(), Int32Constant(dividend),
+ Int32Constant(divisor), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(base::bits::SignedMod32(dividend, divisor)));
}
}
TRACED_FORRANGE(int32_t, shift, 1, 30) {
- Reduction const r = Reduce(
- graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(1 << shift)));
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Mod(), p0,
+ Int32Constant(1 << shift), graph()->start()));
ASSERT_TRUE(r.Changed());
Capture<Node*> branch;
TRACED_FORRANGE(int32_t, shift, 1, 31) {
Reduction const r = Reduce(graph()->NewNode(
machine()->Int32Mod(), p0,
- Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift)));
+ Uint32Constant(bit_cast<uint32_t, int32_t>(-1) << shift),
+ graph()->start()));
ASSERT_TRUE(r.Changed());
Capture<Node*> branch;
}
TRACED_FOREACH(int32_t, divisor, kInt32Values) {
if (divisor == 0 || base::bits::IsPowerOfTwo32(Abs(divisor))) continue;
- Reduction const r = Reduce(
- graph()->NewNode(machine()->Int32Mod(), p0, Int32Constant(divisor)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Int32Mod(), p0, Int32Constant(divisor), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Sub(p0, IsInt32Mul(IsTruncatingDiv(p0, Abs(divisor)),
TEST_F(MachineOperatorReducerTest, Int32ModWithParameters) {
Node* const p0 = Parameter(0);
- Reduction const r = Reduce(graph()->NewNode(machine()->Int32Mod(), p0, p0));
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Mod(), p0, p0, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
TEST_F(MachineOperatorReducerTest, Uint32ModWithConstant) {
Node* const p0 = Parameter(0);
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Uint32Mod(), p0, Int32Constant(0)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Mod(), p0, Int32Constant(0), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Uint32Mod(), Int32Constant(0), p0));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Mod(), Int32Constant(0), p0, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(machine()->Uint32Mod(), p0, Int32Constant(1)));
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Uint32Mod(), p0, Int32Constant(1), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
TRACED_FOREACH(uint32_t, dividend, kUint32Values) {
TRACED_FOREACH(uint32_t, divisor, kUint32Values) {
- Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Mod(),
- Uint32Constant(dividend),
- Uint32Constant(divisor)));
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Mod(), Uint32Constant(dividend),
+ Uint32Constant(divisor), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(bit_cast<int32_t>(
}
}
TRACED_FORRANGE(uint32_t, shift, 1, 31) {
- Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Mod(), p0,
- Uint32Constant(1u << shift)));
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Uint32Mod(), p0,
+ Uint32Constant(1u << shift), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsWord32And(p0, IsInt32Constant(
TEST_F(MachineOperatorReducerTest, Uint32ModWithParameters) {
Node* const p0 = Parameter(0);
- Reduction const r = Reduce(graph()->NewNode(machine()->Uint32Mod(), p0, p0));
+ Reduction const r = Reduce(
+ graph()->NewNode(machine()->Uint32Mod(), p0, p0, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
}
EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1)));
- ASSERT_EQ(1U, s[0]->OutputCount());
+ ASSERT_LE(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32MulHigh) {
+ StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Uint32MulHigh(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64UmulHigh32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), rax));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1)));
+ ASSERT_LE(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), rdx));
}