i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmClz:
+ __ clz(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmCmp:
__ cmp(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
V(ArmAdd) \
V(ArmAnd) \
V(ArmBic) \
+ V(ArmClz) \
V(ArmCmp) \
V(ArmCmn) \
V(ArmTst) \
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmClz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
__ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
break;
}
+ case kArm64Clz32:
+ __ Clz(i.OutputRegister32(), i.InputRegister32(0));
+ break;
case kArm64Cmp:
__ Cmp(i.InputRegister(0), i.InputOperand(1));
break;
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
+ V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
V(Arm64Cmn) \
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
__ ror_cl(i.OutputOperand());
}
break;
+ case kIA32Lzcnt:
+ __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
V(IA32Shr) \
V(IA32Sar) \
V(IA32Ror) \
+ V(IA32Lzcnt) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
return VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
+ case IrOpcode::kWord32Clz:
+ return VisitWord32Clz(node);
case IrOpcode::kWord64And:
return VisitWord64And(node);
case IrOpcode::kWord64Or:
return ReduceInlineDoubleHi(node);
case Runtime::kInlineIsRegExp:
return ReduceInlineIsInstanceType(node, JS_REGEXP_TYPE);
+ case Runtime::kInlineMathClz32:
+ return ReduceInlineMathClz32(node);
case Runtime::kInlineMathFloor:
return ReduceInlineMathFloor(node);
case Runtime::kInlineMathSqrt:
}
+Reduction JSIntrinsicLowering::ReduceInlineMathClz32(Node* node) {
+ return Change(node, machine()->Word32Clz());
+}
+
+
Reduction JSIntrinsicLowering::ReduceInlineMathFloor(Node* node) {
if (!machine()->HasFloat64RoundDown()) return NoChange();
return Change(node, machine()->Float64RoundDown());
Reduction ReduceInlineConstructDouble(Node* node);
Reduction ReduceInlineDoubleLo(Node* node);
Reduction ReduceInlineDoubleHi(Node* node);
+ Reduction ReduceInlineMathClz32(Node* node);
Reduction ReduceInlineMathFloor(Node* node);
Reduction ReduceInlineMathSqrt(Node* node);
Reduction ReduceInlineStringGetLength(Node* node);
V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
const Operator* Word32Sar();
const Operator* Word32Ror();
const Operator* Word32Equal();
+ const Operator* Word32Clz();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
V(Word32Shr) \
V(Word32Sar) \
V(Word32Ror) \
+ V(Word32Clz) \
V(Word64And) \
V(Word64Or) \
V(Word64Xor) \
Node* Word32Ror(Node* a, Node* b) {
return NewNode(machine()->Word32Ror(), a, b);
}
+ Node* Word32Clz(Node* a) { return NewNode(machine()->Word32Clz(), a); }
Node* Word32Equal(Node* a, Node* b) {
return NewNode(machine()->Word32Equal(), a, b);
}
case IrOpcode::kWord32Equal:
return VisitBinop(node, kRepWord32, kRepBit);
+ case IrOpcode::kWord32Clz:
+ return VisitUnop(node, kMachUint32, kMachUint32);
+
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
case IrOpcode::kInt32Mul:
case Runtime::kInlineMathFloor:
case Runtime::kInlineMathSqrt:
return Bounds(Type::None(zone()), Type::Number());
+ case Runtime::kInlineMathClz32:
+ return Bounds(Type::None(zone()), Type::Range(0, 32, zone()));
default:
break;
}
}
+Bounds Typer::Visitor::TypeWord32Clz(Node* node) {
+ return Bounds(Type::Integral32());
+}
+
+
Bounds Typer::Visitor::TypeWord64And(Node* node) {
return Bounds(Type::Internal());
}
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
+ case IrOpcode::kWord32Clz:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
case IrOpcode::kWord64Xor:
case kX64Ror:
ASSEMBLE_SHIFT(rorq, 6);
break;
+ case kX64Lzcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
case kSSEFloat64Cmp:
ASSEMBLE_DOUBLE_BINOP(ucomisd);
break;
V(X64Sar32) \
V(X64Ror) \
V(X64Ror32) \
+ V(X64Lzcnt32) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
SC(math_asin, V8.MathAsin) \
SC(math_atan, V8.MathAtan) \
SC(math_atan2, V8.MathAtan2) \
+ SC(math_clz32, V8.MathClz32) \
SC(math_exp, V8.MathExp) \
SC(math_floor, V8.MathFloor) \
SC(math_log, V8.MathLog) \
}
+void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathClz32);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
F(ConstructDouble) \
F(DoubleHi) \
F(DoubleLo) \
+ F(MathClz32) \
F(MathFloor) \
F(MathSqrt) \
F(MathLogRT) \
void LCodeGen::DoMathClz32(LMathClz32* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Label not_zero_input;
- __ bsr(result, input);
- __ j(not_zero, &not_zero_input);
- __ Move(result, Immediate(63)); // 63^31 == 32
-
- __ bind(&not_zero_input);
- __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+ __ Lzcnt(result, input);
}
}
+void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
+ // TODO(intel): Add support for LZCNT (with ABM/BMI1).
+ Label not_zero_src;
+ bsr(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Move(dst, Immediate(63)); // 63^31 == 32
+ bind(&not_zero_src);
+ xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
}
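
The Lzcnt macro rests on two facts about BSR: for nonzero x it yields the index i of the most significant set bit, and 31 ^ i == 31 - i for any i in [0..31], which is exactly the leading-zero count; for x == 0 the destination is left undefined (ZF is set), so the macro loads 63, and 63 ^ 31 == 32. A standalone sketch of the same computation (illustrative plain C++; EmulatedLzcnt is a made-up name, not V8 code):

#include <cassert>
#include <cstdint>

// Software model of the BSR+XOR sequence emitted by MacroAssembler::Lzcnt.
uint32_t EmulatedLzcnt(uint32_t x) {
  uint32_t bsr = 63;  // Value the macro loads when BSR signals a zero input.
  for (int i = 31; i >= 0; --i) {
    if (x & (1u << i)) {  // BSR reports the index of the highest set bit.
      bsr = static_cast<uint32_t>(i);
      break;
    }
  }
  return bsr ^ 31;  // 31 ^ i == 31 - i for i in [0..31]; 63 ^ 31 == 32.
}

int main() {
  assert(EmulatedLzcnt(0) == 32);           // Zero input takes the 63 path.
  assert(EmulatedLzcnt(1) == 31);           // Bit 0 set: 31 leading zeros.
  assert(EmulatedLzcnt(0x80000000u) == 0);  // Bit 31 set: no leading zeros.
  return 0;
}

The x64 Lzcntl macros later in this change use the identical sequence, only with Set instead of Move for the 63 constant.
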
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
+ void Lzcnt(Register dst, const Operand& src);
+
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
}
// ES6 draft 07-18-14, section 20.2.2.11
-function MathClz32(x) {
- x = ToUint32(TO_NUMBER_INLINE(x));
- if (x == 0) return 32;
- var result = 0;
- // Binary search.
- if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
- if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; };
- if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; };
- if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; };
- if ((x & 0x80000000) === 0) { x <<= 1; result += 1; };
- return result;
+function MathClz32JS(x) {
+ return %_MathClz32(x >>> 0);
}
// ES6 draft 09-27-13, section 20.2.2.9.
"atanh", MathAtanh,
"hypot", MathHypot,
"fround", MathFroundJS,
- "clz32", MathClz32,
+ "clz32", MathClz32JS,
"cbrt", MathCbrt
));
%SetInlineBuiltinFlag(MathAbs);
%SetInlineBuiltinFlag(MathCeil);
+%SetInlineBuiltinFlag(MathClz32JS);
%SetInlineBuiltinFlag(MathFloorJS);
%SetInlineBuiltinFlag(MathRandom);
%SetInlineBuiltinFlag(MathSqrtJS);
}
+RUNTIME_FUNCTION(Runtime_MathClz32) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ isolate->counters()->math_clz32()->Increment();
+
+ CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
+ return *isolate->factory()->NewNumberFromUint(
+ base::bits::CountLeadingZeros32(x));
+}
+
+
RUNTIME_FUNCTION(Runtime_MathFloor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
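
Runtime_MathClz32 delegates the actual count to base::bits::CountLeadingZeros32. Its portable shape is the same binary search as the JavaScript MathClz32 fallback removed from math.js below; a minimal sketch of that shape (an illustration assuming no CLZ intrinsic is available, not the actual base::bits implementation):

#include <cstdint>

// Halve the candidate window at each step, mirroring the removed JavaScript
// fallback: if the top half is empty, shift it away and credit that many
// leading zeros.
uint32_t PortableClz32(uint32_t x) {
  if (x == 0) return 32;
  uint32_t result = 0;
  if ((x & 0xFFFF0000u) == 0) { x <<= 16; result += 16; }
  if ((x & 0xFF000000u) == 0) { x <<= 8;  result += 8; }
  if ((x & 0xF0000000u) == 0) { x <<= 4;  result += 4; }
  if ((x & 0xC0000000u) == 0) { x <<= 2;  result += 2; }
  if ((x & 0x80000000u) == 0) { result += 1; }
  return result;
}
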
F(ConstructDouble, 2, 1) \
F(DoubleHi, 1, 1) \
F(DoubleLo, 1, 1) \
+ F(MathClz32, 1, 1) \
F(MathFloor, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathLogRT, 1, 1) \
}
+void Assembler::bsrl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
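+ // BSR r32, r/m32 is encoded as 0F BD /r.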
+ emit(0x0F);
+ emit(0xBD);
+ emit_operand(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
void bsrl(Register dst, Register src);
+ void bsrl(Register dst, const Operand& src);
// Miscellaneous
void clc();
void LCodeGen::DoMathClz32(LMathClz32* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Label not_zero_input;
- __ bsrl(result, input);
- __ j(not_zero, &not_zero_input);
- __ Set(result, 63); // 63^31 == 32
-
- __ bind(&not_zero_input);
- __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+ __ Lzcntl(result, input);
}
}
+void MacroAssembler::Lzcntl(Register dst, Register src) {
+ // TODO(intel): Add support for LZCNT (BMI1/ABM).
+ Label not_zero_src;
+ bsrl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 63); // 63^31 == 32
+ bind(&not_zero_src);
+ xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
+}
+
+
+void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
+ // TODO(intel): Add support for LZCNT (BMI1/ABM).
+ Label not_zero_src;
+ bsrl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 63); // 63^31 == 32
+ bind(&not_zero_src);
+ xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
+}
+
+
void MacroAssembler::Pushad() {
Push(rax);
Push(rcx);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void Lzcntl(Register dst, Register src);
+ void Lzcntl(Register dst, const Operand& src);
+
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
__ addq(rdi, Operand(rbp, rcx, times_4, -3999));
__ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
+ __ bsrl(rax, r15);
+ __ bsrl(r9, Operand(rcx, times_8, 91919));
+
__ nop();
__ addq(rbx, Immediate(12));
__ nop();
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var stdlib = { Math: Math };
+
+var f = (function Module(stdlib) {
+ "use asm";
+
+ var clz32 = stdlib.Math.clz32;
+
+ function f(a) {
+ a = a >>> 0;
+ return clz32(a)|0;
+ }
+
+ return f;
+})(stdlib);
+
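+// The module coerces with a >>> 0, so NaN and undefined become 0, and
+// Math.clz32(0) is 32 -- hence the three zero-path checks below.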
+assertEquals(32, f(0));
+assertEquals(32, f(NaN));
+assertEquals(32, f(undefined));
+for (var i = 0; i < 32; ++i) {
+ assertEquals(i, f((-1) >>> i));
+}
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(%MathClz32(i), f(i));
+ assertEquals(%_MathClz32(i), f(i));
+}
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmClz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Clz32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
namespace {
// Immediates (random subset).
-static const int32_t kImmediates[] = {
- kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
- 6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+const int32_t kImmediates[] = {kMinInt, -42, -1, 0, 1, 2,
+ 3, 4, 5, 6, 7, 8,
+ 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
} // namespace
EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Lzcnt, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
}
+// -----------------------------------------------------------------------------
+// %_MathClz32
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineMathClz32) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathClz32, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsWord32Clz(input));
+}
+
+
// -----------------------------------------------------------------------------
// %_ValueOf
PURE(Word32And, 2, 0, 1), PURE(Word32Or, 2, 0, 1), PURE(Word32Xor, 2, 0, 1),
PURE(Word32Shl, 2, 0, 1), PURE(Word32Shr, 2, 0, 1),
PURE(Word32Sar, 2, 0, 1), PURE(Word32Ror, 2, 0, 1),
- PURE(Word32Equal, 2, 0, 1), PURE(Word64And, 2, 0, 1),
- PURE(Word64Or, 2, 0, 1), PURE(Word64Xor, 2, 0, 1), PURE(Word64Shl, 2, 0, 1),
- PURE(Word64Shr, 2, 0, 1), PURE(Word64Sar, 2, 0, 1),
- PURE(Word64Ror, 2, 0, 1), PURE(Word64Equal, 2, 0, 1),
- PURE(Int32Add, 2, 0, 1), PURE(Int32AddWithOverflow, 2, 0, 2),
- PURE(Int32Sub, 2, 0, 1), PURE(Int32SubWithOverflow, 2, 0, 2),
- PURE(Int32Mul, 2, 0, 1), PURE(Int32MulHigh, 2, 0, 1),
- PURE(Int32Div, 2, 1, 1), PURE(Uint32Div, 2, 1, 1), PURE(Int32Mod, 2, 1, 1),
- PURE(Uint32Mod, 2, 1, 1), PURE(Int32LessThan, 2, 0, 1),
- PURE(Int32LessThanOrEqual, 2, 0, 1), PURE(Uint32LessThan, 2, 0, 1),
- PURE(Uint32LessThanOrEqual, 2, 0, 1), PURE(Int64Add, 2, 0, 1),
- PURE(Int64Sub, 2, 0, 1), PURE(Int64Mul, 2, 0, 1), PURE(Int64Div, 2, 0, 1),
- PURE(Uint64Div, 2, 0, 1), PURE(Int64Mod, 2, 0, 1), PURE(Uint64Mod, 2, 0, 1),
- PURE(Int64LessThan, 2, 0, 1), PURE(Int64LessThanOrEqual, 2, 0, 1),
- PURE(Uint64LessThan, 2, 0, 1), PURE(ChangeFloat32ToFloat64, 1, 0, 1),
- PURE(ChangeFloat64ToInt32, 1, 0, 1), PURE(ChangeFloat64ToUint32, 1, 0, 1),
- PURE(ChangeInt32ToInt64, 1, 0, 1), PURE(ChangeUint32ToFloat64, 1, 0, 1),
- PURE(ChangeUint32ToUint64, 1, 0, 1),
+ PURE(Word32Equal, 2, 0, 1), PURE(Word32Clz, 1, 0, 1),
+ PURE(Word64And, 2, 0, 1), PURE(Word64Or, 2, 0, 1), PURE(Word64Xor, 2, 0, 1),
+ PURE(Word64Shl, 2, 0, 1), PURE(Word64Shr, 2, 0, 1),
+ PURE(Word64Sar, 2, 0, 1), PURE(Word64Ror, 2, 0, 1),
+ PURE(Word64Equal, 2, 0, 1), PURE(Int32Add, 2, 0, 1),
+ PURE(Int32AddWithOverflow, 2, 0, 2), PURE(Int32Sub, 2, 0, 1),
+ PURE(Int32SubWithOverflow, 2, 0, 2), PURE(Int32Mul, 2, 0, 1),
+ PURE(Int32MulHigh, 2, 0, 1), PURE(Int32Div, 2, 1, 1),
+ PURE(Uint32Div, 2, 1, 1), PURE(Int32Mod, 2, 1, 1), PURE(Uint32Mod, 2, 1, 1),
+ PURE(Int32LessThan, 2, 0, 1), PURE(Int32LessThanOrEqual, 2, 0, 1),
+ PURE(Uint32LessThan, 2, 0, 1), PURE(Uint32LessThanOrEqual, 2, 0, 1),
+ PURE(Int64Add, 2, 0, 1), PURE(Int64Sub, 2, 0, 1), PURE(Int64Mul, 2, 0, 1),
+ PURE(Int64Div, 2, 0, 1), PURE(Uint64Div, 2, 0, 1), PURE(Int64Mod, 2, 0, 1),
+ PURE(Uint64Mod, 2, 0, 1), PURE(Int64LessThan, 2, 0, 1),
+ PURE(Int64LessThanOrEqual, 2, 0, 1), PURE(Uint64LessThan, 2, 0, 1),
+ PURE(ChangeFloat32ToFloat64, 1, 0, 1), PURE(ChangeFloat64ToInt32, 1, 0, 1),
+ PURE(ChangeFloat64ToUint32, 1, 0, 1), PURE(ChangeInt32ToInt64, 1, 0, 1),
+ PURE(ChangeUint32ToFloat64, 1, 0, 1), PURE(ChangeUint32ToUint64, 1, 0, 1),
PURE(TruncateFloat64ToFloat32, 1, 0, 1),
PURE(TruncateFloat64ToInt32, 1, 0, 1), PURE(TruncateInt64ToInt32, 1, 0, 1),
PURE(Float64Add, 2, 0, 1), PURE(Float64Sub, 2, 0, 1),
IS_UNOP_MATCHER(NumberToUint32)
IS_UNOP_MATCHER(ObjectIsSmi)
IS_UNOP_MATCHER(ObjectIsNonNegativeSmi)
+IS_UNOP_MATCHER(Word32Clz)
#undef IS_UNOP_MATCHER
} // namespace compiler
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Clz(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lzcnt32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8