#if V8_TARGET_ARCH_X64
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
Comment(";;; -------------------- Jump table --------------------");
}
for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
if (needs_frame.is_bound()) {
}
-XMMRegister LCodeGen::ToSIMD128Register(int index) const {
- return XMMRegister::FromAllocationIndex(index);
-}
-
-
Register LCodeGen::ToRegister(LOperand* op) const {
DCHECK(op->IsRegister());
return ToRegister(op->index());
}
-XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
- DCHECK(op->IsFloat32x4Register());
- return ToSIMD128Register(op->index());
-}
-
-
-XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
- DCHECK(op->IsFloat64x2Register());
- return ToSIMD128Register(op->index());
-}
-
-
-XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
- DCHECK(op->IsInt32x4Register());
- return ToSIMD128Register(op->index());
-}
-
-
-XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
- DCHECK(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
- op->IsInt32x4Register());
- return ToSIMD128Register(op->index());
-}
-
-
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot() ||
- op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
- op->IsInt32x4StackSlot());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
return Operand(rbp, StackSlotOffset(op->index()));
} else {
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsFloat32x4StackSlot()) {
- translation->StoreSIMD128StackSlot(op->index(),
- Translation::FLOAT32x4_STACK_SLOT);
- } else if (op->IsFloat64x2StackSlot()) {
- translation->StoreSIMD128StackSlot(op->index(),
- Translation::FLOAT64x2_STACK_SLOT);
- } else if (op->IsInt32x4StackSlot()) {
- translation->StoreSIMD128StackSlot(op->index(),
- Translation::INT32x4_STACK_SLOT);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
} else if (op->IsDoubleRegister()) {
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
- } else if (op->IsFloat32x4Register()) {
- XMMRegister reg = ToFloat32x4Register(op);
- translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
- } else if (op->IsFloat64x2Register()) {
- XMMRegister reg = ToFloat64x2Register(op);
- translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
- } else if (op->IsInt32x4Register()) {
- XMMRegister reg = ToInt32x4Register(op);
- translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
__ bind(&done);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cc == no_condition && frame_is_built_ &&
!info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().bailout_type != bailout_type) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(cc, instr, detail, bailout_type);
}
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1, idiv would signal a divide error. We
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ testl(dividend, dividend);
__ j(not_zero, ÷nd_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(÷nd_not_zero);
}
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, ÷nd_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(÷nd_not_min_int);
}
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "lost precision");
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ testl(dividend, dividend);
__ j(not_zero, ÷nd_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(÷nd_not_zero);
}
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, ÷nd_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(÷nd_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
}
}
if (can_overflow) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr, "minus zero");
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case Token::SHL:
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case Token::SHL:
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
DCHECK(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
// Use xor to produce +0.0 in a fast and compact way, but avoid to
// do so if the constant is -0.0.
if (int_val == 0) {
DCHECK(object.is(rax));
Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "Smi");
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
}
DCHECK(ToRegister(instr->right()).is(rax));
DCHECK(ToRegister(instr->result()).is(rax));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
- } else if (r.IsSIMD128()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, no_condition);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "Smi");
}
const Register map = kScratchRegister;
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
__ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
+// Sets up the two registers fixed by the vector-IC load calling convention:
+// copies the instruction's type-feedback vector into the descriptor's vector
+// register and the feedback slot (as a Smi) into the slot register (rax).
+// Shared by the FLAG_vector_ics paths of the generic load instructions
+// (global / named / keyed), which only differ in the LInstruction type T.
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ Move(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register: the slot register is rax, which is
+  // also the call's result register, so the allocator never hands it out here.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
+  __ Move(VectorLoadICDescriptor::SlotRegister(),
+          Smi::FromInt(instr->hydrogen()->slot()));
+}
+
+
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(rax));
- __ Move(LoadIC::NameRegister(), instr->name());
+ __ Move(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(rax));
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
DCHECK(!value.is(cell));
__ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
// Store the value.
__ movp(Operand(cell, 0), value);
} else {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(rax));
- __ Move(LoadIC::NameRegister(), instr->name());
+ __ Move(LoadDescriptor::NameRegister(), instr->name());
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(rax));
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
- }
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
}
-bool LCodeGen::HandleExternalArrayOpRequiresPreScale(
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind) {
- Register key_reg = ToRegister(key);
- if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
- int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
- static_cast<int>(maximal_scale_factor);
- DCHECK(pre_shift_size > 0);
- __ shll(key_reg, Immediate(pre_shift_size));
- return true;
- }
- return false;
-}
-
-
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- if (!HandleExternalArrayOpRequiresPreScale(
- key, key_representation, elements_kind))
- __ SmiToInteger64(key_reg, key_reg);
+ __ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
- } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
- Representation key_representation =
- instr->hydrogen()->key()->representation();
- if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
- HandleExternalArrayOpRequiresPreScale(
- key, key_representation, elements_kind);
}
-
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
- } else if (IsSIMD128ElementsKind(elements_kind)) {
- __ movups(ToSIMD128Register(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32x4_ELEMENTS:
- case EXTERNAL_FLOAT64x2_ELEMENTS:
- case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case FLOAT32x4_ELEMENTS:
- case FLOAT64x2_ELEMENTS:
- case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
}
__ Load(result,
- BuildFastArrayOperand(instr->elements(),
- key,
+ BuildFastArrayOperand(instr->elements(), key,
instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- offset),
+ FAST_ELEMENTS, offset),
representation);
// Check for the hole value.
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
+ DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "hole");
}
}
}
if (constant_value & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
-
return Operand(elements_pointer_reg,
(constant_value << shift_size) + offset);
} else {
DCHECK(SmiValuesAre31Bits());
shift_size -= kSmiTagSize;
}
- if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
- // Make sure the key is pre-scaled against maximal_scale_factor.
- shift_size = static_cast<int>(maximal_scale_factor);
- }
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(LoadIC::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
- // No need to allocate this register.
- DCHECK(LoadIC::SlotRegister().is(rax));
- __ Move(LoadIC::SlotRegister(), Smi::FromInt(instr->hydrogen()->slot()));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr->environment());
+ DeoptimizeIf(is_smi, instr, "Smi");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "not a JavaScript object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "too many arguments");
__ Push(receiver);
__ movp(receiver, length);
}
+// Probes the megamorphic stub cache for (receiver, name); on a hit the probe
+// tail-calls straight into the cached handler. If the probe falls through,
+// control reaches the LoadIC miss path below. Receiver and name must already
+// sit in the LoadDescriptor's fixed registers (checked by the DCHECKs).
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+
+  // rbx is assumed free here; it must not alias the probe's inputs.
+  Register scratch = rbx;
+  DCHECK(!scratch.is(receiver) && !scratch.is(name));
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, no_reg);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ leave();
+  LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
// Deoptimize if minus zero.
__ movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "minus zero");
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr, "NaN");
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ testq(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ bind(&done);
}
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, dist);
__ bind(&below_one_half);
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
- __ RecordComment("Minus zero");
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr, "minus zero");
}
__ Set(output_reg, 0);
__ bind(&done);
}
-void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
- switch (instr->op()) {
- case kFloat32x4Zero: {
- XMMRegister result_reg = ToFloat32x4Register(instr->result());
- __ xorps(result_reg, result_reg);
- return;
- }
- case kFloat64x2Zero: {
- XMMRegister result_reg = ToFloat64x2Register(instr->result());
- __ xorpd(result_reg, result_reg);
- return;
- }
- case kInt32x4Zero: {
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- __ xorps(result_reg, result_reg);
- return;
- }
- default:
- UNREACHABLE();
- return;
- }
-}
-
-
-void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
- uint8_t select = 0;
- switch (instr->op()) {
- case kFloat32x4Coercion: {
- XMMRegister input_reg = ToFloat32x4Register(instr->value());
- XMMRegister result_reg = ToFloat32x4Register(instr->result());
- if (!result_reg.is(input_reg)) {
- __ movaps(result_reg, input_reg);
- }
- return;
- }
- case kFloat64x2Coercion: {
- XMMRegister input_reg = ToFloat64x2Register(instr->value());
- XMMRegister result_reg = ToFloat64x2Register(instr->result());
- if (!result_reg.is(input_reg)) {
- __ movaps(result_reg, input_reg);
- }
- return;
- }
- case kInt32x4Coercion: {
- XMMRegister input_reg = ToInt32x4Register(instr->value());
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- if (!result_reg.is(input_reg)) {
- __ movaps(result_reg, input_reg);
- }
- return;
- }
- case kSIMD128Change: {
- Comment(";;; deoptimize: can not perform representation change"
- "for float32x4 or int32x4");
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- case kFloat32x4Abs:
- case kFloat32x4Neg:
- case kFloat32x4Reciprocal:
- case kFloat32x4ReciprocalSqrt:
- case kFloat32x4Sqrt: {
- DCHECK(instr->value()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
- XMMRegister input_reg = ToFloat32x4Register(instr->value());
- switch (instr->op()) {
- case kFloat32x4Abs:
- __ absps(input_reg);
- break;
- case kFloat32x4Neg:
- __ negateps(input_reg);
- break;
- case kFloat32x4Reciprocal:
- __ rcpps(input_reg, input_reg);
- break;
- case kFloat32x4ReciprocalSqrt:
- __ rsqrtps(input_reg, input_reg);
- break;
- case kFloat32x4Sqrt:
- __ sqrtps(input_reg, input_reg);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- case kFloat64x2Abs:
- case kFloat64x2Neg:
- case kFloat64x2Sqrt: {
- DCHECK(instr->value()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
- XMMRegister input_reg = ToFloat64x2Register(instr->value());
- switch (instr->op()) {
- case kFloat64x2Abs:
- __ abspd(input_reg);
- break;
- case kFloat64x2Neg:
- __ negatepd(input_reg);
- break;
- case kFloat64x2Sqrt:
- __ sqrtpd(input_reg, input_reg);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- case kInt32x4Not:
- case kInt32x4Neg: {
- DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
- XMMRegister input_reg = ToInt32x4Register(instr->value());
- switch (instr->op()) {
- case kInt32x4Not:
- __ notps(input_reg);
- break;
- case kInt32x4Neg:
- __ pnegd(input_reg);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- case kFloat32x4BitsToInt32x4:
- case kFloat32x4ToInt32x4: {
- DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
- XMMRegister input_reg = ToFloat32x4Register(instr->value());
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- if (instr->op() == kFloat32x4BitsToInt32x4) {
- if (!result_reg.is(input_reg)) {
- __ movaps(result_reg, input_reg);
- }
- } else {
- DCHECK(instr->op() == kFloat32x4ToInt32x4);
- __ cvtps2dq(result_reg, input_reg);
- }
- return;
- }
- case kInt32x4BitsToFloat32x4:
- case kInt32x4ToFloat32x4: {
- DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
- XMMRegister input_reg = ToInt32x4Register(instr->value());
- XMMRegister result_reg = ToFloat32x4Register(instr->result());
- if (instr->op() == kInt32x4BitsToFloat32x4) {
- if (!result_reg.is(input_reg)) {
- __ movaps(result_reg, input_reg);
- }
- } else {
- DCHECK(instr->op() == kInt32x4ToFloat32x4);
- __ cvtdq2ps(result_reg, input_reg);
- }
- return;
- }
- case kFloat32x4Splat: {
- DCHECK(instr->hydrogen()->value()->representation().IsDouble());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister result_reg = ToFloat32x4Register(instr->result());
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ cvtsd2ss(xmm_scratch, input_reg);
- __ shufps(xmm_scratch, xmm_scratch, 0x0);
- __ movaps(result_reg, xmm_scratch);
- return;
- }
- case kInt32x4Splat: {
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register input_reg = ToRegister(instr->value());
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- __ movd(result_reg, input_reg);
- __ shufps(result_reg, result_reg, 0x0);
- return;
- }
- case kInt32x4GetSignMask: {
- DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
- XMMRegister input_reg = ToInt32x4Register(instr->value());
- Register result = ToRegister(instr->result());
- __ movmskps(result, input_reg);
- return;
- }
- case kFloat32x4GetSignMask: {
- DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
- XMMRegister input_reg = ToFloat32x4Register(instr->value());
- Register result = ToRegister(instr->result());
- __ movmskps(result, input_reg);
- return;
- }
- case kFloat32x4GetW:
- select++;
- case kFloat32x4GetZ:
- select++;
- case kFloat32x4GetY:
- select++;
- case kFloat32x4GetX: {
- DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
- XMMRegister input_reg = ToFloat32x4Register(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
-
- if (select == 0x0) {
- __ xorps(xmm_scratch, xmm_scratch);
- __ cvtss2sd(xmm_scratch, input_reg);
- if (!xmm_scratch.is(result)) {
- __ movaps(result, xmm_scratch);
- }
- } else {
- __ pshufd(xmm_scratch, input_reg, select);
- if (!xmm_scratch.is(result)) {
- __ xorps(result, result);
- }
- __ cvtss2sd(result, xmm_scratch);
- }
- return;
- }
- case kFloat64x2GetSignMask: {
- DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
- XMMRegister input_reg = ToFloat64x2Register(instr->value());
- Register result = ToRegister(instr->result());
- __ movmskpd(result, input_reg);
- return;
- }
- case kFloat64x2GetX: {
- DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
- XMMRegister input_reg = ToFloat64x2Register(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
-
- if (!input_reg.is(result)) {
- __ movaps(result, input_reg);
- }
- return;
- }
- case kFloat64x2GetY: {
- DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
- XMMRegister input_reg = ToFloat64x2Register(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
-
- if (!input_reg.is(result)) {
- __ movaps(result, input_reg);
- }
- __ shufpd(result, input_reg, 0x1);
- return;
- }
- case kInt32x4GetX:
- case kInt32x4GetY:
- case kInt32x4GetZ:
- case kInt32x4GetW:
- case kInt32x4GetFlagX:
- case kInt32x4GetFlagY:
- case kInt32x4GetFlagZ:
- case kInt32x4GetFlagW: {
- DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
- bool flag = false;
- switch (instr->op()) {
- case kInt32x4GetFlagX:
- flag = true;
- case kInt32x4GetX:
- break;
- case kInt32x4GetFlagY:
- flag = true;
- case kInt32x4GetY:
- select = 0x1;
- break;
- case kInt32x4GetFlagZ:
- flag = true;
- case kInt32x4GetZ:
- select = 0x2;
- break;
- case kInt32x4GetFlagW:
- flag = true;
- case kInt32x4GetW:
- select = 0x3;
- break;
- default:
- UNREACHABLE();
- }
-
- XMMRegister input_reg = ToInt32x4Register(instr->value());
- Register result = ToRegister(instr->result());
- if (select == 0x0) {
- __ movd(result, input_reg);
- } else {
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope(masm(), SSE4_1);
- __ extractps(result, input_reg, select);
- } else {
- XMMRegister xmm_scratch = xmm0;
- __ pshufd(xmm_scratch, input_reg, select);
- __ movd(result, xmm_scratch);
- }
- }
-
- if (flag) {
- Label false_value, done;
- __ testl(result, result);
- __ j(zero, &false_value, Label::kNear);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
- }
- return;
- }
- default:
- UNREACHABLE();
- return;
- }
-}
-
-
-void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
- uint8_t imm8 = 0; // for with operation
- switch (instr->op()) {
- case kFloat32x4Add:
- case kFloat32x4Sub:
- case kFloat32x4Mul:
- case kFloat32x4Div:
- case kFloat32x4Min:
- case kFloat32x4Max: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->right()->representation().IsFloat32x4());
- XMMRegister left_reg = ToFloat32x4Register(instr->left());
- XMMRegister right_reg = ToFloat32x4Register(instr->right());
- switch (instr->op()) {
- case kFloat32x4Add:
- __ addps(left_reg, right_reg);
- break;
- case kFloat32x4Sub:
- __ subps(left_reg, right_reg);
- break;
- case kFloat32x4Mul:
- __ mulps(left_reg, right_reg);
- break;
- case kFloat32x4Div:
- __ divps(left_reg, right_reg);
- break;
- case kFloat32x4Min:
- __ minps(left_reg, right_reg);
- break;
- case kFloat32x4Max:
- __ maxps(left_reg, right_reg);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- case kFloat32x4Scale: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->right()->representation().IsDouble());
- XMMRegister left_reg = ToFloat32x4Register(instr->left());
- XMMRegister right_reg = ToDoubleRegister(instr->right());
- XMMRegister scratch_reg = xmm0;
- __ xorps(scratch_reg, scratch_reg);
- __ cvtsd2ss(scratch_reg, right_reg);
- __ shufps(scratch_reg, scratch_reg, 0x0);
- __ mulps(left_reg, scratch_reg);
- return;
- }
- case kFloat64x2Add:
- case kFloat64x2Sub:
- case kFloat64x2Mul:
- case kFloat64x2Div:
- case kFloat64x2Min:
- case kFloat64x2Max: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
- DCHECK(instr->hydrogen()->right()->representation().IsFloat64x2());
- XMMRegister left_reg = ToFloat64x2Register(instr->left());
- XMMRegister right_reg = ToFloat64x2Register(instr->right());
- switch (instr->op()) {
- case kFloat64x2Add:
- __ addpd(left_reg, right_reg);
- break;
- case kFloat64x2Sub:
- __ subpd(left_reg, right_reg);
- break;
- case kFloat64x2Mul:
- __ mulpd(left_reg, right_reg);
- break;
- case kFloat64x2Div:
- __ divpd(left_reg, right_reg);
- break;
- case kFloat64x2Min:
- __ minpd(left_reg, right_reg);
- break;
- case kFloat64x2Max:
- __ maxpd(left_reg, right_reg);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- case kFloat64x2Scale: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
- DCHECK(instr->hydrogen()->right()->representation().IsDouble());
- XMMRegister left_reg = ToFloat64x2Register(instr->left());
- XMMRegister right_reg = ToDoubleRegister(instr->right());
- __ shufpd(right_reg, right_reg, 0x0);
- __ mulpd(left_reg, right_reg);
- return;
- }
- case kFloat32x4Shuffle: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
- if (instr->hydrogen()->right()->IsConstant() &&
- HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
- int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
- uint8_t select = static_cast<uint8_t>(value & 0xFF);
- XMMRegister left_reg = ToFloat32x4Register(instr->left());
- __ shufps(left_reg, left_reg, select);
- return;
- } else {
- Comment(";;; deoptimize: non-constant selector for shuffle");
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- }
- case kInt32x4Shuffle: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
- if (instr->hydrogen()->right()->IsConstant() &&
- HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
- int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
- uint8_t select = static_cast<uint8_t>(value & 0xFF);
- XMMRegister left_reg = ToInt32x4Register(instr->left());
- __ pshufd(left_reg, left_reg, select);
- return;
- } else {
- Comment(";;; deoptimize: non-constant selector for shuffle");
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- }
- case kInt32x4ShiftLeft:
- case kInt32x4ShiftRight:
- case kInt32x4ShiftRightArithmetic: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
- if (instr->hydrogen()->right()->IsConstant() &&
- HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
- int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
- uint8_t shift = static_cast<uint8_t>(value & 0xFF);
- XMMRegister left_reg = ToInt32x4Register(instr->left());
- switch (instr->op()) {
- case kInt32x4ShiftLeft:
- __ pslld(left_reg, shift);
- break;
- case kInt32x4ShiftRight:
- __ psrld(left_reg, shift);
- break;
- case kInt32x4ShiftRightArithmetic:
- __ psrad(left_reg, shift);
- break;
- default:
- UNREACHABLE();
- }
- return;
- } else {
- XMMRegister left_reg = ToInt32x4Register(instr->left());
- Register shift = ToRegister(instr->right());
- XMMRegister xmm_scratch = double_scratch0();
- __ movd(xmm_scratch, shift);
- switch (instr->op()) {
- case kInt32x4ShiftLeft:
- __ pslld(left_reg, xmm_scratch);
- break;
- case kInt32x4ShiftRight:
- __ psrld(left_reg, xmm_scratch);
- break;
- case kInt32x4ShiftRightArithmetic:
- __ psrad(left_reg, xmm_scratch);
- break;
- default:
- UNREACHABLE();
- }
- return;
- }
- }
- case kFloat32x4LessThan:
- case kFloat32x4LessThanOrEqual:
- case kFloat32x4Equal:
- case kFloat32x4NotEqual:
- case kFloat32x4GreaterThanOrEqual:
- case kFloat32x4GreaterThan: {
- DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->right()->representation().IsFloat32x4());
- XMMRegister left_reg = ToFloat32x4Register(instr->left());
- XMMRegister right_reg = ToFloat32x4Register(instr->right());
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- switch (instr->op()) {
- case kFloat32x4LessThan:
- if (result_reg.is(left_reg)) {
- __ cmpltps(result_reg, right_reg);
- } else if (result_reg.is(right_reg)) {
- __ cmpnltps(result_reg, left_reg);
- } else {
- __ movaps(result_reg, left_reg);
- __ cmpltps(result_reg, right_reg);
- }
- break;
- case kFloat32x4LessThanOrEqual:
- if (result_reg.is(left_reg)) {
- __ cmpleps(result_reg, right_reg);
- } else if (result_reg.is(right_reg)) {
- __ cmpnleps(result_reg, left_reg);
- } else {
- __ movaps(result_reg, left_reg);
- __ cmpleps(result_reg, right_reg);
- }
- break;
- case kFloat32x4Equal:
- if (result_reg.is(left_reg)) {
- __ cmpeqps(result_reg, right_reg);
- } else if (result_reg.is(right_reg)) {
- __ cmpeqps(result_reg, left_reg);
- } else {
- __ movaps(result_reg, left_reg);
- __ cmpeqps(result_reg, right_reg);
- }
- break;
- case kFloat32x4NotEqual:
- if (result_reg.is(left_reg)) {
- __ cmpneqps(result_reg, right_reg);
- } else if (result_reg.is(right_reg)) {
- __ cmpneqps(result_reg, left_reg);
- } else {
- __ movaps(result_reg, left_reg);
- __ cmpneqps(result_reg, right_reg);
- }
- break;
- case kFloat32x4GreaterThanOrEqual:
- if (result_reg.is(left_reg)) {
- __ cmpnltps(result_reg, right_reg);
- } else if (result_reg.is(right_reg)) {
- __ cmpltps(result_reg, left_reg);
- } else {
- __ movaps(result_reg, left_reg);
- __ cmpnltps(result_reg, right_reg);
- }
- break;
- case kFloat32x4GreaterThan:
- if (result_reg.is(left_reg)) {
- __ cmpnleps(result_reg, right_reg);
- } else if (result_reg.is(right_reg)) {
- __ cmpleps(result_reg, left_reg);
- } else {
- __ movaps(result_reg, left_reg);
- __ cmpnleps(result_reg, right_reg);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- case kInt32x4And:
- case kInt32x4Or:
- case kInt32x4Xor:
- case kInt32x4Add:
- case kInt32x4Sub:
- case kInt32x4Mul:
- case kInt32x4GreaterThan:
- case kInt32x4Equal:
- case kInt32x4LessThan: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
- DCHECK(instr->hydrogen()->right()->representation().IsInt32x4());
- XMMRegister left_reg = ToInt32x4Register(instr->left());
- XMMRegister right_reg = ToInt32x4Register(instr->right());
- switch (instr->op()) {
- case kInt32x4And:
- __ andps(left_reg, right_reg);
- break;
- case kInt32x4Or:
- __ orps(left_reg, right_reg);
- break;
- case kInt32x4Xor:
- __ xorps(left_reg, right_reg);
- break;
- case kInt32x4Add:
- __ paddd(left_reg, right_reg);
- break;
- case kInt32x4Sub:
- __ psubd(left_reg, right_reg);
- break;
- case kInt32x4Mul:
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope(masm(), SSE4_1);
- __ pmulld(left_reg, right_reg);
- } else {
- // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
- XMMRegister xmm_scratch = xmm0;
- __ movaps(xmm_scratch, left_reg);
- __ pmuludq(left_reg, right_reg);
- __ psrldq(xmm_scratch, 4);
- __ psrldq(right_reg, 4);
- __ pmuludq(xmm_scratch, right_reg);
- __ pshufd(left_reg, left_reg, 8);
- __ pshufd(xmm_scratch, xmm_scratch, 8);
- __ punpackldq(left_reg, xmm_scratch);
- }
- break;
- case kInt32x4GreaterThan:
- __ pcmpgtd(left_reg, right_reg);
- break;
- case kInt32x4Equal:
- __ pcmpeqd(left_reg, right_reg);
- break;
- case kInt32x4LessThan: {
- XMMRegister xmm_scratch = xmm0;
- __ movaps(xmm_scratch, right_reg);
- __ pcmpgtd(xmm_scratch, left_reg);
- __ movaps(left_reg, xmm_scratch);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- case kFloat32x4WithW:
- imm8++;
- case kFloat32x4WithZ:
- imm8++;
- case kFloat32x4WithY:
- imm8++;
- case kFloat32x4WithX: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->right()->representation().IsDouble());
- XMMRegister left_reg = ToFloat32x4Register(instr->left());
- XMMRegister right_reg = ToDoubleRegister(instr->right());
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ cvtsd2ss(xmm_scratch, right_reg);
- if (CpuFeatures::IsSupported(SSE4_1)) {
- imm8 = imm8 << 4;
- CpuFeatureScope scope(masm(), SSE4_1);
- __ insertps(left_reg, xmm_scratch, imm8);
- } else {
- __ subq(rsp, Immediate(kFloat32x4Size));
- __ movups(Operand(rsp, 0), left_reg);
- __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
- __ movups(left_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kFloat32x4Size));
- }
- return;
- }
- case kFloat64x2WithX: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
- DCHECK(instr->hydrogen()->right()->representation().IsDouble());
- XMMRegister left_reg = ToFloat64x2Register(instr->left());
- XMMRegister right_reg = ToDoubleRegister(instr->right());
- __ subq(rsp, Immediate(kFloat64x2Size));
- __ movups(Operand(rsp, 0), left_reg);
- __ movsd(Operand(rsp, 0 * kDoubleSize), right_reg);
- __ movups(left_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kFloat64x2Size));
- return;
- }
- case kFloat64x2WithY: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
- DCHECK(instr->hydrogen()->right()->representation().IsDouble());
- XMMRegister left_reg = ToFloat64x2Register(instr->left());
- XMMRegister right_reg = ToDoubleRegister(instr->right());
- __ subq(rsp, Immediate(kFloat64x2Size));
- __ movups(Operand(rsp, 0), left_reg);
- __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
- __ movups(left_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kFloat64x2Size));
- return;
- }
- case kFloat64x2Constructor: {
- DCHECK(instr->hydrogen()->left()->representation().IsDouble());
- DCHECK(instr->hydrogen()->right()->representation().IsDouble());
- XMMRegister left_reg = ToDoubleRegister(instr->left());
- XMMRegister right_reg = ToDoubleRegister(instr->right());
- XMMRegister result_reg = ToFloat64x2Register(instr->result());
- __ subq(rsp, Immediate(kFloat64x2Size));
- __ movsd(Operand(rsp, 0 * kDoubleSize), left_reg);
- __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
- __ movups(result_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kFloat64x2Size));
- return;
- }
- case kInt32x4WithW:
- imm8++;
- case kInt32x4WithZ:
- imm8++;
- case kInt32x4WithY:
- imm8++;
- case kInt32x4WithX: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
- DCHECK(instr->hydrogen()->right()->representation().IsInteger32());
- XMMRegister left_reg = ToInt32x4Register(instr->left());
- Register right_reg = ToRegister(instr->right());
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope(masm(), SSE4_1);
- __ pinsrd(left_reg, right_reg, imm8);
- } else {
- __ subq(rsp, Immediate(kInt32x4Size));
- __ movdqu(Operand(rsp, 0), left_reg);
- __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
- __ movdqu(left_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kInt32x4Size));
- }
- return;
- }
- case kInt32x4WithFlagW:
- imm8++;
- case kInt32x4WithFlagZ:
- imm8++;
- case kInt32x4WithFlagY:
- imm8++;
- case kInt32x4WithFlagX: {
- DCHECK(instr->left()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
- DCHECK(instr->hydrogen()->right()->representation().IsTagged());
- HType type = instr->hydrogen()->right()->type();
- XMMRegister left_reg = ToInt32x4Register(instr->left());
- Register right_reg = ToRegister(instr->right());
- Label load_false_value, done;
- if (type.IsBoolean()) {
- __ subq(rsp, Immediate(kInt32x4Size));
- __ movups(Operand(rsp, 0), left_reg);
- __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
- __ j(not_equal, &load_false_value, Label::kNear);
- } else {
- Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- // load true value.
- __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
- __ jmp(&done, Label::kNear);
- __ bind(&load_false_value);
- __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
- __ bind(&done);
- __ movups(left_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kInt32x4Size));
- return;
- }
- default:
- UNREACHABLE();
- return;
- }
-}
-
-
-void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
- switch (instr->op()) {
- case kFloat32x4Select: {
- DCHECK(instr->hydrogen()->first()->representation().IsInt32x4());
- DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->third()->representation().IsFloat32x4());
-
- XMMRegister mask_reg = ToInt32x4Register(instr->first());
- XMMRegister left_reg = ToFloat32x4Register(instr->second());
- XMMRegister right_reg = ToFloat32x4Register(instr->third());
- XMMRegister result_reg = ToFloat32x4Register(instr->result());
- XMMRegister temp_reg = xmm0;
-
- // Copy mask.
- __ movaps(temp_reg, mask_reg);
- // Invert it.
- __ notps(temp_reg);
- // temp_reg = temp_reg & falseValue.
- __ andps(temp_reg, right_reg);
-
- if (!result_reg.is(mask_reg)) {
- if (result_reg.is(left_reg)) {
- // result_reg = result_reg & trueValue.
- __ andps(result_reg, mask_reg);
- // out = result_reg | temp_reg.
- __ orps(result_reg, temp_reg);
- } else {
- __ movaps(result_reg, mask_reg);
- // result_reg = result_reg & trueValue.
- __ andps(result_reg, left_reg);
- // out = result_reg | temp_reg.
- __ orps(result_reg, temp_reg);
- }
- } else {
- // result_reg = result_reg & trueValue.
- __ andps(result_reg, left_reg);
- // out = result_reg | temp_reg.
- __ orps(result_reg, temp_reg);
- }
- return;
- }
- case kInt32x4Select: {
- DCHECK(instr->hydrogen()->first()->representation().IsInt32x4());
- DCHECK(instr->hydrogen()->second()->representation().IsInt32x4());
- DCHECK(instr->hydrogen()->third()->representation().IsInt32x4());
-
- XMMRegister mask_reg = ToInt32x4Register(instr->first());
- XMMRegister left_reg = ToInt32x4Register(instr->second());
- XMMRegister right_reg = ToInt32x4Register(instr->third());
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- XMMRegister temp_reg = xmm0;
-
- // Copy mask.
- __ movaps(temp_reg, mask_reg);
- // Invert it.
- __ notps(temp_reg);
- // temp_reg = temp_reg & falseValue.
- __ andps(temp_reg, right_reg);
-
- if (!result_reg.is(mask_reg)) {
- if (result_reg.is(left_reg)) {
- // result_reg = result_reg & trueValue.
- __ andps(result_reg, mask_reg);
- // out = result_reg | temp_reg.
- __ orps(result_reg, temp_reg);
- } else {
- __ movaps(result_reg, mask_reg);
- // result_reg = result_reg & trueValue.
- __ andps(result_reg, left_reg);
- // out = result_reg | temp_reg.
- __ orps(result_reg, temp_reg);
- }
- } else {
- // result_reg = result_reg & trueValue.
- __ andps(result_reg, left_reg);
- // out = result_reg | temp_reg.
- __ orps(result_reg, temp_reg);
- }
- return;
- }
- case kFloat32x4ShuffleMix: {
- DCHECK(instr->first()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->first()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->third()->representation().IsInteger32());
- if (instr->hydrogen()->third()->IsConstant() &&
- HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
- int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
- uint8_t select = static_cast<uint8_t>(value & 0xFF);
- XMMRegister first_reg = ToFloat32x4Register(instr->first());
- XMMRegister second_reg = ToFloat32x4Register(instr->second());
- __ shufps(first_reg, second_reg, select);
- return;
- } else {
- Comment(";;; deoptimize: non-constant selector for shuffle");
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- }
- case kFloat32x4Clamp: {
- DCHECK(instr->first()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->first()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
- DCHECK(instr->hydrogen()->third()->representation().IsFloat32x4());
-
- XMMRegister value_reg = ToFloat32x4Register(instr->first());
- XMMRegister lower_reg = ToFloat32x4Register(instr->second());
- XMMRegister upper_reg = ToFloat32x4Register(instr->third());
- __ minps(value_reg, upper_reg);
- __ maxps(value_reg, lower_reg);
- return;
- }
- case kFloat64x2Clamp: {
- DCHECK(instr->first()->Equals(instr->result()));
- DCHECK(instr->hydrogen()->first()->representation().IsFloat64x2());
- DCHECK(instr->hydrogen()->second()->representation().IsFloat64x2());
- DCHECK(instr->hydrogen()->third()->representation().IsFloat64x2());
-
- XMMRegister value_reg = ToFloat64x2Register(instr->first());
- XMMRegister lower_reg = ToFloat64x2Register(instr->second());
- XMMRegister upper_reg = ToFloat64x2Register(instr->third());
- __ minpd(value_reg, upper_reg);
- __ maxpd(value_reg, lower_reg);
- return;
- }
- default:
- UNREACHABLE();
- return;
- }
-}
-
-
-void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
- switch (instr->op()) {
- case kFloat32x4Constructor: {
- DCHECK(instr->hydrogen()->x()->representation().IsDouble());
- DCHECK(instr->hydrogen()->y()->representation().IsDouble());
- DCHECK(instr->hydrogen()->z()->representation().IsDouble());
- DCHECK(instr->hydrogen()->w()->representation().IsDouble());
- XMMRegister x_reg = ToDoubleRegister(instr->x());
- XMMRegister y_reg = ToDoubleRegister(instr->y());
- XMMRegister z_reg = ToDoubleRegister(instr->z());
- XMMRegister w_reg = ToDoubleRegister(instr->w());
- XMMRegister result_reg = ToFloat32x4Register(instr->result());
- __ subq(rsp, Immediate(kFloat32x4Size));
- __ xorps(xmm0, xmm0);
- __ cvtsd2ss(xmm0, x_reg);
- __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
- __ xorps(xmm0, xmm0);
- __ cvtsd2ss(xmm0, y_reg);
- __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
- __ xorps(xmm0, xmm0);
- __ cvtsd2ss(xmm0, z_reg);
- __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
- __ xorps(xmm0, xmm0);
- __ cvtsd2ss(xmm0, w_reg);
- __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
- __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
- __ addq(rsp, Immediate(kFloat32x4Size));
- return;
- }
- case kInt32x4Constructor: {
- DCHECK(instr->hydrogen()->x()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->y()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->z()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->w()->representation().IsInteger32());
- Register x_reg = ToRegister(instr->x());
- Register y_reg = ToRegister(instr->y());
- Register z_reg = ToRegister(instr->z());
- Register w_reg = ToRegister(instr->w());
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- __ subq(rsp, Immediate(kInt32x4Size));
- __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
- __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
- __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
- __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
- __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
- __ addq(rsp, Immediate(kInt32x4Size));
- return;
- }
- case kInt32x4Bool: {
- DCHECK(instr->hydrogen()->x()->representation().IsTagged());
- DCHECK(instr->hydrogen()->y()->representation().IsTagged());
- DCHECK(instr->hydrogen()->z()->representation().IsTagged());
- DCHECK(instr->hydrogen()->w()->representation().IsTagged());
- HType x_type = instr->hydrogen()->x()->type();
- HType y_type = instr->hydrogen()->y()->type();
- HType z_type = instr->hydrogen()->z()->type();
- HType w_type = instr->hydrogen()->w()->type();
- if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
- !z_type.IsBoolean() || !w_type.IsBoolean()) {
- Comment(";;; deoptimize: other types for int32x4.bool.");
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- XMMRegister result_reg = ToInt32x4Register(instr->result());
- Register x_reg = ToRegister(instr->x());
- Register y_reg = ToRegister(instr->y());
- Register z_reg = ToRegister(instr->z());
- Register w_reg = ToRegister(instr->w());
- Label load_false_x, done_x, load_false_y, done_y,
- load_false_z, done_z, load_false_w, done_w;
- __ subq(rsp, Immediate(kInt32x4Size));
-
- __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
- __ j(not_equal, &load_false_x, Label::kNear);
- __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
- __ jmp(&done_x, Label::kNear);
- __ bind(&load_false_x);
- __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
- __ bind(&done_x);
-
- __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
- __ j(not_equal, &load_false_y, Label::kNear);
- __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
- __ jmp(&done_y, Label::kNear);
- __ bind(&load_false_y);
- __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
- __ bind(&done_y);
-
- __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
- __ j(not_equal, &load_false_z, Label::kNear);
- __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
- __ jmp(&done_z, Label::kNear);
- __ bind(&load_false_z);
- __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
- __ bind(&done_z);
-
- __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
- __ j(not_equal, &load_false_w, Label::kNear);
- __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
- __ jmp(&done_w, Label::kNear);
- __ bind(&load_false_w);
- __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
- __ bind(&done_w);
-
- __ movups(result_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kInt32x4Size));
- return;
- }
- default:
- UNREACHABLE();
- return;
- }
-}
-
-
void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- Register exponent = rdx;
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(exponent));
+ ToRegister(instr->right()).is(tagged_exponent));
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(xmm1));
DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
- __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr->environment());
+ __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
+ __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ Move(StoreIC::NameRegister(), instr->hydrogen()->name());
+ __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- if (!HandleExternalArrayOpRequiresPreScale(
- key, key_representation, elements_kind))
- __ SmiToInteger64(key_reg, key_reg);
+ __ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
- } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
- Representation key_representation =
- instr->hydrogen()->key()->representation();
- if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
- HandleExternalArrayOpRequiresPreScale(
- key, key_representation, elements_kind);
}
-
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
__ cvtsd2ss(value, value);
__ movss(operand, value);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
- } else if (IsSIMD128ElementsKind(elements_kind)) {
- __ movups(operand, ToSIMD128Register(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
__ movl(operand, value);
break;
case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT32x4_ELEMENTS:
- case EXTERNAL_FLOAT64x2_ELEMENTS:
- case EXTERNAL_INT32x4_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case FLOAT32x4_ELEMENTS:
- case FLOAT64x2_ELEMENTS:
- case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
__ ucomisd(value, value);
__ j(parity_odd, &have_value, Label::kNear); // NaN.
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ Set(kScratchRegister,
+ bit_cast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
__ movq(value, kScratchRegister);
__ bind(&have_value);
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
instr_->temp2(), SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
instr_->temp2(), UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
}
-void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
- Runtime::FunctionId id) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Move(reg, Smi::FromInt(0));
-
- {
- PushSafepointRegistersScope scope(this);
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ movp(kScratchRegister, rax);
- }
- __ movp(reg, kScratchRegister);
-}
-
-
-template<class T>
-void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
- class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
- public:
- DeferredSIMD128ToTagged(LCodeGen* codegen,
- LSIMD128ToTagged* instr,
- Runtime::FunctionId id)
- : LDeferredCode(codegen), instr_(instr), id_(id) { }
- virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
- }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
- private:
- LSIMD128ToTagged* instr_;
- Runtime::FunctionId id_;
- };
-
- XMMRegister input_reg = ToSIMD128Register(instr->value());
- Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
- Register tmp2 = ToRegister(instr->temp2());
- Register tmp3 = ToRegister(instr->temp3());
-
- DeferredSIMD128ToTagged* deferred =
- new(zone()) DeferredSIMD128ToTagged(this, instr,
- static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
- if (FLAG_inline_new) {
- if (T::kInstanceType == FLOAT32x4_TYPE) {
- __ AllocateFloat32x4(reg, tmp, tmp2, tmp3, deferred->entry());
- } else if (T::kInstanceType == INT32x4_TYPE) {
- __ AllocateInt32x4(reg, tmp, tmp2, tmp3, deferred->entry());
- } else if (T::kInstanceType == FLOAT64x2_TYPE) {
- __ AllocateFloat64x2(reg, tmp, tmp2, tmp3, deferred->entry());
- }
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
-
- // Load the inner FixedTypedArray object.
- __ movp(tmp, FieldOperand(reg, T::kValueOffset));
-
- __ movups(FieldOperand(tmp, FixedTypedArrayBase::kDataOffset), input_reg);
-}
-
-
-void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
- if (instr->value()->IsFloat32x4Register()) {
- HandleSIMD128ToTagged<Float32x4>(instr);
- } else if (instr->value()->IsFloat64x2Register()) {
- HandleSIMD128ToTagged<Float64x2>(instr);
- } else {
- DCHECK(instr->value()->IsInt32x4Register());
- HandleSIMD128ToTagged<Int32x4>(instr);
- }
-}
-
-
void LCodeGen::DoSmiTag(LSmiTag* instr) {
HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ DeoptimizeIf(NegateCondition(is_smi), instr, "overflow");
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi");
} else {
__ AssertSmi(input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- XMMRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ XMMRegister result_reg, NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
}
if (deoptimize_on_minus_zero) {
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, env);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ xorps(result_reg, result_reg);
__ divsd(result_reg, result_reg);
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
__ Set(input_reg, 0);
- __ jmp(done);
} else {
- Label bailout;
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ TaggedToI(input_reg, input_reg, xmm_temp,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
- __ jmp(done);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ XMMRegister scratch = ToDoubleRegister(instr->temp());
+ DCHECK(!scratch.is(xmm0));
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2si(input_reg, xmm0);
+ __ Cvtlsi2sd(scratch, input_reg);
+ __ ucomisd(xmm0, scratch);
+ DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(parity_even, instr, "NaN");
+ if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+ __ testl(input_reg, input_reg);
+ __ j(not_zero, done);
+ __ movmskpd(input_reg, xmm0);
+ __ andl(input_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr, "minus zero");
+ }
}
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_, done());
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
-}
-
-
-template<class T>
-void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsSIMD128Register());
- LOperand* temp = instr->temp();
- DCHECK(temp->IsRegister());
-
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToSIMD128Register(result);
- Register temp_reg = ToRegister(temp);
-
- __ testp(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- __ CmpObjectType(input_reg, T::kInstanceType, kScratchRegister);
- DeoptimizeIf(not_equal, instr->environment());
-
- // Load the inner FixedTypedArray object.
- __ movp(temp_reg, FieldOperand(input_reg, T::kValueOffset));
-
- __ movups(
- result_reg, FieldOperand(temp_reg, FixedTypedArrayBase::kDataOffset));
-}
-
-
-void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
- if (instr->representation().IsFloat32x4()) {
- HandleTaggedToSIMD128<Float32x4>(instr);
- } else if (instr->representation().IsFloat64x2()) {
- HandleTaggedToSIMD128<Float64x2>(instr);
- } else {
- DCHECK(instr->representation().IsInt32x4());
- HandleTaggedToSIMD128<Int32x4>(instr);
- }
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
if (instr->truncating()) {
__ TruncateDoubleToI(result_reg, input_reg);
} else {
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
XMMRegister xmm_scratch = double_scratch0();
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+ &is_nan, &minus_zero, dist);
+ __ jmp(&done, dist);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
}
}
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- Label bailout, done;
+ Label lost_precision, is_nan, minus_zero, done;
XMMRegister xmm_scratch = double_scratch0();
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
- __ jmp(&done, Label::kNear);
- __ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+ &minus_zero, dist);
+ __ jmp(&done, dist);
+ __ bind(&lost_precision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
+ __ bind(&is_nan);
+ DeoptimizeIf(no_condition, instr, "NaN");
+ __ bind(&minus_zero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
-
__ Integer32ToSmi(result_reg, result_reg);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr->environment());
+ DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "Smi");
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "value mismatch");
}
__ testp(rax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr, "instance migration failed");
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
-
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(no_condition, instr->environment(), type);
+ DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "undefined");
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpp(rax, null_value);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr, "null");
Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr->environment());
+ DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
__ bind(&use_cache);
}
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr, "no cache");
}
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register object_;