// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
} else {
__ Push(rdi);
}
+// Returns the XMM register assigned to a float64x2 LOperand; asserts the
+// operand kind before delegating to the shared SIMD128 register lookup.
+XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
+ ASSERT(op->IsFloat64x2Register());
+ return ToSIMD128Register(op->index());
+}
+
+
XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
ASSERT(op->IsInt32x4Register());
return ToSIMD128Register(op->index());
XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
- ASSERT(op->IsFloat32x4Register() || op->IsInt32x4Register());
+ ASSERT(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
+ op->IsInt32x4Register());
+ // All SIMD128 representations share the XMM register file, so one
+ // index-based lookup serves float32x4, float64x2 and int32x4 operands.
return ToSIMD128Register(op->index());
}
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
- op->IsFloat32x4StackSlot() || op->IsInt32x4StackSlot());
+ op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
+ op->IsInt32x4StackSlot());
if (NeedsEagerFrame()) {
return Operand(rbp, StackSlotOffset(op->index()));
} else {
} else if (op->IsFloat32x4StackSlot()) {
translation->StoreSIMD128StackSlot(op->index(),
Translation::FLOAT32x4_STACK_SLOT);
+ } else if (op->IsFloat64x2StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT64x2_STACK_SLOT);
} else if (op->IsInt32x4StackSlot()) {
translation->StoreSIMD128StackSlot(op->index(),
Translation::INT32x4_STACK_SLOT);
} else if (op->IsFloat32x4Register()) {
XMMRegister reg = ToFloat32x4Register(op);
translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
+ } else if (op->IsFloat64x2Register()) {
+ XMMRegister reg = ToFloat64x2Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
} else if (op->IsInt32x4Register()) {
XMMRegister reg = ToInt32x4Register(op);
translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
ASSERT(ToRegister(instr->result()).is(rax));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+// Emits a flooring int32 division (quotient rounded toward negative
+// infinity). idivl fixes the register assignment: dividend/result in rax,
+// remainder in rdx, so the divisor must live elsewhere.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(result.is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to rdx (= remainder).
+ __ cdq();
+ __ idivl(divisor);
+
+ // idivl truncates toward zero. If the remainder is non-zero and the
+ // operands' signs differ (xor leaves the sign bit set; sarl by 31 turns
+ // that into -1, otherwise 0), add -1 to floor the quotient.
+ Label done;
+ __ testl(remainder, remainder);
+ __ j(zero, &done, Label::kNear);
+ __ xorl(remainder, divisor);
+ __ sarl(remainder, Immediate(31));
+ __ addl(result, remainder);
+ __ bind(&done);
+}
+
+
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
ASSERT(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->left());
- Register divisor = ToRegister(instr->right());
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
Register remainder = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
ASSERT(dividend.is(rax));
ASSERT(remainder.is(rdx));
- ASSERT(result.is(rax));
+ ASSERT(ToRegister(instr->result()).is(rax));
ASSERT(!divisor.is(rax));
ASSERT(!divisor.is(rdx));
__ cdq();
__ idivl(divisor);
- if (hdiv->IsMathFloorOfDiv()) {
- Label done;
- __ testl(remainder, remainder);
- __ j(zero, &done, Label::kNear);
- __ xorl(remainder, divisor);
- __ sarl(remainder, Immediate(31));
- __ addl(result, remainder);
- __ bind(&done);
- } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
case Token::SHL:
if (shift_count != 0) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ shl(ToRegister(left), Immediate(shift_count));
+ __ shlp(ToRegister(left), Immediate(shift_count));
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
+ ASSERT(!target_rep.IsSmi()); // No support for smi-immediates.
int32_t offset = ToInteger32(LConstantOperand::cast(right));
if (is_p) {
__ leap(ToRegister(instr->result()),
}
} else {
if (right->IsConstantOperand()) {
+ ASSERT(!target_rep.IsSmi()); // No support for smi-immediates.
if (is_p) {
__ addp(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
__ j(equal, instr->TrueLabel(chunk_));
}
+ if (expected.Contains(ToBooleanStub::FLOAT64x2)) {
+ // Float64x2 value -> true.
+ __ CmpInstanceType(map, FLOAT64x2_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::INT32x4)) {
// Int32x4 value -> true.
__ CmpInstanceType(map, INT32x4_TYPE);
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ Push(ToRegister(instr->left()));
__ Push(ToRegister(instr->right()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
__ Push(ToRegister(instr->value()));
__ Push(instr->function());
// safepoint with two arguments because stub is going to
// remove the third argument from the stack before jumping
// to instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
__ SmiToInteger32(reg, reg);
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
- __ shl(reg, Immediate(kPointerSizeLog2));
+ __ shlp(reg, Immediate(kPointerSizeLog2));
__ addp(rsp, reg);
__ jmp(return_addr_reg);
}
}
Representation representation = access.representation();
- if (representation.IsSmi() &&
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
instr->hydrogen()->representation().IsInteger32()) {
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(object, offset), representation);
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+ }
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
representation = Representation::Integer32();
}
int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
static_cast<int>(maximal_scale_factor);
ASSERT(pre_shift_size > 0);
- __ shl(ToRegister(key), Immediate(pre_shift_size));
+ __ shll(ToRegister(key), Immediate(pre_shift_size));
}
}
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->representation();
- if (representation.IsInteger32() &&
+ if (representation.IsInteger32() && SmiValuesAre32Bits() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
ASSERT(!requires_hole_check);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- offset,
- instr->additional_index()),
- Representation::Smi());
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+ }
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
}
__ bind(&allocated);
__ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shl(tmp2, Immediate(1));
- __ shr(tmp2, Immediate(1));
+ __ shlq(tmp2, Immediate(1));
+ __ shrq(tmp2, Immediate(1));
__ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ xorps(result_reg, result_reg);
return;
}
+ case kFloat64x2Zero: {
+ XMMRegister result_reg = ToFloat64x2Register(instr->result());
+ __ xorpd(result_reg, result_reg);
+ return;
+ }
case kInt32x4Zero: {
XMMRegister result_reg = ToInt32x4Register(instr->result());
__ xorps(result_reg, result_reg);
}
return;
}
+ case kFloat64x2Abs:
+ case kFloat64x2Neg:
+ case kFloat64x2Sqrt: {
+ ASSERT(instr->value()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ switch (instr->op()) {
+ case kFloat64x2Abs:
+ __ abspd(input_reg);
+ break;
+ case kFloat64x2Neg:
+ __ negatepd(input_reg);
+ break;
+ case kFloat64x2Sqrt:
+ __ sqrtpd(input_reg, input_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
case kInt32x4Not:
case kInt32x4Neg: {
ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
}
return;
}
+ case kFloat64x2GetSignMask: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ Register result = ToRegister(instr->result());
+ __ movmskpd(result, input_reg);
+ return;
+ }
+ case kFloat64x2GetX: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (!input_reg.is(result)) {
+ __ movaps(result, input_reg);
+ }
+ return;
+ }
+ case kFloat64x2GetY: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (!input_reg.is(result)) {
+ __ movaps(result, input_reg);
+ }
+ __ shufpd(result, input_reg, 0x1);
+ return;
+ }
case kInt32x4GetX:
case kInt32x4GetY:
case kInt32x4GetZ:
__ mulps(left_reg, scratch_reg);
return;
}
+ case kFloat64x2Add:
+ case kFloat64x2Sub:
+ case kFloat64x2Mul:
+ case kFloat64x2Div:
+ case kFloat64x2Min:
+ case kFloat64x2Max: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsFloat64x2());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToFloat64x2Register(instr->right());
+ switch (instr->op()) {
+ case kFloat64x2Add:
+ __ addpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Sub:
+ __ subpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Mul:
+ __ mulpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Div:
+ __ divpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Min:
+ __ minpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Max:
+ __ maxpd(left_reg, right_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat64x2Scale: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ shufpd(right_reg, right_reg, 0x0);
+ __ mulpd(left_reg, right_reg);
+ return;
+ }
case kFloat32x4Shuffle: {
ASSERT(instr->left()->Equals(instr->result()));
ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
}
return;
}
+ case kFloat64x2WithX: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ subq(rsp, Immediate(kFloat64x2Size));
+ __ movups(Operand(rsp, 0), left_reg);
+ __ movsd(Operand(rsp, 0 * kDoubleSize), right_reg);
+ __ movups(left_reg, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kFloat64x2Size));
+ return;
+ }
+ case kFloat64x2WithY: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ subq(rsp, Immediate(kFloat64x2Size));
+ __ movups(Operand(rsp, 0), left_reg);
+ __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
+ __ movups(left_reg, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kFloat64x2Size));
+ return;
+ }
+ case kFloat64x2Constructor: {
+ ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToDoubleRegister(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ XMMRegister result_reg = ToFloat64x2Register(instr->result());
+ __ subq(rsp, Immediate(kFloat64x2Size));
+ __ movsd(Operand(rsp, 0 * kDoubleSize), left_reg);
+ __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
+ __ movups(result_reg, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kFloat64x2Size));
+ return;
+ }
case kInt32x4WithW:
imm8++;
case kInt32x4WithZ:
__ maxps(value_reg, lower_reg);
return;
}
+ case kFloat64x2Clamp: {
+ ASSERT(instr->first()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->first()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->second()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->third()->representation().IsFloat64x2());
+
+ XMMRegister value_reg = ToFloat64x2Register(instr->first());
+ XMMRegister lower_reg = ToFloat64x2Register(instr->second());
+ XMMRegister upper_reg = ToFloat64x2Register(instr->third());
+ __ minpd(value_reg, upper_reg);
+ __ maxpd(value_reg, lower_reg);
+ return;
+ }
default:
UNREACHABLE();
return;
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
__ Set(rax, instr->arity());
// No cell in ebx for construct type feedback in optimized code
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
: DONT_OVERRIDE;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
}
Register object = ToRegister(instr->object());
- Handle<Map> transition = instr->transition();
SmiCheck check_needed = hinstr->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition cc = masm()->CheckSmi(value);
DeoptimizeIf(cc, instr->environment());
- // We know that value is a smi now, so we can omit the check below.
+ // We know now that value is not a smi, so we can omit the check below.
check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
- ASSERT(transition.is_null());
ASSERT(access.IsInobject());
+ ASSERT(!hinstr->has_transition());
ASSERT(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(FieldOperand(object, offset), value);
return;
}
- if (!transition.is_null()) {
+ if (hinstr->has_transition()) {
+ Handle<Map> transition = hinstr->transition_map();
+ AddDeprecationDependency(transition);
if (!hinstr->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
__ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
- if (representation.IsSmi() &&
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
hinstr->value()->representation().IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(write_register, offset), representation);
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+ }
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
representation = Representation::Integer32();
}
}
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, check->environment());
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- HBoundsCheck* hinstr = instr->hydrogen();
- if (hinstr->skip_check()) return;
-
- Representation representation = hinstr->length()->representation();
- ASSERT(representation.Equals(hinstr->index()->representation()));
+ Representation representation = instr->hydrogen()->length()->representation();
+ ASSERT(representation.Equals(instr->hydrogen()->index()->representation()));
ASSERT(representation.IsSmiOrInteger32());
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
-
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
+ if (instr->length()->IsConstantOperand()) {
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+ Register index = ToRegister(instr->index());
+ if (representation.IsSmi()) {
+ __ Cmp(index, Smi::FromInt(length));
+ } else {
+ __ cmpl(index, Immediate(length));
+ }
+ // This branch compares (index, length); every other branch compares
+ // (length, index), so flip the condition to keep the same meaning.
+ cc = ReverseCondition(cc);
+ } else if (instr->index()->IsConstantOperand()) {
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(reg, Smi::FromInt(constant_index));
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, Immediate(constant_index));
+ __ cmpl(length, Immediate(index));
}
} else {
- Register reg2 = ToRegister(instr->index());
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpp(reg, reg2);
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, reg2);
+ __ cmpl(length, Immediate(index));
}
}
} else {
- Operand length = ToOperand(instr->length());
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Register index = ToRegister(instr->index());
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(length, Smi::FromInt(constant_index));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, Immediate(constant_index));
+ __ cmpl(length, index);
}
} else {
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpp(length, ToRegister(instr->index()));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, ToRegister(instr->index()));
+ __ cmpl(length, index);
}
}
}
- Condition condition = hinstr->allow_equality() ? below : below_equal;
- ApplyCheckIf(condition, instr);
+ // With --debug-code, a statically elided check is still verified at
+ // runtime via int3 instead of deoptimizing; otherwise deopt on failure.
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr->environment());
+ }
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
case EXTERNAL_INT32x4_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->value()->representation();
- if (representation.IsInteger32()) {
+ if (representation.IsInteger32() && SmiValuesAre32Bits()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- offset,
- instr->additional_index()),
- Representation::Smi());
- __ AssertSmi(scratch);
-#endif
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+ }
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
}
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
} else {
+ ASSERT(object_reg.is(rax));
ASSERT(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
- if (!object_reg.is(rax)) {
- __ movp(rax, object_reg);
- }
__ Move(rbx, to_map);
bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
}
__ bind(&not_applicable);
}
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->left()).is(rdx));
ASSERT(ToRegister(instr->right()).is(rax));
- StringAddStub stub(instr->hydrogen()->flags(),
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
if (instr->value()->IsFloat32x4Register()) {
HandleSIMD128ToTagged<Float32x4>(instr);
+ } else if (instr->value()->IsFloat64x2Register()) {
+ HandleSIMD128ToTagged<Float64x2>(instr);
} else {
ASSERT(instr->value()->IsInt32x4Register());
HandleSIMD128ToTagged<Int32x4>(instr);
void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
if (instr->representation().IsFloat32x4()) {
HandleTaggedToSIMD128<Float32x4>(instr);
+ } else if (instr->representation().IsFloat64x2()) {
+ HandleTaggedToSIMD128<Float64x2>(instr);
} else {
ASSERT(instr->representation().IsInt32x4());
HandleTaggedToSIMD128<Int32x4>(instr);
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMap(reg, map);
__ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
__ CompareMap(reg, map);
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr->environment());
Register result_reg = ToRegister(instr->result());
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
__ movq(result_reg, value_reg);
- __ shr(result_reg, Immediate(32));
+ __ shrq(result_reg, Immediate(32));
} else {
__ movd(result_reg, value_reg);
}
__ movl(temp, Immediate((size / kPointerSize) - 1));
} else {
temp = ToRegister(instr->size());
- __ sar(temp, Immediate(kPointerSizeLog2));
+ __ sarp(temp, Immediate(kPointerSizeLog2));
__ decl(temp);
}
Label loop;
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Move(rbx, instr->hydrogen()->shared_info());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
Label::Distance false_distance = right_block == next_block ? Label::kNear
: Label::kFar;
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label, true_distance);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->float32x4_string())) {
+ } else if (String::Equals(type_name, factory->float32x4_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FLOAT32x4_TYPE, input);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->int32x4_string())) {
+ } else if (String::Equals(type_name, factory->float64x2_string())) {
+ __ JumpIfSmi(input, false_label, false_distance);
+ __ CmpObjectType(input, FLOAT64x2_TYPE, input);
+ final_branch_condition = equal;
+
+ } else if (String::Equals(type_name, factory->int32x4_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, INT32x4_TYPE, input);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label, false_distance);
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label, true_distance);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label, true_distance);
__ JumpIfSmi(input, false_label, false_distance);
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
}
+// Deferred (slow-path) code for LLoadFieldByIndex: invoked when the indexed
+// field holds a mutable double box, so the value must be materialized by the
+// runtime rather than loaded inline.
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ // Push the two runtime arguments for Runtime::kLoadMutableDouble.
+ __ Push(object);
+ __ Push(index);
+ // Zero rsi before the call. NOTE(review): rsi appears to be cleared because
+ // no context is live here — confirm against the x64 calling convention used
+ // by CallRuntimeSaveDoubles.
+ __ xorp(rsi, rsi);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ // Safepoint records the 2 arguments pushed above.
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ // The runtime result (in rax) replaces 'object' when registers are restored.
+ __ StoreToSafepointRegisterSlot(object, rax);
+}
+
+
+// Loads an object field selected by a Smi index. The low payload bit of the
+// index presumably flags a mutable double field (TODO confirm against
+// HLoadFieldByIndex): that case is routed to deferred code which calls the
+// runtime; otherwise the field is loaded inline, with negative indices
+// handled via the out-of-object (properties backing store) path.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ // Deferred stub that forwards to DoDeferredLoadMutableDouble for fields
+ // containing mutable double boxes.
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
Label out_of_object, done;
+ // Test the flag bit in the Smi-encoded index; if set, take the deferred
+ // (mutable double) path.
+ __ Move(kScratchRegister, Smi::FromInt(1));
+ __ testp(index, kScratchRegister);
+ __ j(not_zero, deferred->entry());
+
+ // Strip the flag bit before interpreting the index.
+ __ sarp(index, Immediate(1));
+
__ SmiToInteger32(index, index);
+ // Negative index => field lives out-of-object (diff elides that load here).
__ cmpl(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}