// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
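+  // Mark the environment as used; debug builds can then check that every
+  // environment attached to an instruction is actually used.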
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
int frame_count = 0;
int jsframe_count = 0;
ASSERT(ToRegister(instr->result()).Is(x0));
int arity = instr->arity();
- CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
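+  // The arguments pushed for this call have been consumed; jssp again
+  // references the end of the stack slots.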
+ after_push_argument_ = false;
}
// No cell in x2 for construct type feedback in optimized code.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ after_push_argument_ = false;
ASSERT(ToRegister(instr->result()).is(x0));
}
: DONT_OVERRIDE;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
__ Cbz(x10, &packed_case);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ B(&done);
__ Bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ Bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
+ after_push_argument_ = false;
ASSERT(ToRegister(instr->result()).is(x0));
}
if (context->IsRegister()) {
__ Mov(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
- __ Ldr(cp, ToMemOperand(context));
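+    // This may be reached from deferred code, which saves registers between
+    // jssp and the stack slots, so the slot must be accessed via fp.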
+ __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in x1.
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
} else {
__ Push(x1);
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LCodeGen::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(constant->HasInteger32Value());
- return Operand(signedness == SIGNED_INT32
- ? constant->Integer32Value()
- : static_cast<uint32_t>(constant->Integer32Value()));
+ return (signedness == SIGNED_INT32)
+ ? Operand(constant->Integer32Value())
+ : Operand(static_cast<uint32_t>(constant->Integer32Value()));
} else {
// Other constants not implemented.
Abort(kToOperand32UnsupportedImmediate);
}
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
ASSERT(op != NULL);
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()));
+ int fp_offset = StackSlotOffset(op->index());
+ if (op->index() >= 0) {
+      // Loads and stores have a greater reach with positive offsets than
+      // with negative ones. When the load or the store can't be done in one
+      // instruction via fp (the negative offset is too big), we try to
+      // access via jssp (positive offset).
+      // We can reference a stack slot from jssp only if jssp references the
+      // end of the stack slots. That is not the case when:
+      //  - stack_mode != kCanUseStackPointer: this is the case when deferred
+      //    code has saved the registers.
+      //  - after_push_argument_: arguments have been pushed for a call.
+      //  - inlined_arguments_: inlined arguments have been pushed once, and
+      //    the rest of the function can no longer trust jssp.
+      //  - saves_caller_doubles: some double registers have been pushed, so
+      //    jssp references the end of the double registers, not the end of
+      //    the stack slots.
+      // Also, if the offset from fp is small enough for a load/store in one
+      // instruction, we use an fp access.
+ if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
+ !inlined_arguments_ && !is_int9(fp_offset) &&
+ !info()->saves_caller_doubles()) {
+ int jssp_offset =
+ (GetStackSlotCount() - op->index() - 1) * kPointerSize;
+ return MemOperand(masm()->StackPointer(), jssp_offset);
+ }
+ }
+ return MemOperand(fp, fp_offset);
} else {
    // Retrieve the parameter without an eager stack frame, relative to the
    // stack pointer.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  // We push some arguments and they will be popped in another block. We
+  // can't trust that jssp references the end of the stack slots until the
+  // end of the function.
+ inlined_arguments_ = true;
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
ASSERT(ToRegister(instr->right()).is(x0));
ASSERT(ToRegister(instr->result()).is(x0));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
- __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
- } else {
- DeoptimizeIf(cc, check->environment());
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
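+  // Condition on which to deoptimize for a Cmp(index, length): hs deopts
+  // when index >= length; hi additionally permits index == length.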
+ Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
+ ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
- Register length = ToRegister32(instr->length());
-
if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
-
- if (instr->hydrogen()->length()->representation().IsSmi()) {
- __ Cmp(length, Smi::FromInt(constant_index));
- } else {
- __ Cmp(length, constant_index);
- }
+ Operand index = ToOperand32I(instr->index());
+ Register length = ToRegister32(instr->length());
+ __ Cmp(length, index);
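+    // The comparison operands are swapped here (length vs. index), so the
+    // condition must be reversed.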
+ cond = ReverseConditionForCmp(cond);
} else {
- ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
- __ Cmp(length, ToRegister32(instr->index()));
+ Register index = ToRegister32(instr->index());
+ Operand length = ToOperand32I(instr->length());
+ __ Cmp(index, length);
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ __ Assert(InvertCondition(cond), kEliminatedBoundsCheckFailed);
+ } else {
+ DeoptimizeIf(cond, instr->environment());
}
- Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
- ApplyCheckIf(condition, instr);
}
__ Call(target);
}
generator.AfterCall();
+ after_push_argument_ = false;
}
__ Call(x10);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ after_push_argument_ = false;
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
+ after_push_argument_ = false;
}
ASSERT(ToRegister(instr->result()).is(x0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
UNREACHABLE();
}
+ after_push_argument_ = false;
}
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) {
- ASSERT(instr->value() == NULL);
- ASSERT(instr->temp() == NULL);
+ if (instr->hydrogen()->IsStabilityCheck()) {
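+    // The maps are stable: emit no run-time check. Instead, record a
+    // dependency so this code is deoptimized if any of the maps transitions.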
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
return;
}
__ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, object);
__ Bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size(); i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMap(map_reg, map);
__ B(eq, &success);
}
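+  // Compare against the last map outside the loop, so the result can feed a
+  // conditional branch or deopt directly.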
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(map_reg, map);
// We didn't match a map.
- if (instr->hydrogen()->has_migration_target()) {
- __ B(deferred->entry());
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ B(ne, deferred->entry());
} else {
- Deoptimize(instr->environment());
+ DeoptimizeIf(ne, instr->environment());
}
__ Bind(&success);
Register temp1 = x10;
Register temp2 = x11;
Smi* index = instr->index();
- Label runtime, done, deopt, obj_ok;
+ Label runtime, done;
ASSERT(object.is(result) && object.Is(x0));
ASSERT(instr->IsMarkedAsCall());
- __ JumpIfSmi(object, &deopt);
+ DeoptimizeIfSmi(object, instr->environment());
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- __ B(eq, &obj_ok);
-
- __ Bind(&deopt);
- Deoptimize(instr->environment());
+ DeoptimizeIf(ne, instr->environment());
- __ Bind(&obj_ok);
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
Register dividend = ToRegister32(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister32(instr->result());
- ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
ASSERT(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister32(instr->left());
- Register divisor = ToRegister32(instr->right());
+ Register dividend = ToRegister32(instr->dividend());
+ Register divisor = ToRegister32(instr->divisor());
Register result = ToRegister32(instr->result());
// Issue the division first, and then check for any deopt cases whilst the
return;
}
- Label deopt;
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ Cbz(divisor, &deopt);
+ DeoptimizeIfZero(divisor, instr->environment());
}
// Check for (0 / -x) as that will produce negative zero.
    // If the divisor >= 0 (pl, the opposite of mi), set the flags to
    // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- __ B(eq, &deopt);
+ DeoptimizeIf(eq, instr->environment());
}
// Check for (kMinInt / -1).
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- __ B(eq, &deopt);
+ DeoptimizeIf(eq, instr->environment());
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- __ Cbnz(remainder, &deopt);
-
- Label div_ok;
- __ B(&div_ok);
- __ Bind(&deopt);
- Deoptimize(instr->environment());
- __ Bind(&div_ok);
+ DeoptimizeIfNotZero(remainder, instr->environment());
}
DeoptimizeIfMinusZero(input, instr->environment());
}
- __ TryConvertDoubleToInt32(result, input, double_scratch());
+ __ TryRepresentDoubleAsInt32(result, input, double_scratch());
DeoptimizeIf(ne, instr->environment());
if (instr->tag_result()) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Mov(x2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ Mov(x2, Operand(instr->hydrogen()->shared_info()));
__ Mov(x1, Operand(pretenure ? factory()->true_value()
ASSERT(instr->IsMarkedAsCall());
ASSERT(object.Is(x0));
- Label deopt;
-
- __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
+ instr->environment());
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- __ B(eq, &deopt);
+ DeoptimizeIf(eq, instr->environment());
- __ JumpIfSmi(object, &deopt);
+ DeoptimizeIfSmi(object, instr->environment());
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- __ B(le, &deopt);
+ DeoptimizeIf(le, instr->environment());
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
__ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
__ B(&use_cache);
- __ Bind(&deopt);
- Deoptimize(instr->environment());
-
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(object);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
__ Bind(&use_cache);
}
ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// InstanceofStub returns a result in x0:
// 0 => not an instance
ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
__ LoadObject(InstanceofStub::right(), instr->function());
- InstanceofStub stub(flags);
- CallCodeGeneric(stub.GetCode(isolate()),
+ InstanceofStub stub(isolate(), flags);
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
instr,
x1);
}
+ after_push_argument_ = false;
}
Register function = ToRegister(instr->function());
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
- Label deopt;
// Check that the function really is a function. Leaves map in the result
// register.
- __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
+ __ CompareObjectType(function, result, temp, JS_FUNCTION_TYPE);
+ DeoptimizeIf(ne, instr->environment());
// Make sure that the function has an instance prototype.
Label non_instance;
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
// If the function does not have an initial map, we're done.
Label done;
// map.
__ Bind(&non_instance);
__ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
- __ B(&done);
-
- // Deoptimize case.
- __ Bind(&deopt);
- Deoptimize(instr->environment());
// All done.
__ Bind(&done);
ElementsKind elements_kind,
int additional_index) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
- ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
- : 0;
+ int additional_offset = additional_index << element_size_shift;
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ additional_offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
+ }
if (key_is_constant) {
- int base_offset = ((constant_key + additional_index) << element_size_shift);
- return MemOperand(base, base_offset + additional_offset);
+ int key_offset = constant_key << element_size_shift;
+ return MemOperand(base, key_offset + additional_offset);
}
- if (additional_index == 0) {
- if (key_is_smi) {
- // Key is smi: untag, and scale by element size.
- __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
- return MemOperand(scratch, additional_offset);
- } else {
- // Key is not smi, and element size is not byte: scale by element size.
- if (additional_offset == 0) {
- return MemOperand(base, key, SXTW, element_size_shift);
- } else {
- __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
- return MemOperand(scratch, additional_offset);
- }
- }
- } else {
- // TODO(all): Try to combine these cases a bit more intelligently.
- if (additional_offset == 0) {
- if (key_is_smi) {
- __ SmiUntag(scratch, key);
- __ Add(scratch.W(), scratch.W(), additional_index);
- } else {
- __ Add(scratch.W(), key.W(), additional_index);
- }
- return MemOperand(base, scratch, LSL, element_size_shift);
- } else {
- if (key_is_smi) {
- __ Add(scratch, base,
- Operand::UntagSmiAndScale(key, element_size_shift));
- } else {
- __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
- }
- return MemOperand(
- scratch,
- (additional_index << element_size_shift) + additional_offset);
- }
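+  // The key is in a register: fold the untag/sign-extend and the scaling
+  // into the addressing mode where it is supported.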
+ if (key_is_smi) {
+ __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
+ return MemOperand(scratch, additional_offset);
}
+
+ if (additional_offset == 0) {
+ return MemOperand(base, key, SXTW, element_size_shift);
+ }
+
+ ASSERT(!AreAliased(scratch, key));
+ __ Add(scratch, base, additional_offset);
+ return MemOperand(scratch, key, SXTW, element_size_shift);
}
: ToRegister32(instr->value());
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
- Label done;
- __ Abs(result, input, NULL, &done);
- Deoptimize(instr->environment());
- __ Bind(&done);
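+  // Abs leaves the overflow (V) flag set for the most negative input, whose
+  // absolute value is not representable; deoptimize in that case.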
+ __ Abs(result, input);
+ DeoptimizeIf(vs, instr->environment());
}
}
}
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
- // TODO(jbramley): If we could provide a double result, we could use frintm
- // and produce a valid double result in a single instruction.
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
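+  // Frintm rounds towards minus infinity, computing the floor of a double
+  // in a single instruction.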
+ __ Frintm(result, input);
+}
+
+
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
Register dividend = ToRegister32(instr->dividend());
Register divisor = ToRegister32(instr->divisor());
ASSERT(ToDoubleRegister(instr->result()).is(d0));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
instr->environment());
__ Bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
// Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
// supports large integer exponents.
Register exponent = ToRegister(instr->right());
__ Sxtw(exponent, exponent);
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
-void LCodeGen::DoMathRound(LMathRound* instr) {
- // TODO(jbramley): We could provide a double result here using frint.
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister scratch_d = double_scratch();
+
+ ASSERT(!AreAliased(input, result, scratch_d));
+
+ Label done;
+
+ __ Frinta(result, input);
+ __ Fcmp(input, 0.0);
+ __ Fccmp(result, input, ZFlag, lt);
+ // The result is correct if the input was in [-0, +infinity], or was a
+ // negative integral value.
+ __ B(eq, &done);
+
+  // Here the input is negative, non-integral, with an exponent lower than
+  // 52. We do not have to worry about the 0.49999999999999994
+  // (0x3fdfffffffffffff) case, so we can safely add 0.5.
+ __ Fmov(scratch_d, 0.5);
+ __ Fadd(result, input, scratch_d);
+ __ Frintm(result, result);
+  // The range [-0.5, -0.0) yielded +0.0; force the sign back to negative.
+ __ Fabs(result, result);
+ __ Fneg(result, result);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
Register result = ToRegister(instr->result());
// Since we're providing a 32-bit result, we can implement ties-to-infinity by
// adding 0.5 to the input, then taking the floor of the result. This does not
// work for very large positive doubles because adding 0.5 would cause an
- // intermediate rounding stage, so a different approach will be necessary if a
+ // intermediate rounding stage, so a different approach is necessary when a
// double result is needed.
__ Fadd(temp1, input, dot_five);
__ Fcvtms(result, temp1);
Register divisor = ToRegister32(instr->right());
Register result = ToRegister32(instr->result());
- Label deopt, done;
+ Label done;
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- // Combine the deoptimization sites.
- Label ok;
- __ Cbnz(divisor, &ok);
- __ Bind(&deopt);
- Deoptimize(instr->environment());
- __ Bind(&ok);
+ DeoptimizeIfZero(divisor, instr->environment());
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- if (deopt.is_bound()) { // TODO(all) This is a hack, remove this...
- __ Tbnz(dividend, kWSignBit, &deopt);
- } else {
- DeoptimizeIfNegative(dividend, instr->environment());
- }
+ DeoptimizeIfNegative(dividend, instr->environment());
}
__ Bind(&done);
}
Abort(kDoPushArgumentNotImplementedForDoubleType);
} else {
__ Push(ToRegister(argument));
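+    // jssp no longer references the end of the stack slots; ToMemOperand
+    // must not generate jssp-relative accesses until this is reset.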
+ after_push_argument_ = true;
}
}
return FieldMemOperand(string, SeqString::kHeaderSize + offset);
}
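+  // Compute the untagged address of the first character, so the variable
+  // index can be folded into the addressing mode.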
+ __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ Add(temp, string, Operand(ToRegister32(index), SXTW));
+ return MemOperand(temp, ToRegister32(index), SXTW);
} else {
STATIC_ASSERT(kUC16Size == 2);
- __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
+ return MemOperand(temp, ToRegister32(index), SXTW, 1);
}
- return FieldMemOperand(temp, SeqString::kHeaderSize);
}
Register object = ToRegister(instr->object());
HObjectAccess access = instr->hydrogen()->access();
- Handle<Map> transition = instr->transition();
int offset = access.offset();
if (access.IsExternalMemory()) {
- ASSERT(transition.is_null());
+ ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
Register value = ToRegister(instr->value());
__ Store(value, MemOperand(object, offset), representation);
return;
} else if (representation.IsDouble()) {
- ASSERT(transition.is_null());
ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
FPRegister value = ToDoubleRegister(instr->value());
__ Str(value, FieldMemOperand(object, offset));
!instr->hydrogen()->value()->type().IsHeapObject()) {
DeoptimizeIfSmi(value, instr->environment());
- // We know that value is a smi now, so we can omit the check below.
+ // We know now that value is not a smi, so we can omit the check below.
check_needed = OMIT_SMI_CHECK;
}
- if (!transition.is_null()) {
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
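+    // Record a dependency on the transition map: if it is later deprecated,
+    // this optimized code is discarded.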
+ AddDeprecationDependency(transition);
// Store the new map value.
Register new_map_value = ToRegister(instr->temp0());
__ Mov(new_map_value, Operand(transition));
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).Is(x1));
ASSERT(ToRegister(instr->right()).Is(x0));
- StringAddStub stub(instr->hydrogen()->flags(),
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
__ Cmp(char_code, String::kMaxOneByteCharCode);
__ B(hi, deferred->entry());
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2));
- __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
__ CompareRoot(result, Heap::kUndefinedValueRootIndex);
__ B(eq, deferred->entry());
__ Bind(deferred->exit());
// A heap number: load value and convert to int32 using non-truncating
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2);
+ __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
DeoptimizeIf(ne, instr->environment());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object = ToRegister(instr->object());
- Register temp1 = ToRegister(instr->temp1());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
ElementsKind to_kind = instr->to_kind();
Label not_applicable;
- __ CheckMap(object, temp1, from_map, ¬_applicable, DONT_DO_SMI_CHECK);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register temp1 = ToRegister(instr->temp1());
Register new_map = ToRegister(instr->temp2());
+ __ CheckMap(object, temp1, from_map, ¬_applicable, DONT_DO_SMI_CHECK);
__ Mov(new_map, Operand(to_map));
__ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
GetLinkRegisterState(), kDontSaveFPRegs);
} else {
+ {
+ UseScratchRegisterScope temps(masm());
+ // Use the temp register only in a restricted scope - the codegen checks
+ // that we do not use any register across a call.
+ __ CheckMap(object, temps.AcquireX(), from_map, ¬_applicable,
+ DONT_DO_SMI_CHECK);
+ }
+ ASSERT(object.is(x0));
ASSERT(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
- __ Mov(x0, object);
__ Mov(x1, Operand(to_map));
bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
__ Bind(¬_applicable);
}
Register temp2 = ToRegister(instr->temp2());
Label no_memento_found;
- __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
- Deoptimize(instr->environment());
+ __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
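+  // On fall-through the flags hold the result of the memento map
+  // comparison: eq means a memento was found.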
+ DeoptimizeIf(eq, instr->environment());
__ Bind(&no_memento_found);
}
Label* false_label = instr->FalseLabel(chunk_);
Register value = ToRegister(instr->value());
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
ASSERT(instr->temp1() != NULL);
Register map = ToRegister(instr->temp1());
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
EmitBranch(instr, eq);
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
Register map = ToRegister(instr->temp1());
Register scratch = ToRegister(instr->temp2());
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
Register map = ToRegister(instr->temp1());
Register scratch = ToRegister(instr->temp2());
__ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
EmitBranch(instr, eq);
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
__ CompareRoot(value, Heap::kFalseValueRootIndex);
EmitBranch(instr, eq);
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ CompareRoot(value, Heap::kNullValueRootIndex);
EmitBranch(instr, eq);
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
ASSERT(instr->temp1() != NULL);
Register scratch = ToRegister(instr->temp1());
__ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
ASSERT(instr->temp1() != NULL);
Register type = ToRegister(instr->temp1());
// HeapObject's type has been loaded into type register by JumpIfObjectType.
EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
Register map = ToRegister(instr->temp1());
Register scratch = ToRegister(instr->temp2());
// If the receiver is null or undefined, we have to pass the global object as
// a receiver to normal functions. Values have to be passed unchanged to
// builtins and strict-mode functions.
- Label global_object, done, deopt;
+ Label global_object, done, copy_receiver;
if (!instr->hydrogen()->known_function()) {
__ Ldr(result, FieldMemOperand(function,
FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
// Do not transform the receiver to object for strict mode functions.
- __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
+ __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, ©_receiver);
// Do not transform the receiver to object for builtins.
- __ Tbnz(result, SharedFunctionInfo::kNative, &done);
+ __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver);
}
// Normal function. Replace undefined or null with global receiver.
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- __ JumpIfSmi(receiver, &deopt);
+ DeoptimizeIfSmi(receiver, instr->environment());
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
- __ Mov(result, receiver);
- __ B(ge, &done);
- // Otherwise, fall through to deopt.
-
- __ Bind(&deopt);
+ __ B(ge, ©_receiver);
Deoptimize(instr->environment());
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
__ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ B(&done);
+ __ Bind(©_receiver);
+ __ Mov(result, receiver);
__ Bind(&done);
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Push(index);
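+  // Runtime::kLoadMutableDouble does not use the context; zero is a smi, so
+  // it is a GC-safe value for cp across the safepoint.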
+ __ Mov(cp, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
__ AssertSmi(index);
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
Label out_of_object, done;
+
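+  // The low bit of the index value is a flag: when set, the field is a
+  // mutable heap number that must be loaded through the runtime in deferred
+  // code. Afterwards, shift the flag out, halving the smi index.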
+ __ TestAndBranchIfAnySet(
+ index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
+ __ Mov(index, Operand(index, ASR, 1));
+
__ Cmp(index, Smi::FromInt(0));
__ B(lt, &out_of_object);
__ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(result, FieldMemOperand(result,
FixedArray::kHeaderSize - kPointerSize));
+ __ Bind(deferred->exit());
__ Bind(&done);
}