#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
}
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
// Check that the jump table is accessible from everywhere in the function
// code, i.e. that offsets to the table can be encoded in the 24bit signed
// immediate of a branch instruction.
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 7)) {
+ jump_table_.length() * 7)) {
Abort(kGeneratedCodeIsTooLarge);
}
- if (deopt_jump_table_.length() > 0) {
+ if (jump_table_.length() > 0) {
Label needs_frame, call_deopt_entry;
Comment(";;; -------------------- Jump table --------------------");
- Address base = deopt_jump_table_[0].address;
+ Address base = jump_table_[0].address;
Register entry_offset = scratch0();
- int length = deopt_jump_table_.length();
+ int length = jump_table_.length();
for (int i = 0; i < length; i++) {
- __ bind(&deopt_jump_table_[i].label);
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
- Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
- DCHECK(type == deopt_jump_table_[0].bailout_type);
- Address entry = deopt_jump_table_[i].address;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- DCHECK(id != Deoptimizer::kNotDeoptimizationEntry);
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
// Second-level deopt table entries are contiguous and small, so instead
// of loading the full, absolute address of each one, load an immediate
// offset which will be added to the base address later.
__ mov(entry_offset, Operand(entry - base));
- if (deopt_jump_table_[i].needs_frame) {
+ if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ b(&needs_frame);
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
__ stop("trap_on_deopt", condition);
}
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().bailout_type != bailout_type) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ b(condition, &deopt_jump_table_.last().label);
+ __ b(condition, &jump_table_.last().label);
}
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type);
+ DeoptimizeIf(condition, instr, detail, bailout_type);
}
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, 0, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ b(&done);
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
__ bind(&remainder_not_zero);
}
}
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
}
__ bind(&done);
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ Move(result_reg, left_reg);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ bind(&done);
}
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
if (CpuFeatures::IsSupported(SUDIV)) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
return;
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
if (CpuFeatures::IsSupported(SUDIV)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ rsb(result, left, Operand::Zero());
}
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ mov(result, Operand::Zero());
break;
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs)) {
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
int32_t shift = WhichPowerOf2(constant_abs);
__ mov(result, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (IsPowerOf2(constant_abs - 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ add(result, left, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (IsPowerOf2(constant_abs + 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ rsb(result, left, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&done);
}
}
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ Move(result, left);
}
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
DCHECK(!scratch.is(object));
__ SmiTst(object);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
}
}
}
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
&load_bool_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
Label* load_bool() { return &load_bool_; }
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
if (NeedsEagerFrame()) {
no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (sp_delta != 0) {
- __ add(sp, sp, Operand(sp_delta));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi
+ __ SmiUntag(reg);
+ __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
}
- } else {
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- __ SmiUntag(reg);
- __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
- }
- __ Jump(lr);
+ __ Jump(lr);
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Move(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
// Emits a generic (megamorphic) global load through the LoadIC. The IC
// calling convention fixes the context, receiver, and result registers,
// which the DCHECKs below verify.
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(r0));

  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  }
  // typeof loads must not throw on unresolvable names, hence NOT_CONTEXTUAL.
  ContextualMode contextual_mode = CONTEXTUAL;
  if (instr->for_typeof()) contextual_mode = NOT_CONTEXTUAL;
  Handle<Code> load_ic = CodeFactory::LoadIC(isolate(), contextual_mode).code();
  CallCode(load_ic, RelocInfo::CODE_TARGET, instr);
}
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Store the value.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ b(ne, &skip_assignment);
}
// Emits a generic named-property load through the LoadIC. Register
// placement is dictated by the load descriptor and checked up front.
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(r0));

  // Name is always in r2.
  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  }
  Handle<Code> load_ic =
      CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
  CallCode(load_ic, RelocInfo::CODE_TARGET, instr,
           NEVER_INLINE_TARGET_ADDRESS);
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
// If the function does not have an initial map, we're done.
Label done;
}
-void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
- Runtime::FunctionId id) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ sub(r0, r0, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(r0, reg);
-}
-
-
-template<class T>
-void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
- class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
- public:
- DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
- Runtime::FunctionId id)
- : LDeferredCode(codegen), instr_(instr), id_(id) { }
- virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
- }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
- private:
- LInstruction* instr_;
- Runtime::FunctionId id_;
- };
-
- // Allocate a SIMD128 object on the heap.
- Register reg = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
- Register scratch = scratch0();
-
- DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
- this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
- __ jmp(deferred->entry());
- __ bind(deferred->exit());
-
- // Copy the SIMD128 value from the external array to the heap object.
- STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
- Operand operand = key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size);
-
- __ add(scratch, external_pointer, operand);
-
- // Load the inner FixedTypedArray.
- __ ldr(temp2, MemOperand(reg, T::kValueOffset));
-
- for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
- __ ldr(temp, MemOperand(scratch, base_offset + offset));
- __ str(
- temp,
- MemOperand(
- temp2,
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
- }
-
- // Now that we have finished with the object's real address tag it
- __ add(reg, reg, Operand(kHeapObjectTag));
-}
-
-
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), base_offset);
}
- } else if (IsFloat32x4ElementsKind(elements_kind)) {
- DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
- } else if (IsFloat64x2ElementsKind(elements_kind)) {
- DoLoadKeyedSIMD128ExternalArray<Float64x2>(instr);
- } else if (IsInt32x4ElementsKind(elements_kind)) {
- DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr);
}
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
- case FLOAT32x4_ELEMENTS:
- case FLOAT64x2_ELEMENTS:
- case INT32x4_ELEMENTS:
- case EXTERNAL_FLOAT32x4_ELEMENTS:
- case EXTERNAL_FLOAT64x2_ELEMENTS:
- case EXTERNAL_INT32x4_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
}
// Emits a generic keyed load through the KeyedLoadIC. The key travels in
// the IC's name register; receiver and context registers are fixed by the
// descriptor and verified below.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> keyed_load_ic = CodeFactory::KeyedLoadIC(isolate()).code();
  CallCode(keyed_load_ic, RelocInfo::CODE_TARGET, instr,
           NEVER_INLINE_TARGET_ADDRESS);
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
__ b(&result_in_receiver);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
// Push the receiver and use the register to keep the original
// number of arguments.
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
Label done;
Register exponent = scratch0();
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ bind(&done);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
+ DeoptimizeIf(mi, instr); // [-0.5, -0].
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&done);
}
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d1));
DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(r2));
+ ToRegister(instr->right()).is(tagged_exponent));
DCHECK(ToDoubleRegister(instr->left()).is(d0));
DCHECK(ToDoubleRegister(instr->result()).is(d2));
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(r2, &no_deopt);
- __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!r6.is(tagged_exponent));
+ __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
}
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(r1));
+ DCHECK(name.is(r2));
+
+ Register scratch = r3;
+ Register extra = r4;
+ Register extra2 = r5;
+ Register extra3 = r6;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
PlatformInterfaceDescriptor* call_descriptor =
- instr->descriptor()->platform_specific_descriptor();
+ instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
} else {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- __ mov(StoreIC::NameRegister(), Operand(instr->name()));
+ __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
- }
-}
-
-
-template<class T>
-void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
- DCHECK(instr->value()->IsRegister());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
- Register input_reg = ToRegister(instr->value());
- __ SmiTst(input_reg);
- DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
- DeoptimizeIf(ne, instr->environment());
-
- STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
- Register address = scratch0();
- if (key_is_constant) {
- if (constant_key != 0) {
- __ add(address, external_pointer,
- Operand(constant_key << element_size_shift));
- } else {
- address = external_pointer;
- }
- } else {
- __ add(address, external_pointer, Operand(key, LSL, shift_size));
- }
-
- // Load the inner FixedTypedArray.
- __ ldr(temp2, MemOperand(input_reg, T::kValueOffset - kHeapObjectTag));
-
- for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
- __ ldr(temp, MemOperand(temp2,
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
- __ str(temp, MemOperand(address, base_offset + offset));
+ DeoptimizeIf(cc, instr);
}
}
} else { // Storing doubles, not floats.
__ vstr(value, address, base_offset);
}
- } else if (IsFloat32x4ElementsKind(elements_kind)) {
- DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
- } else if (IsFloat64x2ElementsKind(elements_kind)) {
- DoStoreKeyedSIMD128ExternalArray<Float64x2>(instr);
- } else if (IsInt32x4ElementsKind(elements_kind)) {
- DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand = PrepareKeyedOperand(
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
- case FLOAT32x4_ELEMENTS:
- case FLOAT64x2_ELEMENTS:
- case INT32x4_ELEMENTS:
- case EXTERNAL_FLOAT32x4_ELEMENTS:
- case EXTERNAL_FLOAT64x2_ELEMENTS:
- case EXTERNAL_INT32x4_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
- DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&no_memento_found);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ SmiTag(output, input);
}
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr);
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DwVfpRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
DCHECK(!result_reg.is(double_scratch0()));
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
+ DeoptimizeIf(eq, instr);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "cannot truncate");
__ mov(input_reg, Operand::Zero());
- __ b(&done);
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number");
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "minus zero");
}
}
__ bind(&done);
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&done);
}
}
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
}
}
} else {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- DCHECK(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+ DeoptimizeIf(tag == 0 ? ne : eq, instr);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
}
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type);
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ SmiTst(r0);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&use_cache);
}
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&done);
}
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;