#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/stub-cache.h"
#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+ virtual void BeforeCall(int call_size) const OVERRIDE {}
- virtual void AfterCall() const V8_OVERRIDE {
+ virtual void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
bool LCodeGen::GenerateCode() {
LPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
+ DCHECK(is_unused());
status_ = GENERATING;
// Open a frame scope to indicate that there is a frame on the stack. The
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
+ DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
void LCodeGen::SaveCallerDoubles() {
- ASSERT(info()->saves_caller_doubles());
- ASSERT(NeedsEagerFrame());
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
void LCodeGen::RestoreCallerDoubles() {
- ASSERT(info()->saves_caller_doubles());
- ASSERT(NeedsEagerFrame());
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
Comment(";;; Restore clobbered callee double registers");
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
+ DCHECK(is_generating());
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
__ b(ne, &ok);
__ ldr(r2, GlobalObjectOperand());
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
__ str(r2, MemOperand(sp, receiver_offset));
need_write_barrier = false;
} else {
__ push(r1);
- __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- ASSERT(slots >= 0);
+ DCHECK(slots >= 0);
__ sub(sp, sp, Operand(slots * kPointerSize));
}
bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
+ DCHECK(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
if (NeedsDeferredFrame()) {
Comment(";;; Build frame");
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
+ DCHECK(!frame_is_built_);
+ DCHECK(info()->IsStub());
frame_is_built_ = true;
__ PushFixedFrame();
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
- ASSERT(frame_is_built_);
+ DCHECK(frame_is_built_);
__ pop(ip);
__ PopFixedFrame();
frame_is_built_ = false;
}
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
// Check that the jump table is accessible from everywhere in the function
// code, i.e. that offsets to the table can be encoded in the 24bit signed
// immediate of a branch instruction.
// Each entry in the jump table generates one instruction and inlines one
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 7)) {
+ jump_table_.length() * 7)) {
Abort(kGeneratedCodeIsTooLarge);
}
- if (deopt_jump_table_.length() > 0) {
+ if (jump_table_.length() > 0) {
+ Label needs_frame, call_deopt_entry;
+
Comment(";;; -------------------- Jump table --------------------");
- }
- Label table_start;
- __ bind(&table_start);
- Label needs_frame;
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ bind(&deopt_jump_table_[i].label);
- Address entry = deopt_jump_table_[i].address;
- Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (deopt_jump_table_[i].needs_frame) {
- ASSERT(!info()->saves_caller_doubles());
- __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (needs_frame.is_bound()) {
- __ b(&needs_frame);
+ Address base = jump_table_[0].address;
+
+ Register entry_offset = scratch0();
+
+ int length = jump_table_.length();
+ for (int i = 0; i < length; i++) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
+
+ // Second-level deopt table entries are contiguous and small, so instead
+ // of loading the full, absolute address of each one, load an immediate
+ // offset which will be added to the base address later.
+ __ mov(entry_offset, Operand(entry - base));
+
+ if (table_entry->needs_frame) {
+ DCHECK(!info()->saves_caller_doubles());
+ if (needs_frame.is_bound()) {
+ __ b(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ Comment(";;; call deopt with frame");
+ __ PushFixedFrame();
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(ip);
+ __ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ bind(&call_deopt_entry);
+ // Add the base address to the offset previously loaded in
+ // entry_offset.
+ __ add(entry_offset, entry_offset,
+ Operand(ExternalReference::ForDeoptEntry(base)));
+ __ blx(entry_offset);
+ }
+
+ masm()->CheckConstPool(false, false);
} else {
- __ bind(&needs_frame);
- __ PushFixedFrame();
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, ip);
+ // The last entry can fall through into `call_deopt_entry`, avoiding a
+ // branch.
+ bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
+
+ if (need_branch) __ b(&call_deopt_entry);
+
+ masm()->CheckConstPool(false, !need_branch);
}
- } else {
+ }
+
+ if (!call_deopt_entry.is_bound()) {
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
+
if (info()->saves_caller_doubles()) {
- ASSERT(info()->IsStub());
+ DCHECK(info()->IsStub());
RestoreCallerDoubles();
}
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ add(entry_offset, entry_offset,
+ Operand(ExternalReference::ForDeoptEntry(base)));
+ __ blx(entry_offset);
}
- masm()->CheckConstPool(false, false);
}
// Force constant pool emission at the end of the deopt jump table to make
bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
+ DCHECK(is_done());
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
+ DCHECK(op->IsRegister());
return ToRegister(op->index());
}
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
+ DCHECK(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsSmiOrTagged());
+ DCHECK(r.IsSmiOrTagged());
__ Move(scratch, literal);
}
return scratch;
DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
+ DCHECK(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
+ DCHECK(literal->IsNumber());
__ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
__ vmov(flt_scratch, ip);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle(isolate());
}
HConstant* constant = chunk_->LookupConstant(op);
int32_t value = constant->Integer32Value();
if (r.IsInteger32()) return value;
- ASSERT(r.IsSmiOrTagged());
+ DCHECK(r.IsSmiOrTagged());
return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
+ DCHECK(constant->HasDoubleValue());
return constant->DoubleValue();
}
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsSmi()) {
- ASSERT(constant->HasSmiValue());
+ DCHECK(constant->HasSmiValue());
return Operand(Smi::FromInt(constant->Integer32Value()));
} else if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
+ DCHECK(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
Abort(kToOperandUnsupportedDoubleImmediate);
}
- ASSERT(r.IsTagged());
+ DCHECK(r.IsTagged());
return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
static int ArgumentsOffsetWithoutFrame(int index) {
- ASSERT(index < 0);
+ DCHECK(index < 0);
return -(index + 1) * kPointerSize;
}
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
return MemOperand(fp, StackSlotOffset(op->index()));
} else {
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- ASSERT(op->IsDoubleStackSlot());
+ DCHECK(op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
} else {
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
+ DCHECK(translation_size == 1);
+ DCHECK(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
+ DCHECK(translation_size == 2);
+ DCHECK(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case STUB:
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
- ASSERT(instr != NULL);
+ DCHECK(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles) {
- ASSERT(instr != NULL);
+ DCHECK(instr != NULL);
__ CallRuntime(function, num_arguments, save_doubles);
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
+ DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
+ DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
__ mov(scratch, Operand(count));
__ ldr(r1, MemOperand(scratch));
__ sub(r1, r1, Operand(1), SetCC);
- __ movw(r1, FLAG_deopt_every_n_times, eq);
+ __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
__ str(r1, MemOperand(scratch));
__ pop(r1);
__ stop("trap_on_deopt", condition);
}
- ASSERT(info()->IsStub() || frame_is_built_);
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
+ DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (condition == al && frame_is_built_ &&
!info()->saves_caller_doubles()) {
+ DeoptComment(reason);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().bailout_type != bailout_type) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
}
- __ b(condition, &deopt_jump_table_.last().label);
+ __ b(condition, &jump_table_.last().label);
}
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type);
+ DeoptimizeIf(condition, instr, detail, bailout_type);
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
+ DCHECK(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
} else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
- ASSERT(expected_safepoint_kind_ == kind);
+ DCHECK(expected_safepoint_kind_ == kind);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
}
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(r0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpExec: {
RegExpExecStub stub(isolate());
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
- ASSERT(dividend.is(ToRegister(instr->result())));
+ DCHECK(dividend.is(ToRegister(instr->result())));
// Theoretically, a variation of the branch-free code for integer division by
// a power of 2 (calculating the remainder via an additional multiplication
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ b(&done);
}
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- ASSERT(!dividend.is(result));
+ DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
__ bind(&remainder_not_zero);
}
}
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
}
__ bind(&done);
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
Register scratch = scratch0();
- ASSERT(!scratch.is(left_reg));
- ASSERT(!scratch.is(right_reg));
- ASSERT(!scratch.is(result_reg));
+ DCHECK(!scratch.is(left_reg));
+ DCHECK(!scratch.is(right_reg));
+ DCHECK(!scratch.is(result_reg));
DwVfpRegister dividend = ToDoubleRegister(instr->temp());
DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
- ASSERT(!divisor.is(dividend));
+ DCHECK(!divisor.is(dividend));
LowDwVfpRegister quotient = double_scratch0();
- ASSERT(!quotient.is(dividend));
- ASSERT(!quotient.is(divisor));
+ DCHECK(!quotient.is(dividend));
+ DCHECK(!quotient.is(divisor));
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ Move(result_reg, left_reg);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ bind(&done);
}
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
- ASSERT(!result.is(dividend));
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+ DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- ASSERT(!dividend.is(result));
+ DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
if (CpuFeatures::IsSupported(SUDIV)) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
// This is computed in-place.
- ASSERT(addend.is(ToDoubleRegister(instr->result())));
+ DCHECK(addend.is(ToDoubleRegister(instr->result())));
__ vmla(addend, multiplier, multiplicand);
}
DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
// This is computed in-place.
- ASSERT(minuend.is(ToDoubleRegister(instr->result())));
+ DCHECK(minuend.is(ToDoubleRegister(instr->result())));
__ vmls(minuend, multiplier, multiplicand);
}
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
return;
}
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
- ASSERT(!dividend.is(result));
+ DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Easy case: We need no dynamic check for the dividend and the flooring
// In the general case we may need to adjust before and after the truncating
// division to get a flooring division.
Register temp = ToRegister(instr->temp());
- ASSERT(!temp.is(dividend) && !temp.is(result));
+ DCHECK(!temp.is(dividend) && !temp.is(result));
Label needs_adjustment, done;
__ cmp(dividend, Operand::Zero());
__ b(divisor > 0 ? lt : gt, &needs_adjustment);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
if (CpuFeatures::IsSupported(SUDIV)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ rsb(result, left, Operand::Zero());
}
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ mov(result, Operand::Zero());
break;
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs)) {
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
int32_t shift = WhichPowerOf2(constant_abs);
__ mov(result, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (IsPowerOf2(constant_abs - 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
__ add(result, left, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (IsPowerOf2(constant_abs + 1)) {
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
int32_t shift = WhichPowerOf2(constant_abs + 1);
__ rsb(result, left, Operand(left, LSL, shift));
// Correct the sign of the result is the constant is negative.
}
} else {
- ASSERT(right_op->IsRegister());
+ DCHECK(right_op->IsRegister());
Register right = ToRegister(right_op);
if (overflow) {
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&done);
}
}
void LCodeGen::DoBitI(LBitI* instr) {
LOperand* left_op = instr->left();
LOperand* right_op = instr->right();
- ASSERT(left_op->IsRegister());
+ DCHECK(left_op->IsRegister());
Register left = ToRegister(left_op);
Register result = ToRegister(instr->result());
Operand right(no_reg);
if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, ip));
} else {
- ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
right = ToOperand(right_op);
}
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ Move(result, left);
}
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
Register right_reg = EmitLoadRegister(right, ip);
__ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
__ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
Register right_reg = EmitLoadRegister(right, ip);
__ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
__ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
+ DCHECK(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
double v = instr->value();
__ Vmov(result, v, scratch0());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
- ASSERT(object.is(result));
- ASSERT(object.is(r0));
- ASSERT(!scratch.is(scratch0()));
- ASSERT(!scratch.is(object));
+ DCHECK(object.is(result));
+ DCHECK(object.is(r0));
+ DCHECK(!scratch.is(scratch0()));
+ DCHECK(!scratch.is(object));
__ SmiTst(object);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
return FieldMemOperand(string, SeqString::kHeaderSize + offset);
}
Register scratch = scratch0();
- ASSERT(!scratch.is(string));
- ASSERT(!scratch.is(ToRegister(index)));
+ DCHECK(!scratch.is(string));
+ DCHECK(!scratch.is(ToRegister(index)));
if (encoding == String::ONE_BYTE_ENCODING) {
__ add(scratch, string, Operand(ToRegister(index)));
} else {
Register right_reg = EmitLoadRegister(right, ip);
__ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
+ DCHECK(right->IsRegister() || right->IsConstantOperand());
__ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
__ Move(result_reg, left_reg, condition);
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
+ DCHECK(instr->hydrogen()->representation().IsDouble());
DwVfpRegister left_reg = ToDoubleRegister(left);
DwVfpRegister right_reg = ToDoubleRegister(right);
DwVfpRegister result_reg = ToDoubleRegister(instr->result());
// Generic (tagged-operand) binary arithmetic: dispatches through the
// BinaryOp IC. Fixed register contract (checked below): left in r1,
// right in r0, result in r0, context in cp.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->left()).is(r1));
- ASSERT(ToRegister(instr->right()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r1));
+ DCHECK(ToRegister(instr->right()).is(r0));
+ DCHECK(ToRegister(instr->result()).is(r0));
- BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
}
// Emits the truthiness test for LBranch. The test is specialized on the
// statically-known representation (int32/smi, double, tagged) and, for
// tagged values, on the HType inferred for the value; unhandled cases
// fall through to map-based checks and may deoptimize.
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ cmp(reg, Operand::Zero());
EmitBranch(instr, ne);
} else if (r.IsDouble()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
DwVfpRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
__ VFPCompareAndSetFlags(reg, 0.0);
__ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
EmitBranch(instr, ne);
} else {
- ASSERT(r.IsTagged());
+ DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
// A known-boolean value is true iff it is the 'true' oddball.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(instr, eq);
} else if (type.IsSmi()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
// Smi zero is the only falsy smi.
__ cmp(reg, Operand::Zero());
EmitBranch(instr, ne);
} else if (type.IsJSArray()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
// A JSArray is always truthy: branch unconditionally.
EmitBranch(instr, al);
} else if (type.IsHeapNumber()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
DwVfpRegister dbl_scratch = double_scratch0();
__ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
__ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN)
EmitBranch(instr, ne);
} else if (type.IsString()) {
- ASSERT(!info()->IsStub());
+ DCHECK(!info()->IsStub());
// A string is true iff its length is nonzero.
__ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
__ cmp(ip, Operand::Zero());
EmitBranch(instr, ne);
// NOTE(review): diff-hunk boundary — the setup of `expected`
// (ToBoolean type feedback) and several dynamic type checks are
// elided between hunks here; confirm against the full file.
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
}
}
}
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Representation rep = instr->hydrogen()->value()->representation();
- ASSERT(!rep.IsInteger32());
+ DCHECK(!rep.IsInteger32());
Register scratch = ToRegister(instr->temp());
if (rep.IsDouble()) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
+ DCHECK(from == to || to == LAST_TYPE);
return from;
}
Register input,
Register temp,
Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
+ DCHECK(!input.is(temp));
+ DCHECK(!input.is(temp2));
+ DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
+ DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// Deferred (slow-path) code object for LInstanceOfKnownGlobal. It records
// the labels of the inline map check and (new in this change) the inline
// bool load so the deferred stub call can compute the code-patching
// offsets for both sites.
- class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
+ &load_bool_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
+ Label* load_bool() { return &load_bool_; }
+
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
+ Label load_bool_;
};
DeferredInstanceOfKnownGlobal* deferred;
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
+ __ bind(deferred->load_bool()); // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
+ Label* map_check,
+ Label* bool_load) {
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(isolate(), flags);
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
LoadContextFromDeferred(instr->context());
__ Move(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 4;
+
+ int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
+ int additional_delta = (call_size / Assembler::kInstrSize) + 4;
// Make sure that code size is predicable, since we use specific constants
// offsets in the code to find embedded values..
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(kAdditionalDelta);
- // r5 is used to communicate the offset to the location of the map check.
- __ mov(r5, Operand(delta * kPointerSize));
- // The mov above can generate one or two instructions. The delta was computed
- // for two instructions, so we need to pad here in case of one instruction.
- if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
- ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
- __ nop();
+ PredictableCodeSizeScope predictable(
+ masm_, (additional_delta + 1) * Assembler::kInstrSize);
+ // Make sure we don't emit any additional entries in the constant pool before
+ // the call to ensure that the CallCodeSize() calculated the correct number of
+ // instructions for the constant pool load.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ int map_check_delta =
+ masm_->InstructionsGeneratedSince(map_check) + additional_delta;
+ int bool_load_delta =
+ masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ __ BlockConstPoolFor(additional_delta);
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(map_check_delta * kPointerSize));
+ // r6 is used to communicate the offset to the location of the bool load.
+ __ mov(r6, Operand(bool_load_delta * kPointerSize));
+ // The mov above can generate one or two instructions. The delta was
+ // computed for two instructions, so we need to pad here in case of one
+ // instruction.
+ while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
+ __ nop();
+ }
}
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
void LCodeGen::DoCmpT(LCmpT* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
if (NeedsEagerFrame()) {
no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (sp_delta != 0) {
- __ add(sp, sp, Operand(sp_delta));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (sp_delta != 0) {
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi
+ __ SmiUntag(reg);
+ __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
}
- } else {
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- __ SmiUntag(reg);
- __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
- }
- __ Jump(lr);
+ __ Jump(lr);
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
+// Sets up the extra registers required by the vector-based Load IC
+// calling convention: the type-feedback vector in
++// VectorLoadICDescriptor::VectorRegister() and the feedback slot index,
+// as a Smi, in VectorLoadICDescriptor::SlotRegister(). Only used when
+// FLAG_vector_ics is on. T is one of the LLoad*Generic instruction types.
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Move(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
// Loads a global property through the generic LoadIC. The receiver
// (global object) and result use the Load descriptor's fixed registers;
// the property name is materialized into the descriptor's name register.
// A 'typeof' load uses NOT_CONTEXTUAL mode so unresolved globals do not
// throw.
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r0));
- __ mov(r2, Operand(instr->name()));
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ // Vector ICs additionally take the feedback vector and slot.
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+ }
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Store the value.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ b(ne, &skip_assignment);
}
// Loads a named property through the generic (non-contextual) LoadIC.
// Receiver and result are in the Load descriptor's fixed registers; the
// name is materialized into the descriptor's name register before the
// IC call.
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r0));
// Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ // Vector ICs additionally take the feedback vector and slot.
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
Register function = ToRegister(instr->function());
Register result = ToRegister(instr->result());
- // Check that the function really is a function. Load map into the
- // result register.
- __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ b(ne, &non_instance);
-
// Get the prototype or initial map from the function.
__ ldr(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
// If the function does not have an initial map, we're done.
Label done;
// Get the prototype from the initial map.
__ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- __ bind(&non_instance);
- __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
}
-void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
- Runtime::FunctionId id) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ sub(r0, r0, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(r0, reg);
-}
-
-
-template<class T>
-void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
- class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
- public:
- DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
- Runtime::FunctionId id)
- : LDeferredCode(codegen), instr_(instr), id_(id) { }
- virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
- }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
- private:
- LInstruction* instr_;
- Runtime::FunctionId id_;
- };
-
- // Allocate a SIMD128 object on the heap.
- Register reg = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
- Register scratch = scratch0();
-
- DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
- this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
- __ jmp(deferred->entry());
- __ bind(deferred->exit());
-
- // Copy the SIMD128 value from the external array to the heap object.
- STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
- Operand operand = key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size);
-
- __ add(scratch, external_pointer, operand);
-
- // Load the inner FixedTypedArray.
- __ ldr(temp2, MemOperand(reg, T::kValueOffset));
-
- for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
- __ ldr(temp, MemOperand(scratch, base_offset + offset));
- __ str(
- temp,
- MemOperand(
- temp2,
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
- }
-
- // Now that we have finished with the object's real address tag it
- __ add(reg, reg, Operand(kHeapObjectTag));
-}
-
-
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), base_offset);
}
- } else if (IsFloat32x4ElementsKind(elements_kind)) {
- DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
- } else if (IsFloat64x2ElementsKind(elements_kind)) {
- DoLoadKeyedSIMD128ExternalArray<Float64x2>(instr);
- } else if (IsInt32x4ElementsKind(elements_kind)) {
- DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr);
}
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
- case FLOAT32x4_ELEMENTS:
- case FLOAT64x2_ELEMENTS:
- case INT32x4_ELEMENTS:
- case EXTERNAL_FLOAT32x4_ELEMENTS:
- case EXTERNAL_FLOAT64x2_ELEMENTS:
- case EXTERNAL_INT32x4_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
}
if (shift_size >= 0) {
return MemOperand(base, key, LSL, shift_size);
} else {
- ASSERT_EQ(-1, shift_size);
+ DCHECK_EQ(-1, shift_size);
return MemOperand(base, key, LSR, 1);
}
}
__ add(scratch0(), base, Operand(key, LSL, shift_size));
return MemOperand(scratch0(), base_offset);
} else {
- ASSERT_EQ(-1, shift_size);
+ DCHECK_EQ(-1, shift_size);
__ add(scratch0(), base, Operand(key, ASR, 1));
return MemOperand(scratch0(), base_offset);
}
// Loads a keyed (indexed) property through the generic KeyedLoadIC.
// The receiver and the key arrive in the Load descriptor's receiver and
// name registers respectively.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->key()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+ if (FLAG_vector_ics) {
+ // Vector ICs additionally take the feedback vector and slot.
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+ }
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
__ b(&result_in_receiver);
__ bind(&global_object);
__ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
__ ldr(result,
ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
- ASSERT(receiver.is(r0)); // Used for parameter count.
- ASSERT(function.is(r1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(receiver.is(r0)); // Used for parameter count.
+ DCHECK(function.is(r1)); // Required by InvokeFunction.
+ DCHECK(ToRegister(instr->result()).is(r0));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
// Push the receiver and use the register to keep the original
// number of arguments.
__ b(ne, &loop);
__ bind(&invoke);
- ASSERT(instr->HasPointerMap());
+ DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
__ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in cp.
- ASSERT(result.is(cp));
+ DCHECK(result.is(cp));
}
}
// Declares the global variables/functions for a compilation unit by
// calling the DeclareGlobals runtime function with three pushed
// arguments: the current context, the name/value pairs array, and the
// declaration flags (as a Smi).
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
__ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- ASSERT(instr->context() != NULL);
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(instr->context() != NULL);
+ DCHECK(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
Label done;
Register exponent = scratch0();
// Input is negative. Reverse its sign.
// Preserve the value of all registers.
{
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
// Registers were saved at the safepoint, so we can use
// many scratch registers.
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ bind(&done);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
+ DeoptimizeIf(mi, instr); // [-0.5, -0].
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&done);
}
+// Math.fround: rounds a double to the nearest float32 value, returned as
+// a double. Implemented as a VFP double->single->double conversion round
+// trip (vcvt.f32.f64 then vcvt.f64.f32) through the low half of the
+// double scratch register.
+void LCodeGen::DoMathFround(LMathFround* instr) {
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+ DwVfpRegister output_reg = ToDoubleRegister(instr->result());
+ LowDwVfpRegister scratch = double_scratch0();
+ __ vcvt_f32_f64(scratch.low(), input_reg);
+ __ vcvt_f64_f32(output_reg, scratch.low());
+}
+
+
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->right()->IsDoubleRegister() ||
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(d1));
- ASSERT(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(r2));
- ASSERT(ToDoubleRegister(instr->left()).is(d0));
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ DCHECK(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(tagged_exponent));
+ DCHECK(ToDoubleRegister(instr->left()).is(d0));
+ DCHECK(ToDoubleRegister(instr->result()).is(d2));
if (exponent_type.IsSmi()) {
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(r2, &no_deopt);
- __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ DCHECK(!r6.is(tagged_exponent));
+ __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
- ASSERT(exponent_type.IsDouble());
+ DCHECK(exponent_type.IsDouble());
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(instr->HasPointerMap());
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r1));
+ DCHECK(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
}
+// Probes the megamorphic stub cache for (receiver, name). On a hit the
+// probe tail-calls directly into the cached handler; on a miss control
+// falls through to a tail call of the LoadIC miss handler. Because these
+// are tail calls, the current frame (if any) must be torn down first.
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ // Fixed register contract: receiver/name must be in the Load
+ // descriptor's registers (r1/r2 on ARM).
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(r1));
+ DCHECK(name.is(r2));
+
+ // Scratch registers handed to the stub-cache probe.
+ Register scratch = r3;
+ Register extra = r4;
+ Register extra2 = r5;
+ Register extra3 = r6;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->result()).is(r0));
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- PlatformCallInterfaceDescriptor* call_descriptor =
- instr->descriptor()->platform_specific_descriptor();
+ PlatformInterfaceDescriptor* call_descriptor =
+ instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
} else {
- ASSERT(instr->target()->IsRegister());
+ DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
- __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Make sure we don't emit any additional entries in the constant pool
+ // before the call to ensure that the CallCodeSize() calculated the correct
+ // number of instructions for the constant pool load.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
__ Call(target);
}
generator.AfterCall();
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->function()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
if (instr->hydrogen()->pass_argument_count()) {
__ mov(r0, Operand(instr->arity()));
void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->constructor()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
// No cell in r2 for construct type feedback in optimized code
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->constructor()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r1));
+ DCHECK(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ AssertNotSmi(object);
- ASSERT(!representation.IsSmi() ||
+ DCHECK(!representation.IsSmi() ||
!instr->value()->IsConstantOperand() ||
IsSmi(LConstantOperand::cast(instr->value())));
if (representation.IsDouble()) {
- ASSERT(access.IsInobject());
- ASSERT(!instr->hydrogen()->has_transition());
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DCHECK(access.IsInobject());
+ DCHECK(!instr->hydrogen()->has_transition());
+ DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
DwVfpRegister value = ToDoubleRegister(instr->value());
__ vstr(value, FieldMemOperand(object, offset));
return;
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- // Name is always in r2.
- __ mov(r2, Operand(instr->name()));
+ __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
- }
-}
-
-
-template<class T>
-void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
- ASSERT(instr->value()->IsRegister());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
- Register input_reg = ToRegister(instr->value());
- __ SmiTst(input_reg);
- DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
- DeoptimizeIf(ne, instr->environment());
-
- STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
- Register address = scratch0();
- if (key_is_constant) {
- if (constant_key != 0) {
- __ add(address, external_pointer,
- Operand(constant_key << element_size_shift));
- } else {
- address = external_pointer;
- }
- } else {
- __ add(address, external_pointer, Operand(key, LSL, shift_size));
- }
-
- // Load the inner FixedTypedArray.
- __ ldr(temp2, MemOperand(input_reg, T::kValueOffset - kHeapObjectTag));
-
- for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
- __ ldr(temp, MemOperand(temp2,
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
- __ str(temp, MemOperand(address, base_offset + offset));
+ DeoptimizeIf(cc, instr);
}
}
} else { // Storing doubles, not floats.
__ vstr(value, address, base_offset);
}
- } else if (IsFloat32x4ElementsKind(elements_kind)) {
- DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
- } else if (IsFloat64x2ElementsKind(elements_kind)) {
- DoStoreKeyedSIMD128ExternalArray<Float64x2>(instr);
- } else if (IsInt32x4ElementsKind(elements_kind)) {
- DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand = PrepareKeyedOperand(
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
- case FLOAT32x4_ELEMENTS:
- case FLOAT64x2_ELEMENTS:
- case INT32x4_ELEMENTS:
- case EXTERNAL_FLOAT32x4_ELEMENTS:
- case EXTERNAL_FLOAT64x2_ELEMENTS:
- case EXTERNAL_INT32x4_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
// Do the store.
if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).is(r2));
- ASSERT(ToRegister(instr->key()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- Handle<Code> ic = instr->strict_mode() == STRICT
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
GetLinkRegisterState(),
kDontSaveFPRegs);
} else {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(object_reg.is(r0));
- PushSafepointRegistersScope scope(
- this, Safepoint::kWithRegistersAndDoubles);
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(object_reg.is(r0));
+ PushSafepointRegistersScope scope(this);
__ Move(r1, to_map);
bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(
+ RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
__ bind(¬_applicable);
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->left()).is(r1));
- ASSERT(ToRegister(instr->right()).is(r0));
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r1));
+ DCHECK(ToRegister(instr->right()).is(r0));
StringAddStub stub(isolate(),
instr->hydrogen()->flags(),
instr->hydrogen()->pretenure_flag());
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
// contained in the register pointer map.
__ mov(result, Operand::Zero());
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
DeferredStringCharFromCode* deferred =
new(zone()) DeferredStringCharFromCode(this, instr);
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
+ DCHECK(!char_code.is(result));
__ cmp(char_code, Operand(String::kMaxOneByteCharCode));
__ b(hi, deferred->entry());
// contained in the register pointer map.
__ mov(result, Operand::Zero());
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
+ DCHECK(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
+ DCHECK(output->IsDoubleRegister());
SwVfpRegister single_scratch = double_scratch0().low();
if (input->IsStackSlot()) {
Register scratch = scratch0();
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
SIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
__ mov(dst, Operand::Zero());
// Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kHiddenAllocateHeapNumber.
+ // They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
Register reg = ToRegister(instr->result());
__ mov(reg, Operand::Zero());
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kHiddenAllocateHeapNumber.
+ // They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ SmiTag(output, input);
}
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr);
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DwVfpRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
- ASSERT(!result_reg.is(double_scratch0()));
+ DCHECK(!result_reg.is(double_scratch0()));
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
+ DeoptimizeIf(eq, instr);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
}
} else {
__ SmiUntag(scratch, input_reg);
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
}
// Smi to double register conversion
__ bind(&load_smi);
LowDwVfpRegister double_scratch = double_scratch0();
DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
- ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+ DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
Label done;
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "cannot truncate");
__ mov(input_reg, Operand::Zero());
- __ b(&done);
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "not a heap number");
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr, "minus zero");
}
}
__ bind(&done);
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
+ DCHECK(input->IsRegister());
+ DCHECK(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->value();
- ASSERT(input->IsRegister());
+ DCHECK(input->IsRegister());
LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
+ DCHECK(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
DwVfpRegister result_reg = ToDoubleRegister(result);
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&done);
}
}
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
}
}
} else {
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+ DeoptimizeIf(tag == 0 ? ne : eq, instr);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
}
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ push(object);
__ mov(cp, Operand::Zero());
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
Register map_reg = scratch0();
LOperand* input = instr->value();
- ASSERT(input->IsRegister());
+ DCHECK(input->IsRegister());
Register reg = ToRegister(input);
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate V8_FINAL : public LDeferredCode {
+ class DeferredAllocate FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
}
}
} else {
Register size = ToRegister(instr->size());
- __ Allocate(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
}
__ bind(deferred->exit());
if (instr->hydrogen()->MustPrefillWithFiller()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ mov(scratch, Operand(size));
+ __ mov(scratch, Operand(size - kHeapObjectTag));
} else {
- scratch = ToRegister(instr->size());
+ __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
}
- __ sub(scratch, scratch, Operand(kPointerSize));
- __ sub(result, result, Operand(kHeapObjectTag));
+ __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
Label loop;
__ bind(&loop);
- __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
__ str(scratch2, MemOperand(result, scratch));
- __ sub(scratch, scratch, Operand(kPointerSize));
- __ cmp(scratch, Operand(0));
__ b(ge, &loop);
- __ add(result, result, Operand(kHeapObjectTag));
}
}
// contained in the register pointer map.
__ mov(result, Operand(Smi::FromInt(0)));
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
Register size = ToRegister(instr->size());
- ASSERT(!size.is(result));
+ DCHECK(!size.is(result));
__ SmiTag(size);
__ push(size);
} else {
int flags = AllocateDoubleAlignFlag::encode(
instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
flags = AllocateTargetSpace::update(flags, NEW_SPACE);
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(r0));
+ DCHECK(ToRegister(instr->value()).is(r0));
__ push(r0);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
// r6 = literals array.
__ mov(r4, Operand(instr->hydrogen()->pattern()));
__ mov(r3, Operand(instr->hydrogen()->flags()));
__ Push(r6, r5, r4, r3);
- CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
__ bind(&materialized);
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r1, r0);
- CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ pop(r1);
__ bind(&allocated);
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(),
- instr->hydrogen()->strict_mode(),
- instr->hydrogen()->is_generator());
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, r2, r1);
- CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof &&
- String::Equals(type_name, factory->null_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- final_branch_condition = eq;
-
} else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ b(eq, true_label);
} else if (String::Equals(type_name, factory->object_string())) {
Register map = scratch;
__ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ b(eq, true_label);
- }
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ b(eq, true_label);
__ CheckObjectTypeRange(input,
map,
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- ASSERT(!temp1.is(temp2));
+ DCHECK(!temp1.is(temp2));
// Get the frame pointer for the calling frame.
__ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Block literal pool emission for duration of padding.
Assembler::BlockConstPoolScope block_const_pool(masm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
padding_size -= Assembler::kInstrSize;
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
last_lazy_deopt_pc_ = masm()->pc_offset();
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type);
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck V8_FINAL : public LDeferredCode {
+ class DeferredStackCheck FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
- ASSERT(instr->HasEnvironment());
+ DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
// There is no LLazyBailout instruction for stack-checks. We have to
// prepare for lazy deoptimization explicitly here.
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
PredictableCodeSizeScope predictable(masm(),
CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
- ASSERT(instr->context()->IsRegister());
- ASSERT(ToRegister(instr->context()).is(cp));
+ DCHECK(instr->context()->IsRegister());
+ DCHECK(ToRegister(instr->context()).is(cp));
CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
+ DCHECK(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
new(zone()) DeferredStackCheck(this, instr);
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
+ DCHECK(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
GenerateOsrPrologue();
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ SmiTst(r0);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&use_cache);
}
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&done);
}
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
Register result,
Register object,
Register index) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(this);
__ Push(object);
__ Push(index);
__ mov(cp, Operand::Zero());
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
object_(object),
index_(index) {
}
- virtual void Generate() V8_OVERRIDE {
+ virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
- virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, 2, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}