void CodeGenerator::LoadGlobal() {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- __ ldr(r0, GlobalObject());
- frame_->EmitPush(r0);
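+ // Instead of spilling and always going through r0, get a register from the
+ // frame to hold the new top-of-stack value.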
+ Register reg = frame_->GetTOSRegister();
+ __ ldr(reg, GlobalObject());
+ frame_->EmitPush(reg);
}
void CodeGenerator::LoadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
if (property != NULL) {
// The expression is either a property or a variable proxy that rewrites
// to a property.
- LoadAndSpill(property->obj());
+ Load(property->obj());
if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
- LoadAndSpill(property->key());
+ Load(property->key());
ref->set_type(Reference::KEYED);
}
} else if (var != NULL) {
}
} else {
// Anything else is a runtime error.
+ VirtualFrame::SpilledScope spilled_scope(frame_);
LoadAndSpill(e);
frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
LoadAndSpill(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
__ mov(r2, Operand(name));
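+ // The load IC takes the receiver in r0 as well as on the stack.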
+ __ ldr(r0, MemOperand(sp, 0));
frame_->CallLoadIC(RelocInfo::CODE_TARGET);
frame_->EmitPush(r0);
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
if (slot->type() == Slot::LOOKUP) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(slot->var()->is_dynamic());
+ // JumpTargets do not yet support merging frames, so the frame must be
+ // spilled when jumping to these targets.
JumpTarget slow;
JumpTarget done;
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
+ LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
// If there was no control flow to slow, we can exit early.
if (!slow.is_linked()) {
frame_->EmitPush(r0);
return;
}
+ frame_->SpillAll();
done.Jump();
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ frame_->SpillAll();
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads.
}
slow.Bind();
+ VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name()));
frame_->EmitPush(r0);
void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
- Register tmp,
- Register tmp2,
JumpTarget* slow) {
// Check that no extension objects have been created by calls to
// eval from the current scope to the global scope.
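+ // The temporaries are taken from the frame's scratch registers instead of
+ // being passed in as parameters.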
+ Register tmp = frame_->scratch0();
+ Register tmp2 = frame_->scratch1();
Register context = cp;
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
+ frame_->SpillAll();
// Check that extension is NULL.
__ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(tmp2, tmp2);
}
if (s->is_eval_scope()) {
+ frame_->SpillAll();
Label next, fast;
__ Move(tmp, context);
__ bind(&next);
// Load the global object.
LoadGlobal();
// Setup the name register and call load IC.
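+ // Flush the frame, but keep a copy of the global object (the receiver for
+ // the load IC) in r0.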
+ frame_->SpillAllButCopyTOSToR0();
__ mov(r2, Operand(slot->var()->name()));
frame_->CallLoadIC(typeof_state == INSIDE_TYPEOF
? RelocInfo::CODE_TARGET
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
__ DecrementCounter(&Counters::named_load_inline, 1, r1, r2);
__ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2);
- // Setup the name register and call load IC.
+ // Setup the registers and call load IC.
+ // On entry to this deferred code, r0 is assumed to already contain the
+ // receiver from the top of the stack.
__ mov(r2, Operand(name_));
// The rest of the instructions in the deferred code must be together.
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
// Setup the name register and call load IC.
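+ // The receiver is on top of the frame; flush the frame and leave a copy of
+ // the receiver in r0 for the load IC.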
+ frame_->SpillAllButCopyTOSToR0();
__ mov(r2, Operand(name));
frame_->CallLoadIC(is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(name);
-
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::named_load_inline, 1,
// Parts of this code are patched, so the exact instructions generated need
// to be fixed. Therefore the instruction pool is blocked when generating
// this code.
+
+ // Spill the frame and load the receiver (the top of stack) into r0.
+ frame_->SpillAllButCopyTOSToR0();
+
+ DeferredReferenceGetNamedValue* deferred =
+ new DeferredReferenceGetNamedValue(name);
+
#ifdef DEBUG
- int kInlinedNamedLoadInstructions = 8;
+ int kInlinedNamedLoadInstructions = 7;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Load the receiver from the stack.
- __ ldr(r1, MemOperand(sp, 0));
+ { Assembler::BlockConstPoolScope block_const_pool(masm_);
// Check that the receiver is a heap object.
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(r0, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check the map. The null map used below is patched by the inline cache
// code.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r3, Operand(Factory::null_value()));
__ cmp(r2, r3);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
- __ ldr(r0, MemOperand(r1, 0));
+ __ ldr(r0, MemOperand(r0, 0));
// Make sure that the expected number of instructions is generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
void CodeGenerator::EmitKeyedLoad() {
if (loop_nesting() == 0) {
+ VirtualFrame::SpilledScope spilled(frame_);
Comment cmnt(masm_, "[ Load from keyed property");
frame_->CallKeyedLoadIC();
} else {
// Inline the keyed load.
Comment cmnt(masm_, "[ Inlined load from keyed property");
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue();
-
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::keyed_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
// Load the receiver from the stack.
- __ ldr(r0, MemOperand(sp, kPointerSize));
+ frame_->SpillAllButCopyTOSToR0();
+ VirtualFrame::SpilledScope spilled(frame_);
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue();
// Check that the receiver is a heap object.
__ tst(r0, Operand(kSmiTagMask));
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
- Register tmp,
- Register tmp2,
JumpTarget* slow);
// Special code for typeof expressions: Unfortunately, we must
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e -------------
- // -- r0 : receiver
// -- r2 : name
// -- lr : return address
+ // -- r0 : receiver
// -- [sp] : receiver
// -----------------------------------
// Registers r0 and r2 contain objects that need to be pushed on the
__ add(r2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+ __ Push(r3, r2, r1);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
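+ // Load the global object into r0, which the load IC uses as the receiver,
+ // before pushing it.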
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
__ mov(r1, Operand(key_literal->handle()));
// Push both as arguments to ic.
- __ stm(db_w, sp, r2.bit() | r1.bit());
+ __ Push(r2, r1);
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
- __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+ __ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ bind(&done);
Apply(context_, r0);
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_properties()));
__ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit() | r0.bit());
+ __ Push(r3, r2, r1, r0);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_elements()));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+ __ Push(r3, r2, r1);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
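+ // Load the receiver into r0; the load IC expects it there as well as on the
+ // stack.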
+ __ ldr(r0, MemOperand(sp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}
// dictionary.
//
// r2 - holds the name of the property and is unchanged.
+ // r4 - used as temporary.
Label done;
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(t1, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ ldr(r4, FieldMemOperand(r2, String::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the 'and' instruction below.
ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset));
- __ add(t1, t1, Operand(
+ __ add(r4, r4, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(t1, r3, Operand(t1, LSR, String::kHashShift));
+ __ and_(r4, r3, Operand(r4, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
- __ add(t1, t1, Operand(t1, LSL, 1)); // t1 = t1 * 3
+ __ add(r4, r4, Operand(r4, LSL, 1)); // r4 = r4 * 3
// Check if the key is identical to the name.
- __ add(t1, t0, Operand(t1, LSL, 2));
- __ ldr(ip, FieldMemOperand(t1, kElementsStartOffset));
+ __ add(r4, t0, Operand(r4, LSL, 2));
+ __ ldr(ip, FieldMemOperand(r4, kElementsStartOffset));
__ cmp(r2, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
}
// Check that the value is a normal property.
- __ bind(&done); // t1 == t0 + 4*index
- __ ldr(r3, FieldMemOperand(t1, kElementsStartOffset + 2 * kPointerSize));
+ __ bind(&done); // r4 == t0 + 4*index
+ __ ldr(r3, FieldMemOperand(r4, kElementsStartOffset + 2 * kPointerSize));
__ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
- __ ldr(t1, FieldMemOperand(t1, kElementsStartOffset + 1 * kPointerSize));
+ __ ldr(t1, FieldMemOperand(r4, kElementsStartOffset + 1 * kPointerSize));
}
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
+ // -- r0 : receiver
+ // -- sp[0] : receiver
// -----------------------------------
Label miss;
- __ ldr(r0, MemOperand(sp, 0));
-
StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
+ // -- r0 : receiver
+ // -- sp[0] : receiver
// -----------------------------------
Label miss;
- __ ldr(r0, MemOperand(sp, 0));
-
StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
// Cache miss: Jump to runtime.
__ bind(&miss);
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
+ // -- r0 : receiver
+ // -- sp[0] : receiver
// -----------------------------------
Label miss;
- // Load receiver.
- __ ldr(r0, MemOperand(sp, 0));
-
StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
- Label* miss) {
+ Label* miss,
+ Register scratch) {
// Search dictionary - put result in register r1.
GenerateDictionaryLoad(masm, miss, r0, r1);
__ b(eq, miss);
// Check that the value is a JSFunction.
- __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
__ b(ne, miss);
// Patch the receiver with the global proxy if necessary.
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &miss);
- GenerateNormalHelper(masm, argc, true, &miss);
+ GenerateNormalHelper(masm, argc, true, &miss, r4);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &miss);
__ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, &miss);
+ GenerateNormalHelper(masm, argc, false, &miss, r4);
// Global object access: Check access rights.
__ bind(&global_proxy);
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
+ // -- r0 : receiver
+ // -- sp[0] : receiver
// -----------------------------------
- __ ldr(r0, MemOperand(sp, 0));
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
+ // -- r0 : receiver
+ // -- sp[0] : receiver
// -----------------------------------
Label miss, probe, global;
- __ ldr(r0, MemOperand(sp, 0));
// Check that the receiver isn't a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
+ // -- r0 : receiver
+ // -- sp[0] : receiver
// -----------------------------------
- __ ldr(r3, MemOperand(sp, 0));
- __ stm(db_w, sp, r2.bit() | r3.bit());
+ __ mov(r3, r0);
+ __ Push(r3, r2);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
- // -- [sp] : receiver
+ // -- r0 : receiver
+ // -- sp[0] : receiver
// -----------------------------------
Label miss;
- // Get the receiver from the stack.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
-
// If the object is the holder, then we know that it is a global
// object, which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
}
// Check that the map of the global has not changed.
- CheckPrototypes(object, r1, holder, r3, r0, name, &miss);
+ CheckPrototypes(object, r0, holder, r3, r4, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+ __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check for a deleted property if the property can actually be deleted.
if (!is_dont_delete) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(r4, ip);
__ b(eq, &miss);
}
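+ // The cell value was loaded into r4 so that r0 (the receiver) stays intact
+ // for the miss path; move the result into r0 now that the checks are done.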
+ __ mov(r0, r4);
__ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
__ Ret();
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
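+ // Runtime calls are only made with a completely spilled virtual frame.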
+ ASSERT(SpilledScope::is_spilled());
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
+void VirtualFrame::SpillAllButCopyTOSToR0() {
+ switch (top_of_stack_state_) {
+ case NO_TOS_REGISTERS:
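+ // The top of stack is already in memory; just load a copy into r0.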
+ __ ldr(r0, MemOperand(sp, 0));
+ break;
+ case R0_TOS:
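+ // r0 holds the top of stack; flush it to memory, keeping the value in r0.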
+ __ push(r0);
+ break;
+ case R1_TOS:
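+ // r1 holds the top of stack; flush it and copy the value into r0.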
+ __ push(r1);
+ __ mov(r0, r1);
+ break;
+ case R0_R1_TOS:
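+ // r0 holds the top of stack (with r1 below it); flushing both registers
+ // already leaves the copy in r0.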
+ __ Push(r1, r0);
+ break;
+ case R1_R0_TOS:
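+ // r1 holds the top of stack (with r0 below it); flush both registers and
+ // copy the top value into r0.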
+ __ Push(r0, r1);
+ __ mov(r0, r1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
Register VirtualFrame::Peek() {
AssertIsNotSpilled();
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
// must be copied to a scratch register before modification.
Register Peek();
+ // A somewhat specialized operation: it flushes all registers to the stack,
+ // but leaves a copy of the top of stack in r0.
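+ // Typical use (see the named property load in codegen-arm.cc): with the
+ // receiver on top of the frame, leave a copy in r0 before calling the
+ // load IC, e.g.
+ //   frame_->SpillAllButCopyTOSToR0();
+ //   __ mov(r2, Operand(name));
+ //   frame_->CallLoadIC(RelocInfo::CODE_TARGET);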
+ void SpillAllButCopyTOSToR0();
+
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);