LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- EmitKeyedLoad(false);
+ EmitKeyedLoad();
frame_->Drop(); // key
// Put the function below the receiver.
if (property->is_synthetic()) {
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop(1) instruction to indicate that the
// in-object property load has been inlined.
- __ nop(NAMED_PROPERTY_LOAD_INLINED);
+ __ nop(PROPERTY_LOAD_INLINED);
+
+ // Block the constant pool for one more instruction after leaving this
+ // constant pool block scope to include the branch instruction ending the
+ // deferred code.
+ __ BlockConstPoolFor(1);
+ }
+}
+
+
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceGetKeyedValue() {
+ set_comment("[ DeferredReferenceGetKeyedValue");
+ }
+
+ virtual void Generate();
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+ __ DecrementCounter(&Counters::keyed_load_inline, 1, r1, r2);
+ __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, r1, r2);
+
+ // The rest of the instructions in the deferred code must be together.
+ { Assembler::BlockConstPoolScope block_const_pool(masm_);
+ // Call keyed load IC. It has all arguments on the stack.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The call must be followed by a nop instruction to indicate that the
+ // keyed load has been inlined.
+ __ nop(PROPERTY_LOAD_INLINED);
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
+}
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
} else {
- // Inline the inobject property case.
+ // Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
__ cmp(r2, r3);
deferred->Branch(ne);
- // Use initially use an invalid index. The index will be patched by the
+ // Initially use an invalid index. The index will be patched by the
// inline cache code.
__ ldr(r0, MemOperand(r1, 0));
}
-void CodeGenerator::EmitKeyedLoad(bool is_global) {
- Comment cmnt(masm_, "[ Load from keyed Property");
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- RelocInfo::Mode rmode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- frame_->CallCodeObject(ic, rmode, 0);
+void CodeGenerator::EmitKeyedLoad() {
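+  // Inline the keyed load only when the load site is inside a loop; top-level
+  // code simply calls the keyed load IC.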
+ if (loop_nesting() == 0) {
+ Comment cmnt(masm_, "[ Load from keyed property");
+ frame_->CallKeyedLoadIC();
+ } else {
+ // Inline the keyed load.
+ Comment cmnt(masm_, "[ Inlined load from keyed property");
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue();
+
+  // The counter will be decremented in the deferred code. It is placed here
+  // to avoid having it in the instruction stream below, where patching will
+  // occur.
+ __ IncrementCounter(&Counters::keyed_load_inline, 1,
+ frame_->scratch0(), frame_->scratch1());
+
+ // Load the receiver from the stack.
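+  // The key is on top of the stack and the receiver is just below it.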
+ __ ldr(r0, MemOperand(sp, kPointerSize));
+
+ // Check that the receiver is a heap object.
+ __ tst(r0, Operand(kSmiTagMask));
+ deferred->Branch(eq);
+
+  // The following instructions are the inlined load of a keyed property.
+  // Parts of this code are patched, so the exact number of instructions
+  // generated needs to be fixed. Therefore the constant pool is blocked
+  // while generating this code.
+#ifdef DEBUG
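+  // Expected number of instructions in the patchable block below; checked by
+  // the ASSERT at the end of the block.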
+ int kInlinedKeyedLoadInstructions = 20;
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
+#endif
+ { Assembler::BlockConstPoolScope block_const_pool(masm_);
+ // Check the map. The null map used below is patched by the inline cache
+ // code.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(Factory::null_value()));
+ __ cmp(r1, r2);
+ deferred->Branch(ne);
+
+ // Load the key from the stack.
+ __ ldr(r1, MemOperand(sp, 0));
+
+ // Check that the key is a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ ldr(r2, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r2, JSObject::kMapOffset));
+ __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r3, r4);
+ deferred->Branch(ne);
+
+    // Check that the key is within bounds.
+ __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ cmp(r3, Operand(r1, ASR, kSmiTagSize));
+    deferred->Branch(ls);  // Unsigned lower or same (length <= key), i.e. the key is out of bounds.
+
+ // Load and check that the result is not the hole (r1 is a smi).
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r0, MemOperand(r2, r1, LSL,
+ kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+ __ cmp(r0, r3);
+ deferred->Branch(eq);
+
+    // Make sure that the expected number of instructions is generated.
+ ASSERT_EQ(kInlinedKeyedLoadInstructions,
+ masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+ }
+
+ deferred->BindExit();
+ }
}
}
case KEYED: {
- // TODO(181): Implement inlined version of array indexing once
- // loop nesting is properly tracked on ARM.
ASSERT(property != NULL);
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- ASSERT(var == NULL || var->is_global());
- cgen_->EmitKeyedLoad(var != NULL);
+ cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
break;
}
}
-void LoadIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined inobject property load (if present) to
- // guarantee failure by holding an invalid map (the null value). The offset
- // can be patched to anything.
- PatchInlinedLoad(address, Heap::null_value(), 0);
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+static inline bool IsInlinedICSite(Address address,
+ Address* inline_end_address) {
  // If the instruction after the call site is not the pseudo instruction nop1
  // then this is not related to an inlined in-object property load. The nop1
  // instruction is located just after the call to the IC in the deferred code
  // handling the miss in the inlined code. After the nop1 instruction there is
  // a branch instruction for jumping back from the deferred code.
Address address_after_call = address + Assembler::kCallTargetAddressOffset;
Instr instr_after_call = Assembler::instr_at(address_after_call);
- if (!Assembler::IsNop(instr_after_call, NAMED_PROPERTY_LOAD_INLINED)) {
+ if (!Assembler::IsNop(instr_after_call, PROPERTY_LOAD_INLINED)) {
return false;
}
- ASSERT_EQ(0, RegisterAllocator::kNumRegisters);
- Address address_after_nop1 = address_after_call + Assembler::kInstrSize;
- Instr instr_after_nop1 = Assembler::instr_at(address_after_nop1);
- ASSERT(Assembler::IsBranch(instr_after_nop1));
+ Address address_after_nop = address_after_call + Assembler::kInstrSize;
+ Instr instr_after_nop = Assembler::instr_at(address_after_nop);
+ ASSERT(Assembler::IsBranch(instr_after_nop));
// Find the end of the inlined code for handling the load.
int b_offset =
- Assembler::GetBranchOffset(instr_after_nop1) + Assembler::kPcLoadDelta;
+ Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
ASSERT(b_offset < 0); // Jumping back from deferred code.
- Address inline_end_address = address_after_nop1 + b_offset;
+ *inline_end_address = address_after_nop + b_offset;
+
+ return true;
+}
+
+
+void LoadIC::ClearInlinedVersion(Address address) {
+ // Reset the map check of the inlined in-object property load (if present) to
+ // guarantee failure by holding an invalid map (the null value). The offset
+ // can be patched to anything.
+ PatchInlinedLoad(address, Heap::null_value(), 0);
+}
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+ // Find the end of the inlined code for handling the load if this is an
+ // inlined IC call site.
+ Address inline_end_address;
+ if (!IsInlinedICSite(address, &inline_end_address)) return false;
// Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
- // The immediate must be represenatble in 12 bits.
+ // The immediate must be representable in 12 bits.
ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
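+  // The property load ldr is the last instruction of the inlined code, just
+  // before the address the deferred code branches back to.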
- Address ldr_property_instr_address = inline_end_address - 4;
+ Address ldr_property_instr_address =
+ inline_end_address - Assembler::kInstrSize;
ASSERT(Assembler::IsLdrRegisterImmediate(
Assembler::instr_at(ldr_property_instr_address)));
Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
// Patch the map check.
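+  // The instruction loading the map to compare against (initially the null
+  // value) is the fourth instruction from the end of the inlined named
+  // property load.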
- Address ldr_map_instr_address = inline_end_address - 16;
+ Address ldr_map_instr_address =
+ inline_end_address - 4 * Assembler::kInstrSize;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
-void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+ // Reset the map check of the inlined keyed load (if present) to
+ // guarantee failure by holding an invalid map (the null value).
+ PatchInlinedLoad(address, Heap::null_value());
+}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return false;
+ Address inline_end_address;
+ if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+ // Patch the map check.
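+  // The instruction loading the map to compare against (initially the null
+  // value) is the second of the 20 instructions making up the inlined keyed
+  // load, so it sits 19 instructions before the end of the inlined code.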
+ Address ldr_map_instr_address =
+ inline_end_address - 19 * Assembler::kInstrSize;
+ Assembler::set_target_address_at(ldr_map_instr_address,
+ reinterpret_cast<Address>(map));
+ return true;
}