Fix inlined keyed property load on ARM
author     sgjesse@chromium.org <sgjesse@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 10 May 2010 10:45:18 +0000 (10:45 +0000)
committer  sgjesse@chromium.org <sgjesse@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 10 May 2010 10:45:18 +0000 (10:45 +0000)
The change r4608 accidentally disabled the inlined keyed load because the key and receiver registers were mixed up. Also make sure that the registers the keyed load IC expects are not clobbered before the bailout to deferred code. This adds one instruction to the inlined code path.
Review URL: http://codereview.chromium.org/2018005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4629 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
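
As a rough sketch (not part of the patch), the register contract that the codegen-arm.cc hunks below restore looks like this. The MacroAssembler calls are the ones that appear in the diff; the enclosing CodeGenerator::EmitKeyedLoad scaffolding (frame_, masm_, deferred, and the earlier map, elements and bounds checks) is assumed and omitted:

  Register key = r0;        // The deferred code expects the key in r0 ...
  Register receiver = r1;   // ... and the receiver in r1, so neither register
                            // may be clobbered before a deferred->Branch() fires.
  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();

  // Load the element into a scratch register instead of straight into r0.
  __ ldr(scratch1, MemOperand(scratch1, key, LSL,
                              kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
  __ cmp(scratch1, scratch2);   // scratch2 is assumed to hold the hole value here.
  deferred->Branch(eq);         // Bail out with r0 and r1 still intact.

  // Only after the last possible bailout is the result moved into r0; this mov
  // is the one extra instruction mentioned above.
  __ mov(r0, scratch1);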

src/arm/codegen-arm.cc
src/arm/ic-arm.cc

diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 5509830b307d1b8ea7c2b18fd9fe53f0caec1677..b5778cf88b6a588156fbcd9cd12568ff3967f791 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -5578,8 +5578,8 @@ void CodeGenerator::EmitKeyedLoad() {
 
     // Load the key and receiver from the stack to r0 and r1.
     frame_->PopToR1R0();
-    Register receiver = r0;
-    Register key = r1;
+    Register key = r0;
+    Register receiver = r1;
     VirtualFrame::SpilledScope spilled(frame_);
 
     // The deferred code expects key and receiver in r0 and r1.
@@ -5594,17 +5594,16 @@ void CodeGenerator::EmitKeyedLoad() {
     // property code which can be patched. Therefore the exact number of
     // instructions generated need to be fixed, so the constant pool is blocked
     // while generating this code.
-#ifdef DEBUG
-    int kInlinedKeyedLoadInstructions = 19;
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
-#endif
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       Register scratch1 = VirtualFrame::scratch0();
       Register scratch2 = VirtualFrame::scratch1();
       // Check the map. The null map used below is patched by the inline cache
       // code.
       __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+#ifdef DEBUG
+    Label check_inlined_codesize;
+    masm_->bind(&check_inlined_codesize);
+#endif
       __ mov(scratch2, Operand(Factory::null_value()));
       __ cmp(scratch1, scratch2);
       deferred->Branch(ne);
@@ -5632,17 +5631,15 @@ void CodeGenerator::EmitKeyedLoad() {
       __ add(scratch1,
              scratch1,
              Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ ldr(r0,
+      __ ldr(scratch1,
              MemOperand(scratch1, key, LSL,
                         kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-      __ cmp(r0, scratch2);
-      // This is the only branch to deferred where r0 and r1 do not contain the
-      // receiver and key.  We can't just load undefined here because we have to
-      // check the prototype.
+      __ cmp(scratch1, scratch2);
       deferred->Branch(eq);
 
+      __ mov(r0, scratch1);
       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedKeyedLoadInstructions,
+      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatchSize,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
 
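
The ASSERT_EQ above and the PatchInlinedLoad hunk below both refer to kInlinedKeyedLoadInstructionsAfterPatchSize, whose declaration is not part of the hunks shown. As a hedged guess, it presumably lives on CodeGenerator (likely in src/arm/codegen-arm.h), and its value is assumed to be 19, i.e. the old hard-coded offset of 18 plus the one instruction this change adds:

  // Sketch only; the header location and the value are assumptions, not shown in the diff.
  class CodeGenerator {
   public:
    // Number of instructions from the patched map-constant load to the end of
    // the inlined keyed load code.
    static const int kInlinedKeyedLoadInstructionsAfterPatchSize = 19;
    // ... rest of the class elided ...
  };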
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 34ba5e5f78e039b5d83f67babdf80609513b3bcf..c308d69df5d558777ae6e900cefbcda8f8bc8631 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "assembler-arm.h"
+#include "codegen.h"
 #include "codegen-inl.h"
 #include "disasm.h"
 #include "ic-inl.h"
@@ -639,7 +640,9 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
 
   // Patch the map check.
   Address ldr_map_instr_address =
-      inline_end_address - 18 * Assembler::kInstrSize;
+      inline_end_address -
+      CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatchSize *
+      Assembler::kInstrSize;
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
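
For reference, a tiny standalone sketch (not from the patch) of the offset arithmetic PatchInlinedLoad now performs. kInstrSize is 4 bytes on ARM; inline_end_address is a made-up value, and 19 is the assumed constant value from the sketch above:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int kInstrSize = 4;  // Every ARM instruction is 4 bytes wide.
    const int kInlinedKeyedLoadInstructionsAfterPatchSize = 19;  // Assumed value.

    // Hypothetical address of the first instruction after the inlined load.
    std::uintptr_t inline_end_address = 0x00101000;

    // Same computation as the ic-arm.cc hunk above: walk back from the end of
    // the inlined code to the instruction whose target the IC patches with the map.
    std::uintptr_t ldr_map_instr_address =
        inline_end_address -
        kInlinedKeyedLoadInstructionsAfterPatchSize * kInstrSize;

    std::printf("patch target: %#lx\n",
                static_cast<unsigned long>(ldr_map_instr_address));
    return 0;
  }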