From: erik.corry@gmail.com
Date: Wed, 28 Apr 2010 09:12:04 +0000 (+0000)
Subject: Fix keyed load inlining after my last commit accidentally
X-Git-Tag: upstream/4.7.83~21923
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ea1d2ad87b8ddf9a67a1aa1c298f272f5cc612fe;p=platform%2Fupstream%2Fv8.git

Fix keyed load inlining after my last commit accidentally
broke it.
Review URL: http://codereview.chromium.org/1780010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4526 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index cc749c1..e428104 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -5280,8 +5280,10 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
 
 
 void DeferredReferenceGetKeyedValue::Generate() {
-  __ DecrementCounter(&Counters::keyed_load_inline, 1, r1, r2);
-  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, r1, r2);
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
 
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
@@ -5375,15 +5377,17 @@ void CodeGenerator::EmitKeyedLoad() {
     __ IncrementCounter(&Counters::keyed_load_inline, 1,
                         frame_->scratch0(), frame_->scratch1());
 
-    // Load the receiver from the stack.
-    frame_->SpillAllButCopyTOSToR0();
+    // Load the receiver and key from the stack.
+    frame_->SpillAllButCopyTOSToR1R0();
+    Register receiver = r0;
+    Register key = r1;
     VirtualFrame::SpilledScope spilled(frame_);
 
     DeferredReferenceGetKeyedValue* deferred =
         new DeferredReferenceGetKeyedValue();
 
     // Check that the receiver is a heap object.
-    __ tst(r0, Operand(kSmiTagMask));
+    __ tst(receiver, Operand(kSmiTagMask));
     deferred->Branch(eq);
 
     // The following instructions are the inlined load keyed property. Parts
@@ -5391,44 +5395,49 @@ void CodeGenerator::EmitKeyedLoad() {
     // need to be fixed. Therefore the constant pool is blocked while generating
     // this code.
 #ifdef DEBUG
-    int kInlinedKeyedLoadInstructions = 20;
+    int kInlinedKeyedLoadInstructions = 19;
     Label check_inlined_codesize;
     masm_->bind(&check_inlined_codesize);
 #endif
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      Register scratch1 = VirtualFrame::scratch0();
+      Register scratch2 = VirtualFrame::scratch1();
       // Check the map. The null map used below is patched by the inline cache
       // code.
-      __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-      __ mov(r2, Operand(Factory::null_value()));
-      __ cmp(r1, r2);
+      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ mov(scratch2, Operand(Factory::null_value()));
+      __ cmp(scratch1, scratch2);
       deferred->Branch(ne);
 
-      // Load the key from the stack.
-      __ ldr(r1, MemOperand(sp, 0));
-
       // Check that the key is a smi.
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(key, Operand(kSmiTagMask));
       deferred->Branch(ne);
 
       // Get the elements array from the receiver and check that it
       // is not a dictionary.
-      __ ldr(r2, FieldMemOperand(r0, JSObject::kElementsOffset));
-      __ ldr(r3, FieldMemOperand(r2, JSObject::kMapOffset));
-      __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
-      __ cmp(r3, r4);
+      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+      __ cmp(scratch2, ip);
       deferred->Branch(ne);
 
       // Check that key is within bounds.
-      __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
-      __ cmp(r3, Operand(r1, ASR, kSmiTagSize));
+      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+      __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
       deferred->Branch(ls);  // Unsigned less equal.
 
-      // Load and check that the result is not the hole (r1 is a smi).
-      __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
-      __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ ldr(r0, MemOperand(r2, r1, LSL,
-                            kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-      __ cmp(r0, r3);
+      // Load and check that the result is not the hole (key is a smi).
+      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+      __ add(scratch1,
+             scratch1,
+             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ ldr(r0,
+             MemOperand(scratch1, key, LSL,
+                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+      __ cmp(r0, scratch2);
+      // This is the only branch to deferred where r0 and r1 do not contain the
+      // receiver and key.  We can't just load undefined here because we have to
+      // check the prototype.
       deferred->Branch(eq);
 
       // Make sure that the expected number of instructions are generated.
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index e1d36f0..949f6c4 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -639,7 +639,7 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
 
   // Patch the map check.
   Address ldr_map_instr_address =
-      inline_end_address - 19 * Assembler::kInstrSize;
+      inline_end_address - 18 * Assembler::kInstrSize;
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index e31f47f..507b7ca 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -88,7 +88,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
       break;
     case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
       __ pop(r1);
-      __ pop(r1);
+      __ pop(r0);
       break;
     case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
       __ push(r0);
@@ -429,6 +429,35 @@ void VirtualFrame::SpillAllButCopyTOSToR0() {
 }
 
 
+void VirtualFrame::SpillAllButCopyTOSToR1R0() {
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r1, MemOperand(sp, 0));
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_TOS:
+      __ push(r0);
+      __ mov(r1, r0);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R1_TOS:
+      __ push(r1);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_R1_TOS:
+      __ Push(r1, r0);
+      __ Swap(r0, r1, ip);
+      break;
+    case R1_R0_TOS:
+      __ Push(r0, r1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
 Register VirtualFrame::Peek() {
   AssertIsNotSpilled();
   if (top_of_stack_state_ == NO_TOS_REGISTERS) {
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index ae81756..d9d8e93 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -344,10 +344,13 @@ class VirtualFrame : public ZoneObject {
   // must be copied to a scratch register before modification.
   Register Peek();
 
-  // A little specialized, this one.  It flushes all registers, but it puts a
-  // copy of the top-of-stack in R0.
+  // Flushes all registers, but it puts a copy of the top-of-stack in r0.
  void SpillAllButCopyTOSToR0();
 
+  // Flushes all registers, but it puts a copy of the top-of-stack in r1
+  // and the next value on the stack in r0.
+  void SpillAllButCopyTOSToR1R0();
+
   // Pop and save an element from the top of the expression stack and
   // emit a corresponding pop instruction.
   void EmitPop(Register reg);
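
---

A note on the virtual-frame change, for readers following along: the sketch below models the contract of the new VirtualFrame::SpillAllButCopyTOSToR1R0() on a plain host, one case per top-of-stack (TOS) cache state. This is only an illustration, not V8 code; the state names and the per-state push/load/swap sequences mirror the diff above, while the Model struct, the integer stack, and main() are invented for the example. Running it checks that every starting state ends with the TOS value (the key) in r1 and the value below it (the receiver) in r0, with both values spilled to memory, which is the postcondition EmitKeyedLoad now relies on.

// Host-side model of VirtualFrame::SpillAllButCopyTOSToR1R0() (sketch only).
// Build: g++ -std=c++11 tos_model.cc && ./a.out
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <utility>
#include <vector>

// The five top-of-stack cache states from src/arm/virtual-frame-arm.h.
enum TosState { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS };

struct Model {
  std::vector<int> mem;  // simulated stack; back() is the slot sp points at
  int r0, r1;            // simulated registers

  // Analogue of ldr(reg, MemOperand(sp, slots * kPointerSize)).
  int load_sp(size_t slots) const { return mem[mem.size() - 1 - slots]; }
  void push(int v) { mem.push_back(v); }

  // Mirrors the switch added in the patch: afterwards both values live in
  // memory, r1 holds the TOS value and r0 the value just below it.
  void SpillAllButCopyTOSToR1R0(TosState state) {
    switch (state) {
      case NO_TOS_REGISTERS:  // both values already in memory
        r1 = load_sp(0);
        r0 = load_sp(1);
        break;
      case R0_TOS:            // TOS cached in r0: flush it, then fetch next
        push(r0);
        r1 = r0;
        r0 = load_sp(1);
        break;
      case R1_TOS:            // TOS cached in r1: flush it, then fetch next
        push(r1);
        r0 = load_sp(1);
        break;
      case R0_R1_TOS:         // r0 = TOS, r1 = next: Push(r1, r0) then Swap
        push(r1);
        push(r0);
        std::swap(r0, r1);
        break;
      case R1_R0_TOS:         // r1 = TOS, r0 = next: registers already right
        push(r0);
        push(r1);
        break;
    }
  }
};

int main() {
  const int kNext = 10, kTos = 20;  // receiver below, key on top
  for (TosState s : {NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS}) {
    Model m{{}, -1, -1};
    switch (s) {  // distribute the two values according to the cache state
      case NO_TOS_REGISTERS: m.mem = {kNext, kTos};        break;
      case R0_TOS:           m.mem = {kNext}; m.r0 = kTos; break;
      case R1_TOS:           m.mem = {kNext}; m.r1 = kTos; break;
      case R0_R1_TOS:        m.r0 = kTos; m.r1 = kNext;    break;
      case R1_R0_TOS:        m.r1 = kTos; m.r0 = kNext;    break;
    }
    m.SpillAllButCopyTOSToR1R0(s);
    // Postcondition from the new header comment: TOS copy in r1, next value
    // in r0, and the frame fully spilled to memory.
    assert(m.r1 == kTos && m.r0 == kNext);
    assert(m.mem.size() == 2 && m.mem[1] == kTos && m.mem[0] == kNext);
  }
  return 0;
}

The same model explains the one-line MergeTo fix: leaving state R1_R0_TOS for NO_TOS_REGISTERS must pop the TOS into r1 and the next value into r0; popping into r1 twice, as the code did before this patch, discarded the key.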