From 89a7341d3f20681b2493bcf5b0b73adf1dd88626 Mon Sep 17 00:00:00 2001 From: "sgjesse@chromium.org" Date: Fri, 7 May 2010 10:16:11 +0000 Subject: [PATCH] Pass key and receiver in registers for keyed load IC on ARM The calling convention for keyed load IC's on ARM now passes the key and receiver in registers r0 and r1. The code path in the ARM full compiler for handling keyed property load now has the same structure as for ia32 where the keyed load IC is also called with key and receiver in registers. This change has been tested with an exhaustive combination of the flags --special-command="@ --nofull-compiler" --special-command="@ --always-full-compiler" --special-command="@ --noenable-vfp3" to the test runner. Review URL: http://codereview.chromium.org/2024002 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4608 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/arm/codegen-arm.cc | 41 ++++--- src/arm/full-codegen-arm.cc | 59 ++++++---- src/arm/ic-arm.cc | 264 ++++++++++++++++++++++-------------- src/arm/macro-assembler-arm.h | 9 +- src/arm/stub-cache-arm.cc | 27 ++--- src/arm/virtual-frame-arm.cc | 44 ++++++- src/arm/virtual-frame-arm.h | 7 +- 7 files changed, 258 insertions(+), 193 deletions(-) diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index eb875bd..8eb4aa2 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -3473,7 +3473,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { if (node->is_compound()) { // For a compound assignment the right-hand side is a binary operation // between the current property value and the actual right-hand side. - // Load of the current value leaves receiver and key on the stack. + // Duplicate receiver and key for loading the current property value. 
+ frame_->Dup2(); EmitKeyedLoad(); frame_->EmitPush(r0); @@ -3767,19 +3768,23 @@ void CodeGenerator::VisitCall(Call* node) { // ------------------------------------------- LoadAndSpill(property->obj()); + if (!property->is_synthetic()) { + // Duplicate receiver for later use. + __ ldr(r0, MemOperand(sp, 0)); + frame_->EmitPush(r0); + } LoadAndSpill(property->key()); EmitKeyedLoad(); - frame_->Drop(); // key // Put the function below the receiver. if (property->is_synthetic()) { // Use the global receiver. - frame_->Drop(); - frame_->EmitPush(r0); + frame_->EmitPush(r0); // Function. LoadGlobalReceiver(r0); } else { - frame_->EmitPop(r1); // receiver - frame_->EmitPush(r0); // function - frame_->EmitPush(r1); // receiver + // Switch receiver and function. + frame_->EmitPop(r1); // Receiver. + frame_->EmitPush(r0); // Function. + frame_->EmitPush(r1); // Receiver. } // Call the function. @@ -5388,8 +5393,7 @@ void DeferredReferenceGetKeyedValue::Generate() { // The rest of the instructions in the deferred code must be together. { Assembler::BlockConstPoolScope block_const_pool(masm_); - // Call keyed load IC. It has all arguments on the stack and the key in r0. - __ ldr(r0, MemOperand(sp, 0)); + // Call keyed load IC. It has the arguments key and receiver in r0 and r1. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the @@ -5522,12 +5526,13 @@ void CodeGenerator::EmitKeyedLoad() { __ IncrementCounter(&Counters::keyed_load_inline, 1, frame_->scratch0(), frame_->scratch1()); - // Load the receiver and key from the stack. - frame_->SpillAllButCopyTOSToR1R0(); + // Load the key and receiver from the stack to r0 and r1. + frame_->PopToR1R0(); Register receiver = r0; Register key = r1; VirtualFrame::SpilledScope spilled(frame_); + // The deferred code expects key and receiver in r0 and r1. 
DeferredReferenceGetKeyedValue* deferred = new DeferredReferenceGetKeyedValue(); @@ -5721,6 +5726,9 @@ void Reference::GetValue() { Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); ASSERT(slot != NULL); cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); + if (!persist_after_get_) { + cgen_->UnloadReference(this); + } break; } @@ -5730,23 +5738,26 @@ void Reference::GetValue() { ASSERT(!is_global || var->is_global()); cgen_->EmitNamedLoad(GetName(), is_global); cgen_->frame()->EmitPush(r0); + if (!persist_after_get_) { + cgen_->UnloadReference(this); + } break; } case KEYED: { + if (persist_after_get_) { + cgen_->frame()->Dup2(); + } ASSERT(property != NULL); cgen_->EmitKeyedLoad(); cgen_->frame()->EmitPush(r0); + if (!persist_after_get_) set_unloaded(); break; } default: UNREACHABLE(); } - - if (!persist_after_get_) { - cgen_->UnloadReference(this); - } } diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index e9bdfe5..6680af9 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -738,15 +738,10 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, // Load the key. __ mov(r0, Operand(key_literal->handle())); - // Push both as arguments to ic. - __ Push(r1, r0); - - // Call keyed load IC. It has all arguments on the stack and the key in r0. + // Call keyed load IC. It has arguments key and receiver in r0 and r1. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - - // Drop key and object left on the stack by IC, and push the result. - DropAndApply(2, context, r0); + Apply(context, r0); } } @@ -935,8 +930,16 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } break; case KEYED_PROPERTY: - VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kStack); + // We need the key and receiver on both the stack and in r0 and r1. 
+ if (expr->is_compound()) { + VisitForValue(prop->obj(), kStack); + VisitForValue(prop->key(), kAccumulator); + __ ldr(r1, MemOperand(sp, 0)); + __ push(r0); + } else { + VisitForValue(prop->obj(), kStack); + VisitForValue(prop->key(), kStack); + } break; } @@ -1005,8 +1008,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); - // Call keyed load IC. It has all arguments on the stack and the key in r0. - __ ldr(r0, MemOperand(sp, 0)); + // Call keyed load IC. It has arguments key and receiver in r0 and r1. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); } @@ -1171,10 +1173,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) { // Drop receiver left on the stack by IC. DropAndApply(1, context_, r0); } else { - VisitForValue(expr->key(), kStack); + VisitForValue(expr->key(), kAccumulator); + __ pop(r1); EmitKeyedPropertyLoad(expr); - // Drop key and receiver left on the stack by IC. - DropAndApply(2, context_, r0); + Apply(context_, r0); } } @@ -1246,24 +1248,31 @@ void FullCodeGenerator::VisitCall(Call* expr) { // Call to a keyed property, use keyed load IC followed by function // call. VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kStack); + VisitForValue(prop->key(), kAccumulator); // Record source code position for IC call. SetSourcePosition(prop->position()); - // Call keyed load IC. It has all arguments on the stack and the key in - // r0. - __ ldr(r0, MemOperand(sp, 0)); + if (prop->is_synthetic()) { + __ pop(r1); // We do not need to keep the receiver. + } else { + __ ldr(r1, MemOperand(sp, 0)); // Keep receiver, to call function on. + } + Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - // Load receiver object into r1. if (prop->is_synthetic()) { + // Push result (function). 
+ __ push(r0); + // Push Global receiver. __ ldr(r1, CodeGenerator::GlobalObject()); __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); + __ push(r1); } else { - __ ldr(r1, MemOperand(sp, kPointerSize)); + // Pop receiver. + __ pop(r1); + // Push result (function). + __ push(r0); + __ push(r1); } - // Overwrite (object, key) with (function, receiver). - __ str(r0, MemOperand(sp, kPointerSize)); - __ str(r1, MemOperand(sp)); EmitCallWithStub(expr); } } else { @@ -1552,7 +1561,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (assign_type == NAMED_PROPERTY) { EmitNamedPropertyLoad(prop); } else { - VisitForValue(prop->key(), kStack); + VisitForValue(prop->key(), kAccumulator); + __ ldr(r1, MemOperand(sp, 0)); + __ push(r0); EmitKeyedPropertyLoad(prop); } } diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc index 5b1915f..3b3d1f8 100644 --- a/src/arm/ic-arm.cc +++ b/src/arm/ic-arm.cc @@ -683,11 +683,9 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- - __ ldr(r1, MemOperand(sp, kPointerSize)); __ Push(r1, r0); ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss)); @@ -699,11 +697,9 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- - __ ldr(r1, MemOperand(sp, kPointerSize)); __ Push(r1, r0); __ TailCallRuntime(Runtime::kGetProperty, 2, 1); @@ -714,18 +710,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label slow, 
fast, check_pixel_array, check_number_dictionary; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); + Register key = r0; + Register receiver = r1; // Check that the object isn't a smi. - __ BranchOnSmi(r1, &slow); + __ BranchOnSmi(receiver, &slow); // Get the map of the receiver. - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check bit field. __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); __ tst(r3, Operand(kSlowCaseBitFieldMask)); @@ -740,60 +735,65 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ b(lt, &slow); // Check that the key is a smi. - __ BranchOnNotSmi(r0, &slow); - // Save key in r2 in case we want it for the number dictionary case. - __ mov(r2, r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ BranchOnNotSmi(key, &slow); + // Untag key into r2.. + __ mov(r2, Operand(key, ASR, kSmiTagSize)); // Get the elements array of the object. - __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). - __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(r3, ip); __ b(ne, &check_pixel_array); // Check that the key (index) is within bounds. - __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset)); - __ cmp(r0, r3); + __ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset)); + __ cmp(r2, r3); __ b(hs, &slow); // Fast case: Do the load. 
- __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2)); + __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r0, ip); + __ cmp(r2, ip); // In case the loaded value is the_hole we have to consult GetProperty // to ensure the prototype chain is searched. __ b(eq, &slow); + __ mov(r0, r2); __ Ret(); // Check whether the elements is a pixel array. + // r0: key + // r2: untagged index + // r3: elements map + // r4: elements __ bind(&check_pixel_array); __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); __ cmp(r3, ip); __ b(ne, &check_number_dictionary); - __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset)); - __ cmp(r0, ip); + __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset)); + __ cmp(r2, ip); __ b(hs, &slow); - __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset)); - __ ldrb(r0, MemOperand(ip, r0)); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi. + __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset)); + __ ldrb(r2, MemOperand(ip, r2)); + __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi. __ Ret(); __ bind(&check_number_dictionary); // Check whether the elements is a number dictionary. - // r0: untagged index - // r1: elements - // r2: key + // r0: key + // r2: untagged index + // r3: elements map + // r4: elements __ LoadRoot(ip, Heap::kHashTableMapRootIndex); __ cmp(r3, ip); __ b(ne, &slow); - GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4); + GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5); + __ mov(r0, r2); __ Ret(); - // Slow case: Push extra copies of the arguments (2). + // Slow case, key and receiver still in r0 and r1. 
__ bind(&slow); - __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1); - __ ldr(r0, MemOperand(sp, 0)); + __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3); GenerateRuntimeGetProperty(masm); } @@ -802,8 +802,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; Label index_not_smi; @@ -811,9 +810,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { Label slow_char_code; Label got_char_code; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); - Register object = r1; Register index = r0; Register code = r2; @@ -845,6 +841,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { // string and a number), and call runtime. __ bind(&slow_char_code); __ EnterInternalFrame(); + ASSERT(object.code() > index.code()); __ Push(object, index); __ CallRuntime(Runtime::kStringCharCodeAt, 2); ASSERT(!code.is(r0)); @@ -913,25 +910,21 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label slow, failed_allocation; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); - - // r0: key - // r1: receiver object + Register key = r0; + Register receiver = r1; // Check that the object isn't a smi - __ BranchOnSmi(r1, &slow); + __ BranchOnSmi(receiver, &slow); // Check that the key is a smi. - __ BranchOnNotSmi(r0, &slow); + __ BranchOnNotSmi(key, &slow); // Check that the object is a JS object. Load map into r2. 
- __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE); + __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); __ b(lt, &slow); // Check that the receiver does not require access checks. We need @@ -943,53 +936,51 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // Check that the elements array is the appropriate type of // ExternalArray. - // r0: index (as a smi) - // r1: JSObject - __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); __ cmp(r2, ip); __ b(ne, &slow); // Check that the index is in range. - __ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset)); - __ cmp(r1, Operand(r0, ASR, kSmiTagSize)); + __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); + __ cmp(ip, Operand(key, ASR, kSmiTagSize)); // Unsigned comparison catches both negative and too-large values. __ b(lo, &slow); - // r0: index (smi) - // r1: elements array - __ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset)); - // r1: base pointer of external storage + // r3: elements array + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + // r3: base pointer of external storage // We are not untagging smi key and instead work with it // as if it was premultiplied by 2. 
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); + Register value = r2; switch (array_type) { case kExternalByteArray: - __ ldrsb(r0, MemOperand(r1, r0, LSR, 1)); + __ ldrsb(value, MemOperand(r3, key, LSR, 1)); break; case kExternalUnsignedByteArray: - __ ldrb(r0, MemOperand(r1, r0, LSR, 1)); + __ ldrb(value, MemOperand(r3, key, LSR, 1)); break; case kExternalShortArray: - __ ldrsh(r0, MemOperand(r1, r0, LSL, 0)); + __ ldrsh(value, MemOperand(r3, key, LSL, 0)); break; case kExternalUnsignedShortArray: - __ ldrh(r0, MemOperand(r1, r0, LSL, 0)); + __ ldrh(value, MemOperand(r3, key, LSL, 0)); break; case kExternalIntArray: case kExternalUnsignedIntArray: - __ ldr(r0, MemOperand(r1, r0, LSL, 1)); + __ ldr(value, MemOperand(r3, key, LSL, 1)); break; case kExternalFloatArray: if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); - __ add(r0, r1, Operand(r0, LSL, 1)); - __ vldr(s0, r0, 0); + __ add(r2, r3, Operand(key, LSL, 1)); + __ vldr(s0, r2, 0); } else { - __ ldr(r0, MemOperand(r1, r0, LSL, 1)); + __ ldr(value, MemOperand(r3, key, LSL, 1)); } break; default: @@ -998,37 +989,36 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, } // For integer array types: - // r0: value + // r2: value // For floating-point array type // s0: value (if VFP3 is supported) - // r0: value (if VFP3 is not supported) + // r2: value (if VFP3 is not supported) if (array_type == kExternalIntArray) { // For the Int and UnsignedInt array types, we need to see whether // the value can be represented in a Smi. If not, we need to convert // it to a HeapNumber. Label box_int; - __ cmp(r0, Operand(0xC0000000)); + __ cmp(value, Operand(0xC0000000)); __ b(mi, &box_int); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); __ bind(&box_int); - - __ mov(r1, r0); - // Allocate a HeapNumber for the int and perform int-to-double - // conversion. 
+ // Allocate a HeapNumber for the result and perform int-to-double + // conversion. Use r0 for result as key is not needed any more. __ AllocateHeapNumber(r0, r3, r4, &slow); if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); - __ vmov(s0, r1); + __ vmov(s0, value); __ vcvt_f64_s32(d0, s0); - __ sub(r1, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); __ Ret(); } else { - WriteInt32ToHeapNumberStub stub(r1, r0, r3); + WriteInt32ToHeapNumberStub stub(value, r0, r3); __ TailCallStub(&stub); } } else if (array_type == kExternalUnsignedIntArray) { @@ -1038,51 +1028,60 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); Label box_int, done; - __ tst(r0, Operand(0xC0000000)); + __ tst(value, Operand(0xC0000000)); __ b(ne, &box_int); - - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); __ bind(&box_int); - __ vmov(s0, r0); - __ AllocateHeapNumber(r0, r1, r2, &slow); + __ vmov(s0, value); + // Allocate a HeapNumber for the result and perform int-to-double + // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all + // registers - also when jumping due to exhausted young space. + __ AllocateHeapNumber(r2, r3, r4, &slow); __ vcvt_f64_u32(d0, s0); - __ sub(r1, r0, Operand(kHeapObjectTag)); + __ sub(r1, r2, Operand(kHeapObjectTag)); __ vstr(d0, r1, HeapNumber::kValueOffset); + + __ mov(r0, r2); __ Ret(); } else { // Check whether unsigned integer fits into smi. Label box_int_0, box_int_1, done; - __ tst(r0, Operand(0x80000000)); + __ tst(value, Operand(0x80000000)); __ b(ne, &box_int_0); - __ tst(r0, Operand(0x40000000)); + __ tst(value, Operand(0x40000000)); __ b(ne, &box_int_1); - // Tag integer as smi and return it. 
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); + Register hiword = value; // r2. + Register loword = r3; + __ bind(&box_int_0); // Integer does not have leading zeros. - GenerateUInt2Double(masm, r0, r1, r2, 0); + GenerateUInt2Double(masm, hiword, loword, r4, 0); __ b(&done); __ bind(&box_int_1); // Integer has one leading zero. - GenerateUInt2Double(masm, r0, r1, r2, 1); + GenerateUInt2Double(masm, hiword, loword, r4, 1); + __ bind(&done); - // Integer was converted to double in registers r0:r1. - // Wrap it into a HeapNumber. - __ AllocateHeapNumber(r2, r3, r5, &slow); + // Integer was converted to double in registers hiword:loword. + // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber + // clobbers all registers - also when jumping due to exhausted young + // space. + __ AllocateHeapNumber(r4, r5, r6, &slow); - __ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset)); - __ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset)); - - __ mov(r0, r2); + __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); + __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); + __ mov(r0, r4); __ Ret(); } } else if (array_type == kExternalFloatArray) { @@ -1090,40 +1089,52 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // HeapNumber. if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); - __ AllocateHeapNumber(r0, r1, r2, &slow); + // Allocate a HeapNumber for the result. Don't use r0 and r1 as + // AllocateHeapNumber clobbers all registers - also when jumping due to + // exhausted young space. + __ AllocateHeapNumber(r2, r3, r4, &slow); __ vcvt_f64_f32(d0, s0); - __ sub(r1, r0, Operand(kHeapObjectTag)); + __ sub(r1, r2, Operand(kHeapObjectTag)); __ vstr(d0, r1, HeapNumber::kValueOffset); + + __ mov(r0, r2); __ Ret(); } else { - __ AllocateHeapNumber(r3, r1, r2, &slow); + // Allocate a HeapNumber for the result. 
Don't use r0 and r1 as + // AllocateHeapNumber clobbers all registers - also when jumping due to + // exhausted young space. + __ AllocateHeapNumber(r3, r4, r5, &slow); // VFP is not available, do manual single to double conversion. - // r0: floating point value (binary32) + // r2: floating point value (binary32) + // r3: heap number for result - // Extract mantissa to r1. - __ and_(r1, r0, Operand(kBinary32MantissaMask)); + // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to + // the slow case from here. + __ and_(r0, value, Operand(kBinary32MantissaMask)); - // Extract exponent to r2. - __ mov(r2, Operand(r0, LSR, kBinary32MantissaBits)); - __ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); + // Extract exponent to r1. OK to clobber r1 now as there are no jumps to + // the slow case from here. + __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); + __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); Label exponent_rebiased; - __ teq(r2, Operand(0x00)); + __ teq(r1, Operand(0x00)); __ b(eq, &exponent_rebiased); - __ teq(r2, Operand(0xff)); - __ mov(r2, Operand(0x7ff), LeaveCC, eq); + __ teq(r1, Operand(0xff)); + __ mov(r1, Operand(0x7ff), LeaveCC, eq); __ b(eq, &exponent_rebiased); // Rebias exponent. - __ add(r2, - r2, + __ add(r1, + r1, Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); __ bind(&exponent_rebiased); - __ and_(r0, r0, Operand(kBinary32SignMask)); - __ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord)); + __ and_(r2, value, Operand(kBinary32SignMask)); + value = no_reg; + __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); // Shift mantissa. 
static const int kMantissaShiftForHiWord = @@ -1132,24 +1143,25 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, static const int kMantissaShiftForLoWord = kBitsPerInt - kMantissaShiftForHiWord; - __ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord)); - __ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord)); + __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); + __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); + + __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); - __ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset)); - __ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); __ mov(r0, r3); __ Ret(); } } else { - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + // Tag integer as smi and return it. + __ mov(r0, Operand(value, LSL, kSmiTagSize)); __ Ret(); } - // Slow case: Load name and receiver from stack and jump to runtime. + // Slow case, key and receiver still in r0 and r1. __ bind(&slow); - __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1); - __ ldr(r0, MemOperand(sp, 0)); + __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); GenerateRuntimeGetProperty(masm); } @@ -1158,14 +1170,10 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label slow; - // Get the object from the stack. - __ ldr(r1, MemOperand(sp, kPointerSize)); - // Check that the receiver isn't a smi. __ BranchOnSmi(r1, &slow); diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 8cccf19..a349c6c 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -284,7 +284,9 @@ class MacroAssembler: public Assembler { // Allocate an object in new space. 
The object_size is specified in words (not // bytes). If the new space is exhausted control continues at the gc_required // label. The allocated object is returned in result. If the flag - // tag_allocated_object is true the result is tagged as as a heap object. + // tag_allocated_object is true the result is tagged as as a heap object. All + // registers are clobbered also when control continues at the gc_required + // label. void AllocateInNewSpace(int object_size, Register result, Register scratch1, @@ -328,8 +330,9 @@ class MacroAssembler: public Assembler { Register scratch2, Label* gc_required); - // Allocates a heap number or jumps to the need_gc label if the young space - // is full and a scavenge is needed. + // Allocates a heap number or jumps to the gc_required label if the young + // space is full and a scavenge is needed. All registers are clobbered also + // when control continues at the gc_required label. void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc index b302fa0..a5faf41 100644 --- a/src/arm/stub-cache-arm.cc +++ b/src/arm/stub-cache-arm.cc @@ -1813,8 +1813,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; @@ -1822,7 +1821,6 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. 
GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1838,8 +1836,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; @@ -1848,7 +1845,6 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, __ b(ne, &miss); Failure* failure = Failure::InternalError(); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, callback, name, &miss, &failure); if (!success) return failure; @@ -1867,8 +1863,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; @@ -1876,7 +1871,6 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1892,8 +1886,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; @@ -1903,7 +1896,6 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, LookupResult lookup; LookupPostInterceptor(holder, name, &lookup); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. 
GenerateLoadInterceptor(receiver, holder, &lookup, @@ -1924,8 +1916,7 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; @@ -1933,7 +1924,6 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadArrayLength(masm(), r1, r2, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1946,8 +1936,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- Label miss; __ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3); @@ -1956,7 +1945,6 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. 
GenerateLoadStringLength(masm(), r1, r2, r3, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3); @@ -1972,8 +1960,7 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver + // -- r1 : receiver // ----------------------------------- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc index e432c59..e64eb90 100644 --- a/src/arm/virtual-frame-arm.cc +++ b/src/arm/virtual-frame-arm.cc @@ -323,7 +323,8 @@ void VirtualFrame::CallStoreIC(Handle name, bool is_contextual) { void VirtualFrame::CallKeyedLoadIC() { Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); - SpillAllButCopyTOSToR0(); + PopToR1R0(); + SpillAll(); CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); } @@ -532,6 +533,47 @@ void VirtualFrame::Dup() { } +void VirtualFrame::Dup2() { + if (SpilledScope::is_spilled()) { + __ ldr(ip, MemOperand(sp, kPointerSize)); + EmitPush(ip); + __ ldr(ip, MemOperand(sp, kPointerSize)); + EmitPush(ip); + } else { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r0, MemOperand(sp, 0)); + __ ldr(r1, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R0_R1_TOS; + break; + case R0_TOS: + __ push(r0); + __ ldr(r1, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_TOS: + __ push(r1); + __ ldr(r0, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R1_R0_TOS; + break; + case R0_R1_TOS: + __ push(r1); + __ push(r0); + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_R0_TOS: + __ push(r0); + __ push(r1); + top_of_stack_state_ = R1_R0_TOS; + break; + default: + UNREACHABLE(); + } + } + element_count_ += 2; +} + + Register VirtualFrame::PopToRegister(Register but_not_to_this_one) { ASSERT(but_not_to_this_one.is(r0) || but_not_to_this_one.is(r1) || diff --git 
a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h index 77bc70e..b255929 100644 --- a/src/arm/virtual-frame-arm.h +++ b/src/arm/virtual-frame-arm.h @@ -316,8 +316,8 @@ class VirtualFrame : public ZoneObject { // Result is returned in r0. void CallStoreIC(Handle name, bool is_contextual); - // Call keyed load IC. Key and receiver are on the stack. Result is returned - // in r0. + // Call keyed load IC. Key and receiver are on the stack. Both are consumed. + // Result is returned in r0. void CallKeyedLoadIC(); // Call keyed store IC. Key and receiver are on the stack and the value is in @@ -355,6 +355,9 @@ class VirtualFrame : public ZoneObject { // Duplicate the top of stack. void Dup(); + // Duplicate the two elements on top of stack. + void Dup2(); + // Flushes all registers, but it puts a copy of the top-of-stack in r0. void SpillAllButCopyTOSToR0(); -- 2.7.4