From: kaznacheev@chromium.org
Date: Mon, 1 Mar 2010 16:24:05 +0000 (+0000)
Subject: Implementing inline caches for binary operations (ia32).
X-Git-Tag: upstream/4.7.83~22377
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7d5885237406076d5b6101b10462e7041c4087b3;p=platform%2Fupstream%2Fv8.git

Implementing inline caches for binary operations (ia32).

This is a subset of a CL reviewed earlier (http://codereview.chromium.org/551093).
The register usage optimisation part has been reviewed and submitted separately.

Two fast cases supported: HeapNumber operands and String operands for ADD.

Review URL: http://codereview.chromium.org/553117

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3988 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index ce8467866..966d5b938 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -31,6 +31,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
+#include "ic-inl.h"
 #include "parser.h"
 #include "register-allocator-inl.h"
 #include "runtime.h"
@@ -6235,6 +6236,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 }
 
 
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  return Handle<Code>::null();
+}
+
+
 void StackCheckStub::Generate(MacroAssembler* masm) {
   // Do tail-call to runtime routine. Runtime routines expect at least one
   // argument, so give it a Smi.
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 4d0fd2992..e42f75894 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -83,6 +83,11 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
 }
 
 
+int CodeStub::GetCodeKind() {
+  return Code::STUB;
+}
+
+
 Handle<Code> CodeStub::GetCode() {
   Code* code;
   if (!FindCodeInCache(&code)) {
@@ -97,7 +102,10 @@ Handle<Code> CodeStub::GetCode() {
     masm.GetCode(&desc);
 
     // Copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        InLoop(),
+        GetICState());
     Handle<Code> new_object =
         Factory::NewCode(desc, NULL, flags, masm.CodeObject());
     RecordCodeGeneration(*new_object, &masm);
@@ -132,7 +140,10 @@ Object* CodeStub::TryGetCode() {
     masm.GetCode(&desc);
 
     // Try to copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        InLoop(),
+        GetICState());
     Object* new_object =
         Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
     if (new_object->IsFailure()) return new_object;
diff --git a/src/code-stubs.h b/src/code-stubs.h
index d5189c272..de2ad56cc 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -28,6 +28,8 @@
 #ifndef V8_CODE_STUBS_H_
 #define V8_CODE_STUBS_H_
 
+#include "globals.h"
+
 namespace v8 {
 namespace internal {
 
@@ -139,6 +141,14 @@ class CodeStub BASE_EMBEDDED {
   // lazily generated function should be fully optimized or not.
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
+  // GenericBinaryOpStub needs to override this.
+  virtual int GetCodeKind();
+
+  // GenericBinaryOpStub needs to override this.
+  virtual InlineCacheState GetICState() {
+    return UNINITIALIZED;
+  }
+
   // Returns a name for logging/debugging purposes.
   virtual const char* GetName() { return MajorName(MajorKey(), false); }
diff --git a/src/debug.cc b/src/debug.cc
index 8c4f51d95..41952eba0 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -123,7 +123,9 @@ void BreakLocationIterator::Next() {
     if (RelocInfo::IsCodeTarget(rmode())) {
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
-      if (code->is_inline_cache_stub() || RelocInfo::IsConstructCall(rmode())) {
+      if ((code->is_inline_cache_stub() &&
+           code->kind() != Code::BINARY_OP_IC) ||
+          RelocInfo::IsConstructCall(rmode())) {
         break_point_++;
         return;
       }
@@ -1337,24 +1339,26 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
   // Find the builtin debug break function matching the calling convention
   // used by the call site.
   if (code->is_inline_cache_stub()) {
-    if (code->is_call_stub()) {
-      return ComputeCallDebugBreak(code->arguments_count());
-    }
-    if (code->is_load_stub()) {
-      return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
-    }
-    if (code->is_store_stub()) {
-      return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
-    }
-    if (code->is_keyed_load_stub()) {
-      Handle<Code> result =
-          Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
-      return result;
-    }
-    if (code->is_keyed_store_stub()) {
-      Handle<Code> result =
-          Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
-      return result;
+    switch (code->kind()) {
+      case Code::CALL_IC:
+        return ComputeCallDebugBreak(code->arguments_count());
+
+      case Code::LOAD_IC:
+        return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+
+      case Code::STORE_IC:
+        return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+
+      case Code::KEYED_LOAD_IC:
+        return Handle<Code>(
+            Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+
+      case Code::KEYED_STORE_IC:
+        return Handle<Code>(
+            Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+
+      default:
+        UNREACHABLE();
     }
   }
   if (RelocInfo::IsConstructCall(mode)) {
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 16c610a37..62b3c9a0b 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -850,13 +850,14 @@ const char* GenericBinaryOpStub::GetName() {
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
                args_reversed_ ? "_R" : "",
-               NumberInfo::ToString(operands_type_));
+               NumberInfo::ToString(static_operands_type_),
+               BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
 }
 
@@ -8083,146 +8084,174 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // Generate fast case smi code if requested. This flag is set when the fast
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
-  if (HasSmiCodeInStub()) {
+  if (ShouldGenerateSmiCode()) {
     GenerateSmiCode(masm, &call_runtime);
   } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
-    GenerateLoadArguments(masm);
+    if (!HasArgsInRegisters()) {
+      GenerateLoadArguments(masm);
+    }
   }
 
   // Floating point case.
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        if (NumberInfo::IsNumber(operands_type_)) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx, - "GenericBinaryOpStub operand not a number."); - __ AbortIfNotNumber(eax, - "GenericBinaryOpStub operand not a number."); - } - FloatingPointHelper::LoadSSE2Operands(masm); - } else { - FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime); + if (ShouldGenerateFPCode()) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: { + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + HasSmiCodeInStub()) { + // Execution reaches this point when the first non-smi argument occurs + // (and only if smi code is generated). This is the right moment to + // patch to HEAP_NUMBERS state. The transition is attempted only for + // the four basic operations. The stub stays in the DEFAULT state + // forever for all other operations (also if smi code is skipped). + GenerateTypeTransition(masm); } - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - GenerateHeapResultAllocation(masm, &call_runtime); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - GenerateReturn(masm); - } else { // SSE2 not available, use FPU. - if (NumberInfo::IsNumber(operands_type_)) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. - __ AbortIfNotNumber(edx, - "GenericBinaryOpStub operand not a number."); - __ AbortIfNotNumber(eax, - "GenericBinaryOpStub operand not a number."); + Label not_floats; + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + if (NumberInfo::IsNumber(static_operands_type_)) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. + __ AbortIfNotNumber(edx, + "GenericBinaryOpStub operand not a number."); + __ AbortIfNotNumber(eax, + "GenericBinaryOpStub operand not a number."); + } + FloatingPointHelper::LoadSSE2Operands(masm); + } else { + FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime); } - } else { - FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); + + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + GenerateHeapResultAllocation(masm, &call_runtime); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + GenerateReturn(masm); + } else { // SSE2 not available, use FPU. + if (NumberInfo::IsNumber(static_operands_type_)) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. 
+ __ AbortIfNotNumber(edx, + "GenericBinaryOpStub operand not a number."); + __ AbortIfNotNumber(eax, + "GenericBinaryOpStub operand not a number."); + } + } else { + FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); + } + FloatingPointHelper::LoadFloatOperands( + masm, + ecx, + FloatingPointHelper::ARGS_IN_REGISTERS); + switch (op_) { + case Token::ADD: __ faddp(1); break; + case Token::SUB: __ fsubp(1); break; + case Token::MUL: __ fmulp(1); break; + case Token::DIV: __ fdivp(1); break; + default: UNREACHABLE(); + } + Label after_alloc_failure; + GenerateHeapResultAllocation(masm, &after_alloc_failure); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + GenerateReturn(masm); + __ bind(&after_alloc_failure); + __ ffree(); + __ jmp(&call_runtime); } - FloatingPointHelper::LoadFloatOperands( - masm, - ecx, - FloatingPointHelper::ARGS_IN_REGISTERS); - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); + __ bind(¬_floats); + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + !HasSmiCodeInStub()) { + // Execution reaches this point when the first non-number argument + // occurs (and only if smi code is skipped from the stub, otherwise + // the patching has already been done earlier in this case branch). + // Try patching to STRINGS for ADD operation. + if (op_ == Token::ADD) { + GenerateTypeTransition(masm); + } } - Label after_alloc_failure; - GenerateHeapResultAllocation(masm, &after_alloc_failure); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - GenerateReturn(masm); - __ bind(&after_alloc_failure); - __ ffree(); - __ jmp(&call_runtime); - } - } - case Token::MOD: { - // For MOD we go directly to runtime in the non-smi case. - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - Label non_smi_result; - FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime); - switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; - case Token::SAR: __ sar_cl(eax); break; - case Token::SHL: __ shl_cl(eax); break; - case Token::SHR: __ shr_cl(eax); break; - default: UNREACHABLE(); + break; } - if (op_ == Token::SHR) { - // Check if result is non-negative and fits in a smi. - __ test(eax, Immediate(0xc0000000)); - __ j(not_zero, &call_runtime); - } else { - // Check if result fits in a smi. - __ cmp(eax, 0xc0000000); - __ j(negative, &non_smi_result); + case Token::MOD: { + // For MOD we go directly to runtime in the non-smi case. + break; } - // Tag smi result and return. - __ SmiTag(eax); - GenerateReturn(masm); - - // All ops except SHR return a signed int32 that we load in a HeapNumber. - if (op_ != Token::SHR) { - __ bind(&non_smi_result); - // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result - Label skip_allocation; - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? - 1 * kPointerSize : 2 * kPointerSize)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &skip_allocation, not_taken); - // Fall through! 
- case NO_OVERWRITE: - __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); - __ bind(&skip_allocation); - break; + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SAR: + case Token::SHL: + case Token::SHR: { + Label non_smi_result; + FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime); + switch (op_) { + case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; + case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; + case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::SAR: __ sar_cl(eax); break; + case Token::SHL: __ shl_cl(eax); break; + case Token::SHR: __ shr_cl(eax); break; default: UNREACHABLE(); } - // Store the result in the HeapNumber and return. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + if (op_ == Token::SHR) { + // Check if result is non-negative and fits in a smi. + __ test(eax, Immediate(0xc0000000)); + __ j(not_zero, &call_runtime); } else { - __ mov(Operand(esp, 1 * kPointerSize), ebx); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + // Check if result fits in a smi. + __ cmp(eax, 0xc0000000); + __ j(negative, &non_smi_result); } + // Tag smi result and return. + __ SmiTag(eax); GenerateReturn(masm); + + // All ops except SHR return a signed int32 that we load in + // a HeapNumber. + if (op_ != Token::SHR) { + __ bind(&non_smi_result); + // Allocate a heap number if needed. + __ mov(ebx, Operand(eax)); // ebx: result + Label skip_allocation; + switch (mode_) { + case OVERWRITE_LEFT: + case OVERWRITE_RIGHT: + // If the operand was an object, we skip the + // allocation of a heap number. + __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? + 1 * kPointerSize : 2 * kPointerSize)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &skip_allocation, not_taken); + // Fall through! + case NO_OVERWRITE: + __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); + __ bind(&skip_allocation); + break; + default: UNREACHABLE(); + } + // Store the result in the HeapNumber and return. + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + __ cvtsi2sd(xmm0, Operand(ebx)); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + } else { + __ mov(Operand(esp, 1 * kPointerSize), ebx); + __ fild_s(Operand(esp, 1 * kPointerSize)); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + } + GenerateReturn(masm); + } + break; } - break; + default: UNREACHABLE(); break; } - default: UNREACHABLE(); break; } // If all else fails, use the runtime system to get the correct @@ -8230,21 +8259,20 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { // stack in the correct order below the return address. __ bind(&call_runtime); if (HasArgsInRegisters()) { - __ pop(ecx); - if (HasArgsReversed()) { - __ push(eax); - __ push(edx); - } else { - __ push(edx); - __ push(eax); - } - __ push(ecx); + GenerateRegisterArgsPush(masm); } + switch (op_) { case Token::ADD: { // Test for string arguments before calling runtime. 
      Label not_strings, not_string1, string1, string1_smi2;
-      Result answer;
+
+      // If this stub has already generated FP-specific code then the
+      // arguments are already in edx, eax.
+      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+        GenerateLoadArguments(masm);
+      }
+
       __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &not_string1);
       __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
@@ -8333,6 +8361,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 
     default: UNREACHABLE();
   }
+
+  // Generate an unreachable reference to the DEFAULT stub so that it can be
+  // found at the end of this stub when clearing ICs at GC.
+  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+    __ TailCallStub(&uninit);
+  }
 }
 
 
@@ -8386,10 +8421,9 @@ void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
 
 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
   // If arguments are not passed in registers read them from the stack.
-  if (!HasArgsInRegisters()) {
-    __ mov(eax, Operand(esp, 1 * kPointerSize));
-    __ mov(edx, Operand(esp, 2 * kPointerSize));
-  }
+  ASSERT(!HasArgsInRegisters());
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
 }
 
 
@@ -8404,6 +8438,75 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
 }
 
 
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  ASSERT(HasArgsInRegisters());
+  __ pop(ecx);
+  if (HasArgsReversed()) {
+    __ push(eax);
+    __ push(edx);
+  } else {
+    __ push(edx);
+    __ push(eax);
+  }
+  __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  // Keep a copy of operands on the stack and make sure they are also in
+  // edx, eax.
+  if (HasArgsInRegisters()) {
+    GenerateRegisterArgsPush(masm);
+  } else {
+    GenerateLoadArguments(masm);
+  }
+
+  // Internal frame is necessary to handle exceptions properly.
+  __ EnterInternalFrame();
+
+  // Push arguments on stack if the stub expects them there.
+  if (!HasArgsInRegisters()) {
+    __ push(edx);
+    __ push(eax);
+  }
+  // Call the stub proper to get the result in eax.
+  __ call(&get_result);
+  __ LeaveInternalFrame();
+
+  __ pop(ecx);  // Return address.
+  // Left and right arguments are now on top.
+  // Push the operation result. The tail call to BinaryOp_Patch will
+  // return it to the original caller.
+  __ push(eax);
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+  __ push(ecx);  // Return address.
+
+  // Patch the caller to an appropriate specialized stub
+  // and return the operation result.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      6,
+      1);
+
+  // The entry point for the result calculation is assumed to be immediately
+  // after this sequence.
+  __ bind(&get_result);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  HandleScope scope;
+  return stub.GetCode();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Input on stack:
   //   esp[4]: argument (should be number).
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 3681d77c4..0b605b606 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -28,6 +28,8 @@
 #ifndef V8_IA32_CODEGEN_IA32_H_
 #define V8_IA32_CODEGEN_IA32_H_
 
+#include "ic-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -699,12 +701,25 @@ class GenericBinaryOpStub: public CodeStub {
         flags_(flags),
         args_in_registers_(false),
         args_reversed_(false),
-        name_(NULL),
-        operands_type_(operands_type) {
+        static_operands_type_(operands_type),
+        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        name_(NULL) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        flags_(FlagBits::decode(key)),
+        args_in_registers_(ArgsInRegistersBits::decode(key)),
+        args_reversed_(ArgsReversedBits::decode(key)),
+        use_sse3_(SSE3Bits::decode(key)),
+        static_operands_type_(StaticTypeInfoBits::decode(key)),
+        runtime_operands_type_(runtime_operands_type),
+        name_(NULL) {
+  }
+
   // Generate code to call the stub with the supplied arguments. This will add
   // code at the call site to prepare arguments either in registers or on the
   // stack together with the actual call.
@@ -724,8 +739,14 @@ class GenericBinaryOpStub: public CodeStub {
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
+
+  // Number type information of operands, determined by code generator.
+  NumberInfo::Type static_operands_type_;
+
+  // Operand type information determined at runtime.
+  BinaryOpIC::TypeInfo runtime_operands_type_;
+
   char* name_;
-  NumberInfo::Type operands_type_;  // Number type information of operands.
 
   const char* GetName();
 
@@ -739,29 +760,31 @@ class GenericBinaryOpStub: public CodeStub {
            static_cast<int>(flags_),
            static_cast<int>(args_in_registers_),
            static_cast<int>(args_reversed_),
-           NumberInfo::ToString(operands_type_));
+           NumberInfo::ToString(static_operands_type_));
   }
 #endif
 
-  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
+  // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class SSE3Bits: public BitField<bool, 9, 1> {};
   class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
   class ArgsReversedBits: public BitField<bool, 11, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
-  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+  class StaticTypeInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
+    // Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_) | ModeBits::encode(mode_) | FlagBits::encode(flags_) | SSE3Bits::encode(use_sse3_) | ArgsInRegistersBits::encode(args_in_registers_) | ArgsReversedBits::encode(args_reversed_) - | NumberInfoBits::encode(operands_type_); + | StaticTypeInfoBits::encode(static_operands_type_) + | RuntimeTypeInfoBits::encode(runtime_operands_type_); } void Generate(MacroAssembler* masm); @@ -769,6 +792,8 @@ class GenericBinaryOpStub: public CodeStub { void GenerateLoadArguments(MacroAssembler* masm); void GenerateReturn(MacroAssembler* masm); void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); bool ArgsInRegistersSupported() { return op_ == Token::ADD || op_ == Token::SUB @@ -783,6 +808,22 @@ class GenericBinaryOpStub: public CodeStub { bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; } bool HasArgsInRegisters() { return args_in_registers_; } bool HasArgsReversed() { return args_reversed_; } + + bool ShouldGenerateSmiCode() { + return HasSmiCodeInStub() && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(runtime_operands_type_); + } }; diff --git a/src/ic.cc b/src/ic.cc index 8fd777a36..298edf8eb 100644 --- a/src/ic.cc +++ b/src/ic.cc @@ -222,6 +222,7 @@ void IC::Clear(Address address) { case Code::STORE_IC: return StoreIC::Clear(address, target); case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target); case Code::CALL_IC: return CallIC::Clear(address, target); + case Code::BINARY_OP_IC: return BinaryOpIC::Clear(address, target); default: UNREACHABLE(); } } @@ -1416,6 +1417,112 @@ Object* KeyedStoreIC_Miss(Arguments args) { } +void BinaryOpIC::patch(Code* code) { + set_target(code); +} + + +void BinaryOpIC::Clear(Address address, Code* target) { + if (target->ic_state() == UNINITIALIZED) return; + + // At the end of a fast case stub there should be a reference to + // a corresponding UNINITIALIZED stub, so look for the last reloc info item. + RelocInfo* rinfo = NULL; + for (RelocIterator it(target, RelocInfo::kCodeTargetMask); + !it.done(); it.next()) { + rinfo = it.rinfo(); + } + + ASSERT(rinfo != NULL); + Code* uninit_stub = Code::GetCodeFromTargetAddress(rinfo->target_address()); + ASSERT(uninit_stub->ic_state() == UNINITIALIZED && + uninit_stub->kind() == Code::BINARY_OP_IC); + SetTargetAtAddress(address, uninit_stub); +} + + +const char* BinaryOpIC::GetName(TypeInfo type_info) { + switch (type_info) { + case DEFAULT: return "Default"; + case GENERIC: return "Generic"; + case HEAP_NUMBERS: return "HeapNumbers"; + case STRINGS: return "Strings"; + default: return "Invalid"; + } +} + + +BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) { + switch (type_info) { + // DEFAULT is mapped to UNINITIALIZED so that calls to DEFAULT stubs + // are not cleared at GC. + case DEFAULT: return UNINITIALIZED; + + // Could have mapped GENERIC to MONOMORPHIC just as well but MEGAMORPHIC is + // conceptually closer. + case GENERIC: return MEGAMORPHIC; + + default: return MONOMORPHIC; + } +} + + +BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left, + Object* right) { + // Patching is never requested for the two smis. 
+  ASSERT(!left->IsSmi() || !right->IsSmi());
+
+  if (left->IsNumber() && right->IsNumber()) {
+    return HEAP_NUMBERS;
+  }
+
+  if (left->IsString() || right->IsString()) {
+    // Patching for fast string ADD makes sense even if only one of the
+    // arguments is a string.
+    return STRINGS;
+  }
+
+  return GENERIC;
+}
+
+
+// defined in codegen-<arch>.cc
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
+
+
+Object* BinaryOp_Patch(Arguments args) {
+  ASSERT(args.length() == 6);
+
+  Handle<Object> left = args.at<Object>(0);
+  Handle<Object> right = args.at<Object>(1);
+  Handle<Object> result = args.at<Object>(2);
+  int key = Smi::cast(args[3])->value();
+#ifdef DEBUG
+  Token::Value op = static_cast<Token::Value>(Smi::cast(args[4])->value());
+  BinaryOpIC::TypeInfo prev_type_info =
+      static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[5])->value());
+#endif  // DEBUG
+  { HandleScope scope;
+    BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
+    Handle<Code> code = GetBinaryOpStub(key, type_info);
+    if (!code.is_null()) {
+      BinaryOpIC ic;
+      ic.patch(*code);
+#ifdef DEBUG
+      if (FLAG_trace_ic) {
+        PrintF("[BinaryOpIC (%s->%s)#%s]\n",
+               BinaryOpIC::GetName(prev_type_info),
+               BinaryOpIC::GetName(type_info),
+               Token::Name(op));
+      }
+#endif  // DEBUG
+    }
+  }
+
+  return *result;
+}
+
+
 static Address IC_utilities[] = {
 #define ADDR(name) FUNCTION_ADDR(name),
   IC_UTIL_LIST(ADDR)
diff --git a/src/ic.h b/src/ic.h
index d545989bf..2fe3a7d52 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -55,7 +55,8 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
   ICU(LoadPropertyWithInterceptorForLoad)    \
   ICU(LoadPropertyWithInterceptorForCall)    \
   ICU(KeyedLoadPropertyWithInterceptor)      \
-  ICU(StoreInterceptorProperty)
+  ICU(StoreInterceptorProperty)              \
+  ICU(BinaryOp_Patch)
 
 //
 // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
@@ -444,6 +445,30 @@ class KeyedStoreIC: public IC {
 };
 
 
+class BinaryOpIC: public IC {
+ public:
+
+  enum TypeInfo {
+    DEFAULT,  // Initial state. When first executed, patches to one
+              // of the following states depending on the operands types.
+    HEAP_NUMBERS,  // Both arguments are HeapNumbers.
+    STRINGS,  // At least one of the arguments is String.
+    GENERIC   // Non-specialized case (processes any type combination).
+  };
+
+  BinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+
+  void patch(Code* code);
+
+  static void Clear(Address address, Code* target);
+
+  static const char* GetName(TypeInfo type_info);
+
+  static State ToState(TypeInfo type_info);
+
+  static TypeInfo GetTypeInfo(Object* left, Object* right);
+};
+
 } } // namespace v8::internal
 
 #endif  // V8_IC_H_
diff --git a/src/log.cc b/src/log.cc
index 9d0b35246..588d34549 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1267,6 +1267,8 @@ void Logger::LogCodeObject(Object* object) {
     switch (code_object->kind()) {
       case Code::FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
+      case Code::BINARY_OP_IC:
+        // fall through
       case Code::STUB:
         description = CodeStub::MajorName(code_object->major_key(), true);
         if (description == NULL)
diff --git a/src/objects-inl.h b/src/objects-inl.h
index a33930ef6..79ffaa01d 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2149,14 +2149,14 @@ int Code::arguments_count() {
 
 
 CodeStub::Major Code::major_key() {
-  ASSERT(kind() == STUB);
+  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
   return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
                                                       kStubMajorKeyOffset));
 }
 
 
 void Code::set_major_key(CodeStub::Major major) {
-  ASSERT(kind() == STUB);
+  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
   ASSERT(0 <= major && major < 256);
   WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
 }
diff --git a/src/objects.cc b/src/objects.cc
index 4f1fa334a..e50b73100 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5126,6 +5126,7 @@ const char* Code::Kind2String(Kind kind) {
     case STORE_IC: return "STORE_IC";
     case KEYED_STORE_IC: return "KEYED_STORE_IC";
     case CALL_IC: return "CALL_IC";
+    case BINARY_OP_IC: return "BINARY_OP_IC";
   }
   UNREACHABLE();
   return NULL;
diff --git a/src/objects.h b/src/objects.h
index 2b9321c68..192033d15 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2618,13 +2618,14 @@ class Code: public HeapObject {
     CALL_IC,
     STORE_IC,
     KEYED_STORE_IC,
-    // No more than eight kinds. The value currently encoded in three bits in
+    BINARY_OP_IC,
+    // No more than 16 kinds. The value is currently encoded in four bits in
     // Flags.
 
     // Pseudo-kinds.
     REGEXP = BUILTIN,
     FIRST_IC_KIND = LOAD_IC,
-    LAST_IC_KIND = KEYED_STORE_IC
+    LAST_IC_KIND = BINARY_OP_IC
   };
 
   enum {
@@ -2670,7 +2671,7 @@ class Code: public HeapObject {
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
 
-  // [major_key]: For kind STUB, the major key.
+  // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
   inline CodeStub::Major major_key();
   inline void set_major_key(CodeStub::Major major);
@@ -2777,14 +2778,14 @@ class Code: public HeapObject {
   static const int kFlagsICStateShift        = 0;
   static const int kFlagsICInLoopShift       = 3;
   static const int kFlagsKindShift           = 4;
-  static const int kFlagsTypeShift           = 7;
-  static const int kFlagsArgumentsCountShift = 10;
-
-  static const int kFlagsICStateMask        = 0x00000007;  // 0000000111
-  static const int kFlagsICInLoopMask       = 0x00000008;  // 0000001000
-  static const int kFlagsKindMask           = 0x00000070;  // 0001110000
-  static const int kFlagsTypeMask           = 0x00000380;  // 1110000000
-  static const int kFlagsArgumentsCountMask = 0xFFFFFC00;
+  static const int kFlagsTypeShift           = 8;
+  static const int kFlagsArgumentsCountShift = 11;
+
+  static const int kFlagsICStateMask        = 0x00000007;  // 00000000111
+  static const int kFlagsICInLoopMask       = 0x00000008;  // 00000001000
+  static const int kFlagsKindMask           = 0x000000F0;  // 00011110000
+  static const int kFlagsTypeMask           = 0x00000700;  // 11100000000
+  static const int kFlagsArgumentsCountMask = 0xFFFFF800;
 
   static const int kFlagsNotUsedInLookup =
       (kFlagsICInLoopMask | kFlagsTypeMask);
diff --git a/src/spaces.cc b/src/spaces.cc
index 2c495d852..6181dabc2 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1379,6 +1379,7 @@ static void ReportCodeKindStatistics() {
       CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
+      CASE(BINARY_OP_IC);
     }
   }
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0725e311b..d3d2e55bc 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -8898,6 +8898,11 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
 }
 
 
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  return Handle<Code>::null();
+}
+
+
 int CompareStub::MinorKey() {
   // Encode the three parameters in a unique 16 bit value.
   ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
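---

Note on the fast-case patching above: a DEFAULT stub tail-calls the
BinaryOp_Patch IC utility the first time it meets non-smi operands, and
BinaryOpIC::GetTypeInfo then decides which specialized stub to install at the
call site. The standalone sketch below models that decision and the 18-bit
minor key layout (RRNNNFRASOOOOOOOMM) described in codegen-ia32.h. It is a
simplified illustration only: OperandKind, NextState and EncodeMinorKey are
invented names, not V8 APIs, and the real logic lives in
BinaryOpIC::GetTypeInfo and GenericBinaryOpStub::MinorKey.

// Standalone model of the BinaryOpIC transitions introduced by this patch.
#include <cassert>
#include <cstdio>

enum TypeInfo { DEFAULT, HEAP_NUMBERS, STRINGS, GENERIC };
enum OperandKind { SMI, HEAP_NUMBER, STRING, OTHER };

// Mirrors BinaryOpIC::GetTypeInfo: only reached once at least one operand
// is a non-smi; the two-smi case never requests patching.
static TypeInfo NextState(OperandKind left, OperandKind right) {
  bool both_numbers = (left == SMI || left == HEAP_NUMBER) &&
                      (right == SMI || right == HEAP_NUMBER);
  if (both_numbers) return HEAP_NUMBERS;
  // Fast string ADD pays off even when only one operand is a string.
  if (left == STRING || right == STRING) return STRINGS;
  return GENERIC;
}

// Mirrors the 18-bit minor key layout RRNNNFRASOOOOOOOMM: mode (2 bits),
// op (7 bits), SSE3, args-in-registers, args-reversed and flags (1 bit
// each), static type info (3 bits), runtime type info (2 bits).
static unsigned EncodeMinorKey(unsigned mode, unsigned op, bool sse3,
                               bool in_regs, bool reversed, bool no_smi_code,
                               unsigned static_info, TypeInfo runtime_info) {
  assert(mode < 4 && op < 128 && static_info < 8);
  return mode | (op << 2) | (sse3 << 9) | (in_regs << 10) |
         (reversed << 11) | (no_smi_code << 12) | (static_info << 13) |
         (static_cast<unsigned>(runtime_info) << 16);
}

int main() {
  assert(NextState(HEAP_NUMBER, SMI) == HEAP_NUMBERS);  // number fast case
  assert(NextState(STRING, HEAP_NUMBER) == STRINGS);    // string ADD case
  assert(NextState(OTHER, SMI) == GENERIC);             // everything else
  printf("key=0x%x\n",
         EncodeMinorKey(0, 5, true, false, false, false, 0, HEAP_NUMBERS));
  return 0;
}

When a specialized stub later sees operands outside its fast case, it falls
through to the runtime, and at GC time BinaryOpIC::Clear walks the stub's
relocation info to the trailing unreachable reference emitted at its end and
resets the call site back to the DEFAULT stub.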