Implementing inline caches for binary operations (ia32).
author kaznacheev@chromium.org <kaznacheev@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 1 Mar 2010 16:24:05 +0000 (16:24 +0000)
committer kaznacheev@chromium.org <kaznacheev@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 1 Mar 2010 16:24:05 +0000 (16:24 +0000)
This is a subset of a CL reviewed earlier (http://codereview.chromium.org/551093).
The register usage optimisation part has been reviewed and submitted separately.
Two fast cases are supported: HeapNumber operands, and String operands for ADD.
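
For illustration only, a minimal standalone C++ sketch of the transition logic this CL introduces; the Operand struct and NextState name are placeholders for this sketch, not identifiers in the CL (the real selection is BinaryOpIC::GetTypeInfo in src/ic.cc below):

// A binary-op stub starts in the DEFAULT state; on the first non-smi
// operands it asks the runtime to patch it to a specialized state.
enum TypeInfo { DEFAULT, HEAP_NUMBERS, STRINGS, GENERIC };

struct Operand { bool is_number; bool is_string; };

TypeInfo NextState(const Operand& left, const Operand& right) {
  // Patching is never requested while both operands are still smis.
  if (left.is_number && right.is_number) return HEAP_NUMBERS;
  // Fast string ADD is worthwhile even if only one operand is a string.
  if (left.is_string || right.is_string) return STRINGS;
  return GENERIC;
}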

Review URL: http://codereview.chromium.org/553117

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3988 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

14 files changed:
src/arm/codegen-arm.cc
src/code-stubs.cc
src/code-stubs.h
src/debug.cc
src/ia32/codegen-ia32.cc
src/ia32/codegen-ia32.h
src/ic.cc
src/ic.h
src/log.cc
src/objects-inl.h
src/objects.cc
src/objects.h
src/spaces.cc
src/x64/codegen-x64.cc

index ce84678667ffa073a75b25841cfd8b3b33a71219..966d5b93872ae9e0aa9381d85f944de07238cd9e 100644 (file)
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -31,6 +31,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
+#include "ic-inl.h"
 #include "parser.h"
 #include "register-allocator-inl.h"
 #include "runtime.h"
@@ -6235,6 +6236,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 }
 
 
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  return Handle<Code>::null();
+}
+
+
 void StackCheckStub::Generate(MacroAssembler* masm) {
   // Do tail-call to runtime routine.  Runtime routines expect at least one
   // argument, so give it a Smi.
index 4d0fd29923a6db704cca3f23b9eac6c5f27a5b07..e42f75894b2d7656676123ebcb214f42d4e5a4d3 100644 (file)
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -83,6 +83,11 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
 }
 
 
+int CodeStub::GetCodeKind() {
+  return Code::STUB;
+}
+
+
 Handle<Code> CodeStub::GetCode() {
   Code* code;
   if (!FindCodeInCache(&code)) {
@@ -97,7 +102,10 @@ Handle<Code> CodeStub::GetCode() {
     masm.GetCode(&desc);
 
     // Copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        InLoop(),
+        GetICState());
     Handle<Code> new_object =
         Factory::NewCode(desc, NULL, flags, masm.CodeObject());
     RecordCodeGeneration(*new_object, &masm);
@@ -132,7 +140,10 @@ Object* CodeStub::TryGetCode() {
     masm.GetCode(&desc);
 
     // Try to copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        InLoop(),
+        GetICState());
     Object* new_object =
         Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
     if (new_object->IsFailure()) return new_object;
index d5189c27246a9b8cd0425303f29c0c66c9f4aa16..de2ad56cc65a54ff62f01ad74564fd5558b8fd47 100644 (file)
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -28,6 +28,8 @@
 #ifndef V8_CODE_STUBS_H_
 #define V8_CODE_STUBS_H_
 
+#include "globals.h"
+
 namespace v8 {
 namespace internal {
 
@@ -139,6 +141,14 @@ class CodeStub BASE_EMBEDDED {
   // lazily generated function should be fully optimized or not.
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
+  // GenericBinaryOpStub needs to override this.
+  virtual int GetCodeKind();
+
+  // GenericBinaryOpStub needs to override this.
+  virtual InlineCacheState GetICState() {
+    return UNINITIALIZED;
+  }
+
   // Returns a name for logging/debugging purposes.
   virtual const char* GetName() { return MajorName(MajorKey(), false); }
 
index 8c4f51d95a2824c6c92abb2f505e0a76968e9e4c..41952eba009120b23f58f41d8b7c6a5d76b43cef 100644 (file)
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -123,7 +123,9 @@ void BreakLocationIterator::Next() {
     if (RelocInfo::IsCodeTarget(rmode())) {
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
-      if (code->is_inline_cache_stub() || RelocInfo::IsConstructCall(rmode())) {
+      if ((code->is_inline_cache_stub() &&
+           code->kind() != Code::BINARY_OP_IC) ||
+          RelocInfo::IsConstructCall(rmode())) {
         break_point_++;
         return;
       }
@@ -1337,24 +1339,26 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
   // Find the builtin debug break function matching the calling convention
   // used by the call site.
   if (code->is_inline_cache_stub()) {
-    if (code->is_call_stub()) {
-      return ComputeCallDebugBreak(code->arguments_count());
-    }
-    if (code->is_load_stub()) {
-      return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
-    }
-    if (code->is_store_stub()) {
-      return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
-    }
-    if (code->is_keyed_load_stub()) {
-      Handle<Code> result =
-          Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
-      return result;
-    }
-    if (code->is_keyed_store_stub()) {
-      Handle<Code> result =
-          Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
-      return result;
+    switch (code->kind()) {
+      case Code::CALL_IC:
+        return ComputeCallDebugBreak(code->arguments_count());
+
+      case Code::LOAD_IC:
+        return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+
+      case Code::STORE_IC:
+        return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+
+      case Code::KEYED_LOAD_IC:
+        return Handle<Code>(
+            Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+
+      case Code::KEYED_STORE_IC:
+        return Handle<Code>(
+            Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+
+      default:
+        UNREACHABLE();
     }
   }
   if (RelocInfo::IsConstructCall(mode)) {
index 16c610a373ee36d001ecf194624e6fdb17ec5a2b..62b3c9a0ba16f338ade90af0b3ccd5bee975b5b9 100644 (file)
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -850,13 +850,14 @@ const char* GenericBinaryOpStub::GetName() {
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
                args_reversed_ ? "_R" : "",
-               NumberInfo::ToString(operands_type_));
+               NumberInfo::ToString(static_operands_type_),
+               BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
 }
 
@@ -8083,146 +8084,174 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // Generate fast case smi code if requested. This flag is set when the fast
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
-  if (HasSmiCodeInStub()) {
+  if (ShouldGenerateSmiCode()) {
     GenerateSmiCode(masm, &call_runtime);
   } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
-    GenerateLoadArguments(masm);
+    if (!HasArgsInRegisters()) {
+      GenerateLoadArguments(masm);
+    }
   }
 
   // Floating point case.
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        if (NumberInfo::IsNumber(operands_type_)) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(edx,
-                                "GenericBinaryOpStub operand not a number.");
-            __ AbortIfNotNumber(eax,
-                                "GenericBinaryOpStub operand not a number.");
-          }
-          FloatingPointHelper::LoadSSE2Operands(masm);
-        } else {
-          FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+  if (ShouldGenerateFPCode()) {
+    switch (op_) {
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV: {
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            HasSmiCodeInStub()) {
+          // Execution reaches this point when the first non-smi argument occurs
+          // (and only if smi code is generated). This is the right moment to
+          // patch to HEAP_NUMBERS state. The transition is attempted only for
+          // the four basic operations. The stub stays in the DEFAULT state
+          // forever for all other operations (also if smi code is skipped).
+          GenerateTypeTransition(masm);
         }
 
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        GenerateHeapResultAllocation(masm, &call_runtime);
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        GenerateReturn(masm);
-      } else {  // SSE2 not available, use FPU.
-        if (NumberInfo::IsNumber(operands_type_)) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(edx,
-                                "GenericBinaryOpStub operand not a number.");
-            __ AbortIfNotNumber(eax,
-                                "GenericBinaryOpStub operand not a number.");
+        Label not_floats;
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          if (NumberInfo::IsNumber(static_operands_type_)) {
+            if (FLAG_debug_code) {
+              // Assert at runtime that inputs are only numbers.
+              __ AbortIfNotNumber(edx,
+                                  "GenericBinaryOpStub operand not a number.");
+              __ AbortIfNotNumber(eax,
+                                  "GenericBinaryOpStub operand not a number.");
+            }
+            FloatingPointHelper::LoadSSE2Operands(masm);
+          } else {
+            FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
           }
-        } else {
-          FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          GenerateHeapResultAllocation(masm, &call_runtime);
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          GenerateReturn(masm);
+        } else {  // SSE2 not available, use FPU.
+          if (NumberInfo::IsNumber(static_operands_type_)) {
+            if (FLAG_debug_code) {
+              // Assert at runtime that inputs are only numbers.
+              __ AbortIfNotNumber(edx,
+                                  "GenericBinaryOpStub operand not a number.");
+              __ AbortIfNotNumber(eax,
+                                  "GenericBinaryOpStub operand not a number.");
+            }
+          } else {
+            FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+          }
+          FloatingPointHelper::LoadFloatOperands(
+              masm,
+              ecx,
+              FloatingPointHelper::ARGS_IN_REGISTERS);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          Label after_alloc_failure;
+          GenerateHeapResultAllocation(masm, &after_alloc_failure);
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          GenerateReturn(masm);
+          __ bind(&after_alloc_failure);
+          __ ffree();
+          __ jmp(&call_runtime);
         }
-        FloatingPointHelper::LoadFloatOperands(
-            masm,
-            ecx,
-            FloatingPointHelper::ARGS_IN_REGISTERS);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
+        __ bind(&not_floats);
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            !HasSmiCodeInStub()) {
+          // Execution reaches this point when the first non-number argument
+          // occurs (and only if smi code is skipped from the stub, otherwise
+          // the patching has already been done earlier in this case branch).
+          // Try patching to STRINGS for ADD operation.
+          if (op_ == Token::ADD) {
+            GenerateTypeTransition(masm);
+          }
         }
-        Label after_alloc_failure;
-        GenerateHeapResultAllocation(masm, &after_alloc_failure);
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        GenerateReturn(masm);
-        __ bind(&after_alloc_failure);
-        __ ffree();
-        __ jmp(&call_runtime);
-      }
-    }
-    case Token::MOD: {
-      // For MOD we go directly to runtime in the non-smi case.
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      Label non_smi_result;
-      FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
-      switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
-        case Token::SAR: __ sar_cl(eax); break;
-        case Token::SHL: __ shl_cl(eax); break;
-        case Token::SHR: __ shr_cl(eax); break;
-        default: UNREACHABLE();
+        break;
       }
-      if (op_ == Token::SHR) {
-        // Check if result is non-negative and fits in a smi.
-        __ test(eax, Immediate(0xc0000000));
-        __ j(not_zero, &call_runtime);
-      } else {
-        // Check if result fits in a smi.
-        __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
+      case Token::MOD: {
+        // For MOD we go directly to runtime in the non-smi case.
+        break;
       }
-      // Tag smi result and return.
-      __ SmiTag(eax);
-      GenerateReturn(masm);
-
-      // All ops except SHR return a signed int32 that we load in a HeapNumber.
-      if (op_ != Token::SHR) {
-        __ bind(&non_smi_result);
-        // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
-        Label skip_allocation;
-        switch (mode_) {
-          case OVERWRITE_LEFT:
-          case OVERWRITE_RIGHT:
-            // If the operand was an object, we skip the
-            // allocation of a heap number.
-            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
-                                1 * kPointerSize : 2 * kPointerSize));
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
-            // Fall through!
-          case NO_OVERWRITE:
-            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
-            __ bind(&skip_allocation);
-            break;
+      case Token::BIT_OR:
+      case Token::BIT_AND:
+      case Token::BIT_XOR:
+      case Token::SAR:
+      case Token::SHL:
+      case Token::SHR: {
+        Label non_smi_result;
+        FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+        switch (op_) {
+          case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+          case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+          case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+          case Token::SAR: __ sar_cl(eax); break;
+          case Token::SHL: __ shl_cl(eax); break;
+          case Token::SHR: __ shr_cl(eax); break;
           default: UNREACHABLE();
         }
-        // Store the result in the HeapNumber and return.
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        if (op_ == Token::SHR) {
+          // Check if result is non-negative and fits in a smi.
+          __ test(eax, Immediate(0xc0000000));
+          __ j(not_zero, &call_runtime);
         } else {
-          __ mov(Operand(esp, 1 * kPointerSize), ebx);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
-          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          // Check if result fits in a smi.
+          __ cmp(eax, 0xc0000000);
+          __ j(negative, &non_smi_result);
         }
+        // Tag smi result and return.
+        __ SmiTag(eax);
         GenerateReturn(masm);
+
+        // All ops except SHR return a signed int32 that we load in
+        // a HeapNumber.
+        if (op_ != Token::SHR) {
+          __ bind(&non_smi_result);
+          // Allocate a heap number if needed.
+          __ mov(ebx, Operand(eax));  // ebx: result
+          Label skip_allocation;
+          switch (mode_) {
+            case OVERWRITE_LEFT:
+            case OVERWRITE_RIGHT:
+              // If the operand was an object, we skip the
+              // allocation of a heap number.
+              __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                  1 * kPointerSize : 2 * kPointerSize));
+              __ test(eax, Immediate(kSmiTagMask));
+              __ j(not_zero, &skip_allocation, not_taken);
+              // Fall through!
+            case NO_OVERWRITE:
+              __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+              __ bind(&skip_allocation);
+              break;
+            default: UNREACHABLE();
+          }
+          // Store the result in the HeapNumber and return.
+          if (CpuFeatures::IsSupported(SSE2)) {
+            CpuFeatures::Scope use_sse2(SSE2);
+            __ cvtsi2sd(xmm0, Operand(ebx));
+            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          } else {
+            __ mov(Operand(esp, 1 * kPointerSize), ebx);
+            __ fild_s(Operand(esp, 1 * kPointerSize));
+            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          }
+          GenerateReturn(masm);
+        }
+        break;
       }
-      break;
+      default: UNREACHABLE(); break;
     }
-    default: UNREACHABLE(); break;
   }
 
   // If all else fails, use the runtime system to get the correct
@@ -8230,21 +8259,20 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // stack in the correct order below the return address.
   __ bind(&call_runtime);
   if (HasArgsInRegisters()) {
-    __ pop(ecx);
-    if (HasArgsReversed()) {
-      __ push(eax);
-      __ push(edx);
-    } else {
-      __ push(edx);
-      __ push(eax);
-    }
-    __ push(ecx);
+    GenerateRegisterArgsPush(masm);
   }
+
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
       Label not_strings, not_string1, string1, string1_smi2;
-      Result answer;
+
+      // If this stub has already generated FP-specific code then the arguments
+      // are already in edx, eax
+      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+        GenerateLoadArguments(masm);
+      }
+
       __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &not_string1);
       __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
@@ -8333,6 +8361,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     default:
       UNREACHABLE();
   }
+
+  // Generate an unreachable reference to the DEFAULT stub so that it can be
+  // found at the end of this stub when clearing ICs at GC.
+  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+    __ TailCallStub(&uninit);
+  }
 }
 
 
@@ -8386,10 +8421,9 @@ void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
 
 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
   // If arguments are not passed in registers read them from the stack.
-  if (!HasArgsInRegisters()) {
-    __ mov(eax, Operand(esp, 1 * kPointerSize));
-    __ mov(edx, Operand(esp, 2 * kPointerSize));
-  }
+  ASSERT(!HasArgsInRegisters());
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
 }
 
 
@@ -8404,6 +8438,75 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
 }
 
 
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  ASSERT(HasArgsInRegisters());
+  __ pop(ecx);
+  if (HasArgsReversed()) {
+    __ push(eax);
+    __ push(edx);
+  } else {
+    __ push(edx);
+    __ push(eax);
+  }
+  __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  // Keep a copy of operands on the stack and make sure they are also in
+  // edx, eax.
+  if (HasArgsInRegisters()) {
+    GenerateRegisterArgsPush(masm);
+  } else {
+    GenerateLoadArguments(masm);
+  }
+
+  // Internal frame is necessary to handle exceptions properly.
+  __ EnterInternalFrame();
+
+  // Push arguments on stack if the stub expects them there.
+  if (!HasArgsInRegisters()) {
+    __ push(edx);
+    __ push(eax);
+  }
+  // Call the stub proper to get the result in eax.
+  __ call(&get_result);
+  __ LeaveInternalFrame();
+
+  __ pop(ecx);  // Return address.
+  // Left and right arguments are now on top.
+  // Push the operation result. The tail call to BinaryOp_Patch will
+  // return it to the original caller.
+  __ push(eax);
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+  __ push(ecx);  // Return address.
+
+  // Patch the caller to an appropriate specialized stub
+  // and return the operation result.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      6,
+      1);
+
+  // The entry point for the result calculation is assumed to be immediately
+  // after this sequence.
+  __ bind(&get_result);
+}
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  HandleScope scope;
+  return stub.GetCode();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Input on stack:
   // esp[4]: argument (should be number).
index 3681d77c44ee72e1bf9c8b93d8c6621500171408..0b605b606c28e3f36b11dbe068f8a7250a751ad7 100644 (file)
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -28,6 +28,8 @@
 #ifndef V8_IA32_CODEGEN_IA32_H_
 #define V8_IA32_CODEGEN_IA32_H_
 
+#include "ic-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -699,12 +701,25 @@ class GenericBinaryOpStub: public CodeStub {
         flags_(flags),
         args_in_registers_(false),
         args_reversed_(false),
-        name_(NULL),
-        operands_type_(operands_type) {
+        static_operands_type_(operands_type),
+        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        name_(NULL) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        flags_(FlagBits::decode(key)),
+        args_in_registers_(ArgsInRegistersBits::decode(key)),
+        args_reversed_(ArgsReversedBits::decode(key)),
+        use_sse3_(SSE3Bits::decode(key)),
+        static_operands_type_(StaticTypeInfoBits::decode(key)),
+        runtime_operands_type_(runtime_operands_type),
+        name_(NULL) {
+  }
+
   // Generate code to call the stub with the supplied arguments. This will add
   // code at the call site to prepare arguments either in registers or on the
   // stack together with the actual call.
@@ -724,8 +739,14 @@ class GenericBinaryOpStub: public CodeStub {
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
+
+  // Number type information of operands, determined by code generator.
+  NumberInfo::Type static_operands_type_;
+
+  // Operand type information determined at runtime.
+  BinaryOpIC::TypeInfo runtime_operands_type_;
+
   char* name_;
-  NumberInfo::Type operands_type_;  // Number type information of operands.
 
   const char* GetName();
 
@@ -739,29 +760,31 @@ class GenericBinaryOpStub: public CodeStub {
            static_cast<int>(flags_),
            static_cast<int>(args_in_registers_),
            static_cast<int>(args_reversed_),
-           NumberInfo::ToString(operands_type_));
+           NumberInfo::ToString(static_operands_type_));
   }
 #endif
 
-  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
+  // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class SSE3Bits: public BitField<bool, 9, 1> {};
   class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
   class ArgsReversedBits: public BitField<bool, 11, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
-  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+  class StaticTypeInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
+    // Encode the parameters in a unique 18 bit value.
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
            | FlagBits::encode(flags_)
            | SSE3Bits::encode(use_sse3_)
            | ArgsInRegistersBits::encode(args_in_registers_)
            | ArgsReversedBits::encode(args_reversed_)
-           | NumberInfoBits::encode(operands_type_);
+           | StaticTypeInfoBits::encode(static_operands_type_)
+           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
   }
 
   void Generate(MacroAssembler* masm);
@@ -769,6 +792,8 @@ class GenericBinaryOpStub: public CodeStub {
   void GenerateLoadArguments(MacroAssembler* masm);
   void GenerateReturn(MacroAssembler* masm);
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
 
   bool ArgsInRegistersSupported() {
     return op_ == Token::ADD || op_ == Token::SUB
@@ -783,6 +808,22 @@ class GenericBinaryOpStub: public CodeStub {
   bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
   bool HasArgsInRegisters() { return args_in_registers_; }
   bool HasArgsReversed() { return args_reversed_; }
+
+  bool ShouldGenerateSmiCode() {
+    return HasSmiCodeInStub() &&
+        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+        runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  bool ShouldGenerateFPCode() {
+    return runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return BinaryOpIC::ToState(runtime_operands_type_);
+  }
 };
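
A consistency sketch for the widened minor key (layout RRNNNFRASOOOOOOOMM, low bit on the right); the constant names below are local to this sketch, while the positions and widths are copied from the BitField declarations above:

// Each static_assert restates that the fields tile the 18-bit key without
// gaps or overlap: Mode(2) Op(7) SSE3(1) ArgsInRegisters(1) ArgsReversed(1)
// Flags(1) StaticTypeInfo(3) RuntimeTypeInfo(2).
constexpr int kModeShift        = 0,  kModeBits        = 2;  // MM
constexpr int kOpShift          = 2,  kOpBits          = 7;  // OOOOOOO
constexpr int kSSE3Shift        = 9,  kSSE3Bits        = 1;  // S
constexpr int kArgsInRegsShift  = 10, kArgsInRegsBits  = 1;  // A
constexpr int kArgsRevShift     = 11, kArgsRevBits     = 1;  // R
constexpr int kFlagShift        = 12, kFlagBits        = 1;  // F
constexpr int kStaticTypeShift  = 13, kStaticTypeBits  = 3;  // NNN
constexpr int kRuntimeTypeShift = 16, kRuntimeTypeBits = 2;  // RR

static_assert(kOpShift          == kModeShift + kModeBits, "");
static_assert(kSSE3Shift        == kOpShift + kOpBits, "");
static_assert(kArgsInRegsShift  == kSSE3Shift + kSSE3Bits, "");
static_assert(kArgsRevShift     == kArgsInRegsShift + kArgsInRegsBits, "");
static_assert(kFlagShift        == kArgsRevShift + kArgsRevBits, "");
static_assert(kStaticTypeShift  == kFlagShift + kFlagBits, "");
static_assert(kRuntimeTypeShift == kStaticTypeShift + kStaticTypeBits, "");
static_assert(kRuntimeTypeShift + kRuntimeTypeBits == 18, "18-bit key");
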
 
 
index 8fd777a362c1997240d850bb4f3d1bc486df9e50..298edf8eb649a6c009fef3779fb43de73d4532bc 100644 (file)
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -222,6 +222,7 @@ void IC::Clear(Address address) {
     case Code::STORE_IC: return StoreIC::Clear(address, target);
     case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
+    case Code::BINARY_OP_IC: return BinaryOpIC::Clear(address, target);
     default: UNREACHABLE();
   }
 }
@@ -1416,6 +1417,112 @@ Object* KeyedStoreIC_Miss(Arguments args) {
 }
 
 
+void BinaryOpIC::patch(Code* code) {
+  set_target(code);
+}
+
+
+void BinaryOpIC::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
+
+  // At the end of a fast case stub there should be a reference to
+  // a corresponding UNINITIALIZED stub, so look for the last reloc info item.
+  RelocInfo* rinfo = NULL;
+  for (RelocIterator it(target, RelocInfo::kCodeTargetMask);
+       !it.done(); it.next()) {
+    rinfo = it.rinfo();
+  }
+
+  ASSERT(rinfo != NULL);
+  Code* uninit_stub = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  ASSERT(uninit_stub->ic_state() == UNINITIALIZED &&
+         uninit_stub->kind() == Code::BINARY_OP_IC);
+  SetTargetAtAddress(address, uninit_stub);
+}
+
+
+const char* BinaryOpIC::GetName(TypeInfo type_info) {
+  switch (type_info) {
+    case DEFAULT: return "Default";
+    case GENERIC: return "Generic";
+    case HEAP_NUMBERS: return "HeapNumbers";
+    case STRINGS: return "Strings";
+    default: return "Invalid";
+  }
+}
+
+
+BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
+  switch (type_info) {
+    // DEFAULT is mapped to UNINITIALIZED so that calls to DEFAULT stubs
+    // are not cleared at GC.
+    case DEFAULT: return UNINITIALIZED;
+
+    // Could have mapped GENERIC to MONOMORPHIC just as well but MEGAMORPHIC is
+    // conceptually closer.
+    case GENERIC: return MEGAMORPHIC;
+
+    default: return MONOMORPHIC;
+  }
+}
+
+
+BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
+                                             Object* right) {
+  // Patching is never requested for the two smis.
+  ASSERT(!left->IsSmi() || !right->IsSmi());
+
+  if (left->IsNumber() && right->IsNumber()) {
+    return HEAP_NUMBERS;
+  }
+
+  if (left->IsString() || right->IsString()) {
+    // Patching for fast string ADD makes sense even if only one of the
+    // arguments is a string.
+    return STRINGS;
+  }
+
+  return GENERIC;
+}
+
+
+// defined in codegen-<arch>.cc
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
+
+
+Object* BinaryOp_Patch(Arguments args) {
+  ASSERT(args.length() == 6);
+
+  Handle<Object> left = args.at<Object>(0);
+  Handle<Object> right = args.at<Object>(1);
+  Handle<Object> result = args.at<Object>(2);
+  int key = Smi::cast(args[3])->value();
+#ifdef DEBUG
+  Token::Value op = static_cast<Token::Value>(Smi::cast(args[4])->value());
+  BinaryOpIC::TypeInfo prev_type_info =
+      static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[5])->value());
+#endif  // DEBUG
+  { HandleScope scope;
+    BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
+    Handle<Code> code = GetBinaryOpStub(key, type_info);
+    if (!code.is_null()) {
+      BinaryOpIC ic;
+      ic.patch(*code);
+#ifdef DEBUG
+      if (FLAG_trace_ic) {
+        PrintF("[BinaryOpIC (%s->%s)#%s]\n",
+            BinaryOpIC::GetName(prev_type_info),
+            BinaryOpIC::GetName(type_info),
+            Token::Name(op));
+      }
+#endif  // DEBUG
+    }
+  }
+
+  return *result;
+}
+
+
 static Address IC_utilities[] = {
 #define ADDR(name) FUNCTION_ADDR(name),
     IC_UTIL_LIST(ADDR)
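
To connect the ia32 stub with the runtime half: GenerateTypeTransition (src/ia32/codegen-ia32.cc above) leaves six tagged values below the return address, and BinaryOp_Patch reads them positionally from its Arguments object. A hypothetical mirror of that order, for illustration only (the struct and field names are invented here; the indices come from the code above):

class Object;  // stand-in forward declaration for v8::internal::Object

struct BinaryOpPatchArgs {
  Object* left;       // args.at<Object>(0): left operand
  Object* right;      // args.at<Object>(1): right operand
  Object* result;     // args.at<Object>(2): result already computed by the stub
  Object* key;        // args[3]: Smi-encoded GenericBinaryOpStub::MinorKey()
  Object* op;         // args[4]: Smi-encoded Token::Value (used in DEBUG tracing)
  Object* prev_type;  // args[5]: Smi-encoded previous BinaryOpIC::TypeInfo
};
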
index d545989bf6c0f610b48ec3a1814dbec5869fd59b..2fe3a7d52555a7f4d18d02073c248d7474c7dab8 100644 (file)
--- a/src/ic.h
+++ b/src/ic.h
@@ -55,7 +55,8 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
   ICU(LoadPropertyWithInterceptorForLoad)             \
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
-  ICU(StoreInterceptorProperty)
+  ICU(StoreInterceptorProperty)                       \
+  ICU(BinaryOp_Patch)
 
 //
 // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
@@ -444,6 +445,30 @@ class KeyedStoreIC: public IC {
 };
 
 
+class BinaryOpIC: public IC {
+ public:
+
+  enum TypeInfo {
+    DEFAULT,  // Initial state. When first executed, patches to one
+              // of the following states depending on the operands types.
+    HEAP_NUMBERS,  // Both arguments are HeapNumbers.
+    STRINGS,  // At least one of the arguments is String.
+    GENERIC   // Non-specialized case (processes any type combination).
+  };
+
+  BinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+
+  void patch(Code* code);
+
+  static void Clear(Address address, Code* target);
+
+  static const char* GetName(TypeInfo type_info);
+
+  static State ToState(TypeInfo type_info);
+
+  static TypeInfo GetTypeInfo(Object* left, Object* right);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_IC_H_
index 9d0b35246b2f78611ae5064444cf7430b7b85a5c..588d34549937418e5508b62790a06956320465bd 100644 (file)
--- a/src/log.cc
+++ b/src/log.cc
@@ -1267,6 +1267,8 @@ void Logger::LogCodeObject(Object* object) {
     switch (code_object->kind()) {
       case Code::FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
+      case Code::BINARY_OP_IC:
+        // fall through
       case Code::STUB:
         description = CodeStub::MajorName(code_object->major_key(), true);
         if (description == NULL)
index a33930ef6c497ab2a3d150cf2389b5a75bd49389..79ffaa01d25c53407641f6b7df171af535e12311 100644 (file)
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2149,14 +2149,14 @@ int Code::arguments_count() {
 
 
 CodeStub::Major Code::major_key() {
-  ASSERT(kind() == STUB);
+  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
   return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
                                                       kStubMajorKeyOffset));
 }
 
 
 void Code::set_major_key(CodeStub::Major major) {
-  ASSERT(kind() == STUB);
+  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
   ASSERT(0 <= major && major < 256);
   WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
 }
index 4f1fa334ad75657d9134ae0a243510d9f678a0bf..e50b73100c0b0bb8255e8b8204ddc0ecd687ce46 100644 (file)
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5126,6 +5126,7 @@ const char* Code::Kind2String(Kind kind) {
     case STORE_IC: return "STORE_IC";
     case KEYED_STORE_IC: return "KEYED_STORE_IC";
     case CALL_IC: return "CALL_IC";
+    case BINARY_OP_IC: return "BINARY_OP_IC";
   }
   UNREACHABLE();
   return NULL;
index 2b9321c6890c4e8bc8fe0c22a9734d784037bca0..192033d15f7db8015f14617d615c4b6c0d49dc01 100644 (file)
--- a/src/objects.h
+++ b/src/objects.h
@@ -2618,13 +2618,14 @@ class Code: public HeapObject {
     CALL_IC,
     STORE_IC,
     KEYED_STORE_IC,
-    // No more than eight kinds. The value currently encoded in three bits in
+    BINARY_OP_IC,
+    // No more than 16 kinds. The value currently encoded in four bits in
     // Flags.
 
     // Pseudo-kinds.
     REGEXP = BUILTIN,
     FIRST_IC_KIND = LOAD_IC,
-    LAST_IC_KIND = KEYED_STORE_IC
+    LAST_IC_KIND = BINARY_OP_IC
   };
 
   enum {
@@ -2670,7 +2671,7 @@ class Code: public HeapObject {
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
 
-  // [major_key]: For kind STUB, the major key.
+  // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
   inline CodeStub::Major major_key();
   inline void set_major_key(CodeStub::Major major);
 
@@ -2777,14 +2778,14 @@ class Code: public HeapObject {
   static const int kFlagsICStateShift        = 0;
   static const int kFlagsICInLoopShift       = 3;
   static const int kFlagsKindShift           = 4;
-  static const int kFlagsTypeShift           = 7;
-  static const int kFlagsArgumentsCountShift = 10;
-
-  static const int kFlagsICStateMask        = 0x00000007;  // 0000000111
-  static const int kFlagsICInLoopMask       = 0x00000008;  // 0000001000
-  static const int kFlagsKindMask           = 0x00000070;  // 0001110000
-  static const int kFlagsTypeMask           = 0x00000380;  // 1110000000
-  static const int kFlagsArgumentsCountMask = 0xFFFFFC00;
+  static const int kFlagsTypeShift           = 8;
+  static const int kFlagsArgumentsCountShift = 11;
+
+  static const int kFlagsICStateMask        = 0x00000007;  // 00000000111
+  static const int kFlagsICInLoopMask       = 0x00000008;  // 00000001000
+  static const int kFlagsKindMask           = 0x000000F0;  // 00011110000
+  static const int kFlagsTypeMask           = 0x00000700;  // 11100000000
+  static const int kFlagsArgumentsCountMask = 0xFFFFF800;
 
   static const int kFlagsNotUsedInLookup =
       (kFlagsICInLoopMask | kFlagsTypeMask);
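
A small sketch (constants copied from the declarations above; the helper and assertions are added only for illustration) confirming that the widened masks line up with the new shifts now that the kind field has grown from three bits to four:

#include <cstdint>

constexpr int kICStateShift        = 0;
constexpr int kICInLoopShift       = 3;
constexpr int kKindShift           = 4;
constexpr int kTypeShift           = 8;
constexpr int kArgumentsCountShift = 11;

constexpr uint32_t kICStateMask        = 0x00000007;
constexpr uint32_t kICInLoopMask       = 0x00000008;
constexpr uint32_t kKindMask           = 0x000000F0;
constexpr uint32_t kTypeMask           = 0x00000700;
constexpr uint32_t kArgumentsCountMask = 0xFFFFF800;

// Bits [shift, next_shift) expressed as a mask.
constexpr uint32_t FieldMask(int shift, int next_shift) {
  return ((1u << next_shift) - 1u) & ~((1u << shift) - 1u);
}

static_assert(kICStateMask  == FieldMask(kICStateShift, kICInLoopShift), "");
static_assert(kICInLoopMask == FieldMask(kICInLoopShift, kKindShift), "");
static_assert(kKindMask     == FieldMask(kKindShift, kTypeShift), "");
static_assert(kTypeMask     == FieldMask(kTypeShift, kArgumentsCountShift), "");
static_assert(kArgumentsCountMask ==
              ~((1u << kArgumentsCountShift) - 1u), "");
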
index 2c495d8529049851ba403d7761eede6f06da5248..6181dabc29d24c49515161d820a961e9f4337b56 100644 (file)
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1379,6 +1379,7 @@ static void ReportCodeKindStatistics() {
       CASE(STORE_IC);
       CASE(KEYED_STORE_IC);
       CASE(CALL_IC);
+      CASE(BINARY_OP_IC);
     }
   }
 
index 0725e311b85f34a4002086520d530221a70137ff..d3d2e55bce1b4776a4cf91cb66f44fb8164b92c8 100644 (file)
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -8898,6 +8898,11 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
 }
 
 
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  return Handle<Code>::null();
+}
+
+
 int CompareStub::MinorKey() {
   // Encode the three parameters in a unique 16 bit value.
   ASSERT(static_cast<unsigned>(cc_) < (1 << 14));