X64: Added inline keyed load/store and several other missing IC and stub-compiler functions.
author    lrn@chromium.org    Thu, 30 Jul 2009 09:18:14 +0000 (09:18 +0000)
committer lrn@chromium.org    Thu, 30 Jul 2009 09:18:14 +0000 (09:18 +0000)
Review URL: http://codereview.chromium.org/160272

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2585 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/ia32/ic-ia32.cc
src/ic.h
src/x64/assembler-x64.cc
src/x64/assembler-x64.h
src/x64/codegen-x64.cc
src/x64/ic-x64.cc
src/x64/macro-assembler-x64.cc
src/x64/macro-assembler-x64.h
src/x64/stub-cache-x64.cc
test/cctest/cctest.status
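
The inlined fast cases added below all rely on one patching protocol: the deferred slow case calls the IC stub and immediately follows the call with a test instruction (opcode 0xA9, i.e. test eax/rax with a 32-bit immediate) whose immediate encodes the negative distance back to the map-check movq, so IC initialization code can later locate and rewrite the embedded map. A minimal, self-contained sketch of that patch step, assuming the x64 layout described in the diffs (function and parameter names here are illustrative, not V8's):

    #include <cstdint>
    #include <cstring>

    // Sketch of the patch step mirrored by PatchInlinedMapCheck below.
    // return_address is the first byte after the IC call; new_map is the
    // pointer to embed in the 10-byte "movq reg, imm64" (REX.W + opcode).
    bool PatchMapCheckSketch(uint8_t* return_address, void* new_map) {
      if (*return_address != 0xA9) return false;  // no marker: nothing inlined
      int32_t delta;                              // negative distance to the movq
      std::memcpy(&delta, return_address + 1, sizeof(delta));
      uint8_t* map_immediate = return_address + delta + 2;  // skip REX.W + opcode
      std::memcpy(map_immediate, &new_map, sizeof(new_map));
      return true;
    }

    int main() {
      uint8_t code[32] = {0};   // fake code: movq at offset 0, call returns at 16
      code[16] = 0xA9;          // the marker test instruction
      int32_t delta = -16;      // movq starts 16 bytes before the marker
      std::memcpy(code + 17, &delta, sizeof(delta));
      int dummy_map = 0;
      return PatchMapCheckSketch(code + 16, &dummy_map) ? 0 : 1;
    }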

index 4ec1e8c..08ffe2f 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -839,7 +839,8 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
 
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
-  Address test_instruction_address = address + 4;
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
   if (*test_instruction_address != kTestEaxByte) return false;
@@ -865,7 +866,8 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
 
 
 static bool PatchInlinedMapCheck(Address address, Object* map) {
-  Address test_instruction_address = address + 4;  // 4 = stub address
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
   if (*test_instruction_address != kTestEaxByte) return false;
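
The hard-coded 4 being replaced in this ia32 code is the size of the rel32 field of a near call (opcode E8): the field occupies the last four bytes of the call instruction, so it starts four bytes before the return address. A small sketch of that relationship, with an illustrative helper name (not V8's):

    #include <cstdint>
    #include <cstring>

    // Recover the target of an ia32 near call (E8 rel32) from its return
    // address: the rel32 field ends at the return address and is relative to it.
    uint8_t* CallTargetFromReturnAddress(uint8_t* return_address) {
      int32_t rel32;
      std::memcpy(&rel32, return_address - 4, sizeof(rel32));  // the "4" above
      return return_address + rel32;
    }

    int main() {
      uint8_t code[16] = {0};
      code[0] = 0xE8;                     // call ...
      int32_t rel32 = 11;                 // target 11 bytes past the return address
      std::memcpy(code + 1, &rel32, sizeof(rel32));
      return CallTargetFromReturnAddress(code + 5) == code + 16 ? 0 : 1;
    }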
index d19a0e9..860b7e6 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -389,6 +389,10 @@ class KeyedStoreIC: public IC {
 
   // Support for patching the map that is checked in an inlined
   // version of keyed store.
+  // The address is the patch point for the IC call
+  // (Assembler::kTargetAddrToReturnAddrDist before the end of
+  // the call/return address).
+  // The map is the new map that the inlined code should check against.
   static bool PatchInlinedStore(Address address, Object* map);
 
   friend class IC;
index 433c830..5e70f9e 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1140,6 +1140,9 @@ void Assembler::movq(const Operand& dst, Register src) {
 
 
 void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+  // This method must not be used with heap object references. The stored
+  // address is not GC safe. Use the handle version instead.
+  ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
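
The new assert enforces what the comment says: an address emitted through this overload becomes a raw 64-bit immediate that the garbage collector never visits, so it must not point at a movable heap object. A toy illustration of the hazard, independent of V8:

    #include <cassert>
    #include <cstring>
    #include <vector>

    int main() {
      std::vector<int> from_space(1, 42);
      int* raw = &from_space[0];

      unsigned char code[sizeof(raw)];
      std::memcpy(code, &raw, sizeof(raw));   // "emit" the raw address into code

      std::vector<int> to_space(from_space);  // a moving "GC" copies the object
      int* moved = &to_space[0];

      int* baked;
      std::memcpy(&baked, code, sizeof(baked));
      assert(baked != moved);  // the embedded address was never updated
      return 0;
    }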
index 9b858cb..ad4721d 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -292,6 +292,7 @@ enum ScaleFactor {
   times_4 = 2,
   times_8 = 3,
   times_int_size = times_4,
+  times_half_pointer_size = times_4,
   times_pointer_size = times_8
 };
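
The new times_half_pointer_size factor lets a tagged smi key be used directly as a scaled index: with kSmiTag == 0 and kSmiTagSize == 1 (as the code generator below asserts), a smi word holds 2 * index, so scaling it by half a pointer (4 on x64) yields index * 8, the element's byte offset. A quick standalone check of that arithmetic, with the constants restated here for illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t kSmiTagSize = 1;   // smi = value << 1, tag bit is 0
      const int64_t kPointerSize = 8;  // x64
      const int64_t times_half_pointer_size = kPointerSize / 2;
      for (int64_t index = 0; index < 1024; index++) {
        int64_t smi_key = index << kSmiTagSize;  // still tagged
        assert(smi_key * times_half_pointer_size == index * kPointerSize);
      }
      return 0;
    }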
 
index 769a8ee..6a038ca 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -389,6 +389,112 @@ bool CodeGenerator::HasValidEntryRegisters() {
 #endif
 
 
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  explicit DeferredReferenceGetKeyedValue(Register dst,
+                                          Register receiver,
+                                          Register key,
+                                          bool is_global)
+      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Label patch_site_;
+  Register dst_;
+  Register receiver_;
+  Register key_;
+  bool is_global_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  __ push(receiver_);  // First IC argument.
+  __ push(key_);       // Second IC argument.
+
+  // Calculate the delta from the IC call instruction to the map check
+  // movq instruction in the inlined version.  This delta is stored in
+  // a test(rax, delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the movq instruction.
+  // This means that we cannot allow test instructions after calls to
+  // KeyedLoadIC stubs in other places.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  RelocInfo::Mode mode = is_global_
+                         ? RelocInfo::CODE_TARGET_CONTEXT
+                         : RelocInfo::CODE_TARGET;
+  __ Call(ic, mode);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the __
+  // macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  // TODO(X64): Consider whether it's worth switching the test to a
+  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+  // be generated normally.
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
+  Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+  // Push receiver and key arguments on the stack.
+  __ push(receiver_);
+  __ push(key_);
+  // Move value argument to rax as expected by the IC stub.
+  if (!value_.is(rax)) __ movq(rax, value_);
+  // Call the IC stub.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // The delta from the start of the map-compare instructions (initial movq)
+  // to the test instruction.  We use masm_-> directly here instead of the
+  // __ macro because the macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered
+  // when doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
+  // Restore value (returned from store IC), key and receiver
+  // registers.
+  if (!value_.is(rax)) __ movq(value_, rax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
 class DeferredStackCheck: public DeferredCode {
  public:
   DeferredStackCheck() {
@@ -2193,9 +2299,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
       // The receiver is the argument to the runtime call.  It is the
       // first value pushed when the reference was loaded to the
       // frame.
-      // TODO(X64): Enable this and the switch back to fast, once they work.
-      // frame_->PushElementAt(target.size() - 1);
-      // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+      frame_->PushElementAt(target.size() - 1);
+      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
     }
     if (node->op() == Token::ASSIGN ||
         node->op() == Token::INIT_VAR ||
@@ -2203,20 +2308,18 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
       Load(node->value());
 
     } else {
-      // Literal* literal = node->value()->AsLiteral();
+      Literal* literal = node->value()->AsLiteral();
       bool overwrite_value =
           (node->value()->AsBinaryOperation() != NULL &&
            node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-      // Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
       // There are two cases where the target is not read in the right hand
       // side, that are easy to test for: the right hand side is a literal,
       // or the right hand side is a different variable.  TakeValue invalidates
       // the target, with an implicit promise that it will be written to again
       // before it is read.
-      // TODO(X64): Implement TakeValue optimization.  Check issue 150016.
-      if (false) {
-        // if (literal != NULL || (right_var != NULL && right_var != var)) {
-        // target.TakeValue(NOT_INSIDE_TYPEOF);
+      if (literal != NULL || (right_var != NULL && right_var != var)) {
+        target.TakeValue(NOT_INSIDE_TYPEOF);
       } else {
         target.GetValue(NOT_INSIDE_TYPEOF);
       }
@@ -2247,9 +2350,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
         // argument to the runtime call is the receiver, which is the
         // first value pushed as part of the reference, which is below
         // the lhs value.
-        // TODO(X64): Enable this once ToFastProperties works.
-        // frame_->PushElementAt(target.size());
-        // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+        frame_->PushElementAt(target.size());
+        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
       }
     }
   }
@@ -3645,7 +3747,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
 
   // Smi => false iff zero.
   ASSERT(kSmiTag == 0);
-  __ testq(value.reg(), value.reg());
+  __ testl(value.reg(), value.reg());
   dest->false_target()->Branch(zero);
   __ testl(value.reg(), Immediate(kSmiTagMask));
   dest->true_target()->Branch(zero);
@@ -4130,7 +4232,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
   // A test rax instruction following the call signals that the inobject
   // property case was inlined.  Ensure that there is not a test eax
   // instruction here.
-  __ nop();
+  masm_->nop();
   // Discard the global object. The result is in answer.
   frame_->Drop();
   return answer;
@@ -4700,7 +4802,7 @@ void DeferredReferenceGetNamedValue::Generate() {
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
-  masm_->testq(rax, Immediate(-delta_to_patch_site));
+  masm_->testl(rax, Immediate(-delta_to_patch_site));
   __ IncrementCounter(&Counters::named_load_inline_miss, 1);
 
   if (!dst_.is(rax)) __ movq(dst_, rax);
@@ -5287,7 +5389,8 @@ void Reference::GetValue(TypeofState typeof_state) {
                    kScratchRegister);
         // This branch is always a forwards branch so it's always a fixed
         // size which allows the assert below to succeed and patching to work.
-        deferred->Branch(not_equal);
+        // Don't use deferred->Branch(...), since that might add coverage code.
+        masm->j(not_equal, deferred->entry_label());
 
         // The delta from the patch label to the load offset must be
         // statically known.
@@ -5314,26 +5417,117 @@ void Reference::GetValue(TypeofState typeof_state) {
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
+
       // Inline array load code if inside of a loop.  We do not know
       // the receiver map yet, so we initially generate the code with
       // a check against an invalid map.  In the inline cache code, we
       // patch the map check if appropriate.
+      if (cgen_->loop_nesting() > 0) {
+        Comment cmnt(masm, "[ Inlined load from keyed Property");
 
-      // TODO(x64): Implement inlined loads for keyed properties.
-      // Make sure to load length field as a 32-bit quantity.
-      //      Comment cmnt(masm, "[ Load from keyed Property");
-
-      RelocInfo::Mode mode = is_global
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-      Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
-      // Make sure that we do not have a test instruction after the
-      // call.  A test instruction after the call is used to
-      // indicate that we have generated an inline version of the
-      // keyed load.  The explicit nop instruction is here because
-      // the push that follows might be peep-hole optimized away.
-      __ nop();
-      cgen_->frame()->Push(&answer);
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        // Use a fresh temporary to load the elements without destroying
+        // the receiver which is needed for the deferred slow case.
+        Result elements = cgen_->allocator()->Allocate();
+        ASSERT(elements.is_valid());
+
+        // Use a fresh temporary for the index and later the loaded
+        // value.
+        Result index = cgen_->allocator()->Allocate();
+        ASSERT(index.is_valid());
+
+        DeferredReferenceGetKeyedValue* deferred =
+            new DeferredReferenceGetKeyedValue(index.reg(),
+                                               receiver.reg(),
+                                               key.reg(),
+                                               is_global);
+
+        // Check that the receiver is not a smi (only needed if this
+        // is not a load from the global context) and that it has the
+        // expected map.
+        if (!is_global) {
+          __ testl(receiver.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(zero);
+        }
+
+        // Initially, use an invalid map. The map is patched in the IC
+        // initialization code.
+        __ bind(deferred->patch_site());
+        // Use masm-> here instead of the double underscore macro since extra
+        // coverage code can interfere with the patching.
+        masm->movq(kScratchRegister, Factory::null_value(),
+                   RelocInfo::EMBEDDED_OBJECT);
+        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                   kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Check that the key is a non-negative smi.
+        __ testl(key.reg(),
+                 Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
+        deferred->Branch(not_zero);
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ movq(elements.reg(),
+                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+               Factory::fixed_array_map());
+        deferred->Branch(not_equal);
+
+        // Shift the key to get the actual index value and check that
+        // it is within bounds.
+        __ movl(index.reg(), key.reg());
+        __ shrl(index.reg(), Immediate(kSmiTagSize));
+        __ cmpl(index.reg(),
+                FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+        deferred->Branch(above_equal);
+
+        // The index register holds the un-smi-tagged key. It has been
+        // zero-extended to 64-bits, so it can be used directly as index in the
+        // operand below.
+        // Load and check that the result is not the hole.  We could
+        // reuse the index or elements register for the value.
+        //
+        // TODO(206): Consider whether it makes sense to try some
+        // heuristic about which register to reuse.  For example, if
+        // one is rax, then we can reuse that one because the value
+        // coming from the deferred code will be in rax.
+        Result value = index;
+        __ movq(value.reg(),
+                Operand(elements.reg(),
+                        index.reg(),
+                        times_pointer_size,
+                        FixedArray::kHeaderSize - kHeapObjectTag));
+        elements.Unuse();
+        index.Unuse();
+        __ Cmp(value.reg(), Factory::the_hole_value());
+        deferred->Branch(equal);
+        __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+        deferred->BindExit();
+        // Restore the receiver and key to the frame and push the
+        // result on top of it.
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+
+      } else {
+        Comment cmnt(masm, "[ Load from keyed Property");
+        RelocInfo::Mode mode = is_global
+                               ? RelocInfo::CODE_TARGET_CONTEXT
+                               : RelocInfo::CODE_TARGET;
+        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed load.  The explicit nop instruction is here because
+        // the push that follows might be peep-hole optimized away.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
       break;
     }
 
@@ -5400,15 +5594,105 @@ void Reference::SetValue(InitState init_state) {
     case KEYED: {
       Comment cmnt(masm, "[ Store to keyed Property");
 
-      // TODO(x64): Implement inlined version of keyed stores.
+      // Generate inlined version of the keyed store if the code is in
+      // a loop and the key is likely to be a smi.
+      Property* property = expression()->AsProperty();
+      ASSERT(property != NULL);
+      SmiAnalysis* key_smi_analysis = property->key()->type();
 
-      Result answer = cgen_->frame()->CallKeyedStoreIC();
-      // Make sure that we do not have a test instruction after the
-      // call.  A test instruction after the call is used to
-      // indicate that we have generated an inline version of the
-      // keyed store.
-      __ nop();
-      cgen_->frame()->Push(&answer);
+      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+        Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+        // Get the receiver, key and value into registers.
+        Result value = cgen_->frame()->Pop();
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+
+        Result tmp = cgen_->allocator_->Allocate();
+        ASSERT(tmp.is_valid());
+
+        // Determine whether the value is a constant before putting it
+        // in a register.
+        bool value_is_constant = value.is_constant();
+
+        // Make sure that value, key and receiver are in registers.
+        value.ToRegister();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        DeferredReferenceSetKeyedValue* deferred =
+            new DeferredReferenceSetKeyedValue(value.reg(),
+                                               key.reg(),
+                                               receiver.reg());
+
+        // Check that the value is a smi if it is not a constant.
+        // We can skip the write barrier for smis and constants.
+        if (!value_is_constant) {
+          __ testl(value.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+        }
+
+        // Check that the key is a non-negative smi.
+        __ testl(key.reg(),
+                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+        deferred->Branch(not_zero);
+
+        // Check that the receiver is not a smi.
+        __ testl(receiver.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(zero);
+
+        // Check that the receiver is a JSArray.
+        __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Check that the key is within bounds.  Both the key and the
+        // length of the JSArray are smis, so compare only low 32 bits.
+        __ cmpl(key.reg(),
+                FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+        deferred->Branch(greater_equal);
+
+        // Get the elements array from the receiver and check that it
+        // is a flat array (not a dictionary).
+        __ movq(tmp.reg(),
+                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        // Bind the deferred code patch site to be able to locate the
+        // fixed array map comparison.  When debugging, we patch this
+        // comparison to always fail so that we will hit the IC call
+        // in the deferred code which will allow the debugger to
+        // break for fast case stores.
+        __ bind(deferred->patch_site());
+        // Avoid using __ to ensure the distance from patch_site
+        // to the map address is always the same.
+        masm->movq(kScratchRegister, Factory::fixed_array_map(),
+                   RelocInfo::EMBEDDED_OBJECT);
+        __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+                kScratchRegister);
+        deferred->Branch(not_equal);
+
+        // Store the value.
+        ASSERT_EQ(1, kSmiTagSize);
+        ASSERT_EQ(0, kSmiTag);
+        __ movq(Operand(tmp.reg(),
+                        key.reg(),
+                        times_half_pointer_size,
+                        FixedArray::kHeaderSize - kHeapObjectTag),
+                value.reg());
+        __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+        deferred->BindExit();
+
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+      } else {
+        Result answer = cgen_->frame()->CallKeyedStoreIC();
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed store.
+        masm->nop();
+        cgen_->frame()->Push(&answer);
+      }
       break;
     }
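
Both inlined paths above fold "the key is a smi" and "the key is non-negative" into one test against kSmiTagMask | 0x80000000: a set tag bit means the key is not a smi and a set sign bit means it is negative, so either one sends control to the deferred slow case. A small standalone check of that equivalence (smi encoding restated under the same kSmiTag == 0, kSmiTagSize == 1 assumption):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kSmiTagMask = 1;
      const uint32_t kSignBit = 0x80000000u;
      for (int64_t value = -4096; value <= 4096; value++) {
        uint32_t smi_word = static_cast<uint32_t>(value) << 1;  // tagged smi bits
        bool one_test = (smi_word & (kSmiTagMask | kSignBit)) == 0;
        bool two_tests = ((smi_word & kSmiTagMask) == 0) &&
                         (static_cast<int32_t>(smi_word) >= 0);
        assert(one_test == two_tests);
        // A word with the tag bit set (not a smi) must also fail the single test.
        assert(((smi_word | kSmiTagMask) & (kSmiTagMask | kSignBit)) != 0);
      }
      return 0;
    }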
 
index 8f101df..0ef75f8 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -159,16 +159,64 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
 }
 
 
+// One byte opcode for test eax,0xXXXXXXXX.
+static const byte kTestEaxByte = 0xA9;
+
+
+static bool PatchInlinedMapCheck(Address address, Object* map) {
+  // The arguments are the address of the start of the call sequence that
+  // called the IC and the new map to use for the inlined map check.
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
+  // The keyed load has a fast inlined case if the IC call instruction
+  // is immediately followed by a test instruction.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  // Fetch the offset from the test instruction to the map compare
+  // instructions (starting with the 64-bit immediate mov of the map
+  // address). This offset is stored in the last 4 bytes of the 5
+  // byte test instruction.
+  Address delta_address = test_instruction_address + 1;
+  int delta = *reinterpret_cast<int*>(delta_address);
+  // Compute the map address.  The map address is in the last 8 bytes
+  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
+  // to the offset to get the map address.
+  Address map_address = test_instruction_address + delta + 2;
+  // Patch the map check.
+  *(reinterpret_cast<Object**>(map_address)) = map;
+  return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
 void KeyedLoadIC::ClearInlinedVersion(Address address) {
-  // TODO(X64): Implement this when LoadIC is enabled.
+  // Insert null as the map to check for to make sure the map check fails,
+  // sending control flow to the IC instead of the inlined version.
+  PatchInlinedLoad(address, Heap::null_value());
 }
 
+
 void KeyedStoreIC::ClearInlinedVersion(Address address) {
-  // TODO(X64): Implement this when LoadIC is enabled.
+  // Insert null as the elements map to check for.  This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
 }
 
+
 void KeyedStoreIC::RestoreInlinedVersion(Address address) {
-  UNIMPLEMENTED();
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
 }
 
 
@@ -310,18 +358,6 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
 }
 
 
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  // Never patch the map in the map check, so the check always fails.
-  return false;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  // Never patch the map in the map check, so the check always fails.
-  return false;
-}
-
-
 void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
   // ----------- S t a t e -------------
   //  -- rax     : value
@@ -539,7 +575,10 @@ const int LoadIC::kOffsetToLoadInstruction = 20;
 
 
 void LoadIC::ClearInlinedVersion(Address address) {
-  // TODO(X64): Implement this when LoadIC is enabled.
+  // Reset the map check of the inlined inobject property load (if
+  // present) to guarantee failure by holding an invalid map (the null
+  // value).  The offset can be patched to anything.
+  PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
 }
 
 
@@ -605,13 +644,37 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
+
 void LoadIC::GenerateStringLength(MacroAssembler* masm) {
   Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
-  // TODO(X64): Implement this function.  Until then, the code is not patched.
-  return false;
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
+  // If the instruction following the call is not a test eax, nothing
+  // was inlined.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 8 bytes of the 10-byte
+  // immediate move instruction, so we add 2 to get the
+  // offset to the last 8 bytes.
+  Address map_address = test_instruction_address + delta + 2;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The offset is in the 32-bit displacement of a seven byte
+  // memory-to-register move instruction (REX.W 0x8B ModR/M disp32),
+  // so we add 3 to get the offset of the displacement.
+  Address offset_address =
+      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+  return true;
 }
 
 void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
index 5e888ec..f58e1cd 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -318,6 +318,17 @@ void MacroAssembler::Push(Handle<Object> source) {
 }
 
 
+void MacroAssembler::Push(Smi* source) {
+  if (IsUnsafeSmi(source)) {
+    LoadUnsafeSmi(kScratchRegister, source);
+    push(kScratchRegister);
+  } else {
+    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
+    push(Immediate(smi));
+  }
+}
+
+
 void MacroAssembler::Jump(ExternalReference ext) {
   movq(kScratchRegister, ext);
   jmp(kScratchRegister);
@@ -363,6 +374,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   movq(kScratchRegister, code_object, rmode);
 #ifdef DEBUG
+  // Patch target is kPointerSize bytes *before* the target label.
   Label target;
   bind(&target);
 #endif
index 44a76a4..cba55eb 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -164,6 +164,7 @@ class MacroAssembler: public Assembler {
   void Cmp(Register dst, Handle<Object> source);
   void Cmp(const Operand& dst, Handle<Object> source);
   void Push(Handle<Object> source);
+  void Push(Smi* smi);
 
   // Control Flow
   void Jump(Address destination, RelocInfo::Mode rmode);
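
GenerateProbe in the stub cache changes below performs the usual two-table lookup: hash the receiver map and name into the primary table and, on a miss, re-hash into the secondary table before falling through to the runtime. A minimal sketch of that lookup structure in plain C++ (table sizes and hash mixing are placeholders, not V8's):

    #include <cstdint>

    struct Entry { uint32_t key; const void* code; };

    constexpr uint32_t kPrimaryTableSize = 2048;   // placeholder sizes
    constexpr uint32_t kSecondaryTableSize = 512;
    Entry primary[kPrimaryTableSize];
    Entry secondary[kSecondaryTableSize];

    // Probe the primary table, re-hash into the secondary table on a miss, and
    // return nullptr to mean "fall through and enter the runtime system".
    const void* Probe(uint32_t name_hash, uint32_t map_bits, uint32_t flags) {
      uint32_t key = name_hash + map_bits;              // mix name and map
      uint32_t p = (key ^ flags) % kPrimaryTableSize;   // primary index
      if (primary[p].key == key) return primary[p].code;
      uint32_t s = (p - name_hash + flags) % kSecondaryTableSize;
      if (secondary[s].key == key) return secondary[s].code;
      return nullptr;
    }

    int main() { return 0; }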
index 6061e12..7d7f5eb 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
 namespace v8 {
 namespace internal {
 
+//-----------------------------------------------------------------------------
+// StubCompiler static helper functions
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset) {
+  ExternalReference key_offset(SCTableReference::keyReference(table));
+  Label miss;
+
+  __ movq(kScratchRegister, key_offset);
+  // Check that the key in the entry matches the name.
+  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+  __ j(not_equal, &miss);
+  // Get the code entry from the cache.
+  // Use key_offset + kPointerSize, rather than loading value_offset.
+  __ movq(kScratchRegister,
+          Operand(kScratchRegister, offset, times_4, kPointerSize));
+  // Check that the flags match what we're looking for.
+  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+  __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+  __ cmpl(offset, Immediate(flags));
+  __ j(not_equal, &miss);
+
+  // Jump to the first instruction in the code stub.
+  __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(kScratchRegister);
+
+  __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = Builtins::builtin(Builtins::LoadIC_Miss);
+  } else {
+    code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  // Load the global or builtins object from the current context.
+  __ movq(prototype,
+             Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ movq(prototype,
+             FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ movq(prototype,
+             FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ movq(dst, FieldOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+    __ movq(dst, FieldOperand(dst, offset));
+  }
+}
+
+
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Pushable name,
+                                     JSObject* holder_obj) {
+  __ push(receiver);
+  __ push(holder);
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  __ movq(kScratchRegister, Handle<Object>(interceptor),
+          RelocInfo::EMBEDDED_OBJECT);
+  __ push(kScratchRegister);
+  __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  Label miss;
+  USE(extra);  // The register extra is not used on the X64 platform.
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 16.
+  ASSERT(sizeof(Entry) == 16);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  // Use only the low 32 bits of the map pointer.
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(masm, flags, kPrimary, name, scratch);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ subl(scratch, name);
+  __ addl(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(masm, flags, kSecondary, name, scratch);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  // Check that the object isn't a smi.
+  __ testl(receiver_reg, Immediate(kSmiTagMask));
+  __ j(zero, miss_label);
+
+  // Check that the map of the object hasn't changed.
+  __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, miss_label);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ Move(rcx, Handle<Map>(transition));
+    Handle<Code> ic(Builtins::builtin(storage_extend));
+    __ Jump(ic, RelocInfo::CODE_TARGET);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+            Handle<Map>(transition));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ movq(FieldOperand(receiver_reg, offset), rax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ movq(name_reg, rax);
+    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ movq(FieldOperand(scratch, offset), rax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ movq(name_reg, rax);
+    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+  }
+
+  // Return the value (register rax).
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss_label);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, miss_label);
+
+  // Load length directly from the JS array.
+  __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
+  __ ret(0);
+}
+
+
+// Generate code to check if an object is a string.  If the object is
+// a string, the map's instance type is left in the scratch register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the object isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, smi);
+
+  // Check that the object is a string.
+  __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  ASSERT(kNotStringTag != 0);
+  __ testl(scratch, Immediate(kNotStringTag));
+  __ j(not_zero, non_string_object);
+}
+
+
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch,
+                                            Label* miss) {
+  Label load_length, check_wrapper;
+
+  // Check if the object is a string leaving the instance type in the
+  // scratch register.
+  GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+
+  // Load length directly from the string.
+  __ bind(&load_length);
+  __ and_(scratch, Immediate(kStringSizeMask));
+  __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
+  // rcx is also the receiver.
+  __ lea(rcx, Operand(scratch, String::kLongLengthShift));
+  __ shr(rax);  // rcx is implicit shift register.
+  __ shl(rax, Immediate(kSmiTagSize));
+  __ ret(0);
+
+  // Check if the object is a JSValue wrapper.
+  __ bind(&check_wrapper);
+  __ cmpl(scratch, Immediate(JS_VALUE_TYPE));
+  __ j(not_equal, miss);
+
+  // Check if the wrapped value is a string and load the length
+  // directly if it is.
+  __ movq(receiver, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, receiver, scratch, miss, miss);
+  __ jmp(&load_length);
+}
+
+
+template <class Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Pushable name,
+                                                   JSObject* holder_obj) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+  __ movq(rax, Immediate(5));
+  __ movq(rbx, ref);
+
+  CEntryStub stub;
+  __ CallStub(&stub);
+}
+
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register result,
+                                                 Register scratch,
+                                                 Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, result, miss_label);
+  if (!result.is(rax)) __ movq(rax, result);
+  __ ret(0);
+}
+
+
+static void LookupPostInterceptor(JSObject* holder,
+                                  String* name,
+                                  LookupResult* lookup) {
+  holder->LocalLookupRealNamedProperty(name, lookup);
+  if (lookup->IsNotFound()) {
+    Object* proto = holder->GetPrototype();
+    if (proto != Heap::null_value()) {
+      proto->Lookup(name, lookup);
+    }
+  }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    AccessorInfo* callback = 0;
+    bool optimize = false;
+    // So far the most popular follow ups for interceptor loads are FIELD
+    // and CALLBACKS, so inline only them, other cases may be added
+    // later.
+    if (lookup->type() == FIELD) {
+      optimize = true;
+    } else if (lookup->type() == CALLBACKS) {
+      Object* callback_object = lookup->GetCallbackObject();
+      if (callback_object->IsAccessorInfo()) {
+        callback = AccessorInfo::cast(callback_object);
+        optimize = callback->getter() != NULL;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    // Note: starting a frame here makes GC aware of pointers pushed below.
+    __ EnterInternalFrame();
+
+    if (lookup->type() == CALLBACKS) {
+      __ push(receiver);
+    }
+    __ push(holder);
+    __ push(name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    Label interceptor_failed;
+    __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+    __ j(equal, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ ret(0);
+
+    __ bind(&interceptor_failed);
+    __ pop(name_);
+    __ pop(holder);
+    if (lookup->type() == CALLBACKS) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
+    if (lookup->type() == FIELD) {
+      holder = stub_compiler->CheckPrototypes(holder_obj,
+                                              holder,
+                                              lookup->holder(),
+                                              scratch1,
+                                              scratch2,
+                                              name,
+                                              miss_label);
+      stub_compiler->GenerateFastPropertyLoad(masm,
+                                              rax,
+                                              holder,
+                                              lookup->holder(),
+                                              lookup->GetFieldIndex());
+      __ ret(0);
+    } else {
+      ASSERT(lookup->type() == CALLBACKS);
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      ASSERT(callback != NULL);
+      ASSERT(callback->getter() != NULL);
+
+      Label cleanup;
+      __ pop(scratch2);
+      __ push(receiver);
+      __ push(scratch2);
+
+      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+                                              lookup->holder(), scratch1,
+                                              scratch2,
+                                              name,
+                                              &cleanup);
+
+      __ pop(scratch2);  // save old return address
+      __ push(holder);
+      __ Move(holder, Handle<AccessorInfo>(callback));
+      __ push(holder);
+      __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+      __ push(name_);
+      __ push(scratch2);  // restore old return address
+
+      ExternalReference ref =
+          ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+      __ TailCallRuntime(ref, 5);
+
+      __ bind(&cleanup);
+      __ pop(scratch1);
+      __ pop(scratch2);
+      __ push(scratch1);
+    }
+  }
+
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ pop(scratch);  // save old return address
+    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    __ push(scratch);  // restore old return address
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+    __ TailCallRuntime(ref, 5);
+  }
+
+ private:
+  Register name_;
+};
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+                                   StubCompiler* stub_compiler,
+                                   MacroAssembler* masm,
+                                   JSObject* object,
+                                   JSObject* holder,
+                                   String* name,
+                                   LookupResult* lookup,
+                                   Register receiver,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* miss) {
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      stub_compiler->CheckPrototypes(object, receiver, holder,
+                                     scratch1, scratch2, name, miss);
+
+  if (lookup->IsValid() && lookup->IsCacheable()) {
+    compiler->CompileCacheable(masm,
+                               stub_compiler,
+                               receiver,
+                               reg,
+                               scratch1,
+                               scratch2,
+                               holder,
+                               lookup,
+                               name,
+                               miss);
+  } else {
+    compiler->CompileRegular(masm,
+                             receiver,
+                             reg,
+                             scratch2,
+                             holder,
+                             miss);
+  }
+}
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+  explicit CallInterceptorCompiler(const ParameterCount& arguments)
+      : arguments_(arguments), argc_(arguments.immediate()) {}
+
+  void CompileCacheable(MacroAssembler* masm,
+                        StubCompiler* stub_compiler,
+                        Register receiver,
+                        Register holder,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        Label* miss_label) {
+    JSFunction* function = 0;
+    bool optimize = false;
+    // So far the most popular case for failed interceptor is
+    // CONSTANT_FUNCTION sitting below.
+    if (lookup->type() == CONSTANT_FUNCTION) {
+      function = lookup->GetConstantFunction();
+      // JSArray holder is a special case for call constant function
+      // (see the corresponding code).
+      if (function->is_compiled() && !holder_obj->IsJSArray()) {
+        optimize = true;
+      }
+    }
+
+    if (!optimize) {
+      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      return;
+    }
+
+    __ EnterInternalFrame();
+    __ push(holder);  // save the holder
+
+    CompileCallLoadPropertyWithInterceptor(
+        masm,
+        receiver,
+        holder,
+        // Under EnterInternalFrame this refers to name.
+        Operand(rbp, (argc_ + 3) * kPointerSize),
+        holder_obj);
+
+    __ pop(receiver);  // restore holder
+    __ LeaveInternalFrame();
+
+    __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+    Label invoke;
+    __ j(not_equal, &invoke);
+
+    stub_compiler->CheckPrototypes(holder_obj, receiver,
+                                   lookup->holder(), scratch1,
+                                   scratch2,
+                                   name,
+                                   miss_label);
+    if (lookup->holder()->IsGlobalObject()) {
+      __ movq(rdx, Operand(rsp, (argc_ + 1) * kPointerSize));
+      __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+      __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdx);
+    }
+
+    ASSERT(function->is_compiled());
+    // Get the function and setup the context.
+    __ Move(rdi, Handle<JSFunction>(function));
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+    // Jump to the cached code (tail call).
+    ASSERT(function->is_compiled());
+    Handle<Code> code(function->code());
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    __ InvokeCode(code, expected, arguments_,
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+    __ bind(&invoke);
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ EnterInternalFrame();
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             Operand(rbp, (argc_ + 3) * kPointerSize),
+                             holder_obj);
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+    __ movq(rax, Immediate(5));
+    __ movq(rbx, ref);
+
+    CEntryStub stub;
+    __ CallStub(&stub);
+
+    __ LeaveInternalFrame();
+  }
+
+ private:
+  const ParameterCount& arguments_;
+  int argc_;
+};
+
+
+#undef __
+
 #define __ ACCESS_MASM((masm()))
 
 
@@ -188,23 +827,81 @@ Object* CallStubCompiler::CompileCallField(Object* object,
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
-  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ testl(rdx, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Do the right check and compute the holder register.
+  Register reg =
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+
+  GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
+
+  // Check that the function really is a function.
+  __ testl(rdi, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
 
-  // Check that the receiver isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
 
-  // Do the right check and compute the holder register.
-  Register reg =
-      CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rcx, name, &miss);
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
+  CallInterceptorCompiler compiler(arguments());
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         JSObject::cast(object),
+                         holder,
+                         name,
+                         &lookup,
+                         rdx,
+                         rbx,
+                         rcx,
+                         &miss);
+
+  // Restore receiver.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   // Check that the function really is a function.
-  __ testl(rdi, Immediate(kSmiTagMask));
+  __ testl(rax, Immediate(kSmiTagMask));
   __ j(zero, &miss);
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
+  __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &miss);
 
   // Patch the receiver on the stack with the global proxy if
@@ -215,23 +912,16 @@ Object* CallStubCompiler::CompileCallField(Object* object,
   }
 
   // Invoke the function.
+  __ movq(rdi, rax);
   __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
 
-  // Handle call cache miss.
+  // Handle load cache miss.
   __ bind(&miss);
-  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  Handle<Code> ic = ComputeCallMiss(argc);
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(FIELD, name);
-}
-
-
-Object* CallStubCompiler::CompileCallInterceptor(Object* a,
-                                                 JSObject* b,
-                                                 String* c) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -305,12 +995,25 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
 }
 
 
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
-                                              JSObject* b,
-                                              AccessorInfo* c,
-                                              String* d) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+                       callback, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -356,11 +1059,37 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
 }
 
 
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
-                                                 JSObject* b,
-                                                 String* c) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                 JSObject* holder,
+                                                 String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  // TODO(368): Compile in the whole chain: all the interceptors in
+  // prototypes and ultimate answer.
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          rax,
+                          rcx,
+                          rdx,
+                          rbx,
+                          name,
+                          &miss);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -415,11 +1144,213 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
 }
 
 
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
-                                                AccessorInfo* b,
-                                                String* c) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadArrayLength(masm(), rcx, rdx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
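Each specialized keyed stub increments its counter on entry and decrements it again on the miss path, so the counter ends up recording only the loads the stub actually handled. A small sketch of that idiom, with a plain int standing in for the Counters machinery:

    // Sketch of the IncrementCounter / DecrementCounter pairing used above.
    struct CounterSketch { int value = 0; };

    bool KeyedLoadArrayLengthSketch(CounterSketch* counter, bool fast_path_applies) {
      counter->value++;        // IncrementCounter(...) on entry
      if (!fast_path_applies) {
        counter->value--;      // DecrementCounter(...) on the miss path
        return false;          // fall through to the generic KEYED_LOAD_IC miss stub
      }
      return true;             // fast path taken; the increment sticks
    }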
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadConstant(receiver, holder, rcx, rbx, rdx,
+                       value, name, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
+}
+
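These keyed stubs are compiled for one concrete property name, so each begins by comparing the incoming key against that name and bailing out to the miss handler on mismatch. A rough model of the guard, with std::string standing in for V8 String handles:

    #include <string>

    // Sketch only: the "check that the name has not changed" guard.
    bool SpecializedKeyedLoad(const std::string& key,
                              const std::string& compiled_for_name) {
      if (key != compiled_for_name) {
        return false;  // miss: hand over to the generic keyed load IC
      }
      // ... fast path specialized for compiled_for_name ...
      return true;
    }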
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          rcx,
+                          rax,
+                          rdx,
+                          rbx,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadStringLength(masm(), rcx, rdx, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ testl(rbx, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, &miss);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ pop(rbx);  // remove the return address
+  __ push(Operand(rsp, 0));  // receiver
+  __ Push(Handle<AccessorInfo>(callback));  // callback info
+  __ push(rcx);  // name
+  __ push(rax);  // value
+  __ push(rbx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+  __ TailCallRuntime(store_callback_property, 4);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ Move(rcx, Handle<String>(name));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
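
The testl(reg, Immediate(kSmiTagMask)) / j(zero, &miss) pairs in these store stubs are the usual smi filter: in this generation of V8 a small integer carries a cleared low tag bit, so a zero test result means the value is a smi rather than a heap object. A sketch of the predicate (the mask value of 1 is an assumption consistent with that tagging scheme):

    #include <cstdint>

    // Sketch: low-bit smi tagging as relied on by testl(reg, kSmiTagMask).
    const intptr_t kSmiTagMaskSketch = 1;  // assumed value

    bool IsSmiSketch(intptr_t tagged_value) {
      return (tagged_value & kSmiTagMaskSketch) == 0;  // low bit clear => smi
    }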
 
 
@@ -458,9 +1389,56 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
 }
 
 
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ testl(rbx, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+         Handle<Map>(receiver->map()));
+  __ j(not_equal, &miss);
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ pop(rbx);  // remove the return address
+  __ push(Operand(rsp, 0));  // receiver
+  __ push(rcx);  // name
+  __ push(rax);  // value
+  __ push(rbx);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+  __ TailCallRuntime(store_ic_property, 3);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ Move(rcx, Handle<String>(name));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -519,54 +1497,14 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
   __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
-
-  __ bind(&miss);
-  __ DecrementCounter(&Counters::keyed_load_field, 1);
-  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
-  // Return the generated code.
-  return GetCode(FIELD, name);
-}
-
-
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* callback) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
-}
-
-
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
-}
+  GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
 
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
-  // TODO(X64): Implement a real stub.
-  return Failure::InternalError();
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_field, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
@@ -575,9 +1513,9 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                   Map* transition,
                                                   String* name) {
   // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : key
+  //  -- rax     : value
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : key
   //  -- rsp[16] : receiver
   // -----------------------------------
   Label miss;
@@ -637,6 +1575,66 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
 }
 
 
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           LookupResult* lookup,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  LoadInterceptorCompiler compiler(name_reg);
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         object,
+                         holder,
+                         name,
+                         lookup,
+                         receiver,
+                         scratch1,
+                         scratch2,
+                         miss);
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ pop(scratch2);  // remove return address
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ Move(reg, Handle<AccessorInfo>(callback));  // callback info
+  __ push(reg);  // callback info
+  __ push(FieldOperand(reg, AccessorInfo::kDataOffset));  // callback data
+  __ push(name_reg);  // name
+  __ push(scratch2);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_callback_property =
+      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+  __ TailCallRuntime(load_callback_property, 5);
+}
+
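For reference, the five argument slots GenerateLoadCallback leaves for the kLoadCallbackProperty runtime call, read straight off the pushes above; this is a descriptive model only, not a real V8 structure, and the field names are illustrative:

    // Assumed slot order, starting just above the pushed return address.
    struct LoadCallbackArgsSketch {
      void* name;           // name_reg
      void* callback_data;  // the AccessorInfo::kDataOffset slot
      void* callback_info;  // the AccessorInfo handle itself
      void* holder;         // register returned by CheckPrototypes
      void* receiver;
    };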
+
 Register StubCompiler::CheckPrototypes(JSObject* object,
                                        Register object_reg,
                                        JSObject* holder,
@@ -721,224 +1719,4 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
 
 #undef __
 
-//-----------------------------------------------------------------------------
-// StubCompiler static helper functions
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register name,
-                       Register offset) {
-  ExternalReference key_offset(SCTableReference::keyReference(table));
-  Label miss;
-
-  __ movq(kScratchRegister, key_offset);
-  // Check that the key in the entry matches the name.
-  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
-  __ j(not_equal, &miss);
-  // Get the code entry from the cache.
-  // Use key_offset + kPointerSize, rather than loading value_offset.
-  __ movq(kScratchRegister,
-          Operand(kScratchRegister, offset, times_4, kPointerSize));
-  // Check that the flags match what we're looking for.
-  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
-  __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
-  __ cmpl(offset, Immediate(flags));
-  __ j(not_equal, &miss);
-
-  // Jump to the first instruction in the code stub.
-  __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ jmp(kScratchRegister);
-
-  __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
-  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = Builtins::builtin(Builtins::LoadIC_Miss);
-  } else {
-    code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                       int index,
-                                                       Register prototype) {
-  // Load the global or builtins object from the current context.
-  __ movq(prototype,
-             Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  // Load the global context from the global or builtins object.
-  __ movq(prototype,
-             FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
-  // Load the function from the global context.
-  __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
-  // Load the initial map.  The global functions all have initial maps.
-  __ movq(prototype,
-             FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
-  // Adjust for the number of properties stored in the holder.
-  index -= holder->map()->inobject_properties();
-  if (index < 0) {
-    // Get the property straight out of the holder.
-    int offset = holder->map()->instance_size() + (index * kPointerSize);
-    __ movq(dst, FieldOperand(src, offset));
-  } else {
-    // Calculate the offset into the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
-    __ movq(dst, FieldOperand(dst, offset));
-  }
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra) {
-  Label miss;
-  USE(extra);  // The register extra is not used on the X64 platform.
-  // Make sure that code is valid. The shifting code relies on the
-  // entry size being 16.
-  ASSERT(sizeof(Entry) == 16);
-
-  // Make sure the flags do not name a specific type.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Make sure that there are no register conflicts.
-  ASSERT(!scratch.is(receiver));
-  ASSERT(!scratch.is(name));
-
-  // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
-  // Use only the low 32 bits of the map pointer.
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(scratch, Immediate(flags));
-  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
-  // Probe the primary table.
-  ProbeTable(masm, flags, kPrimary, name, scratch);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(scratch, Immediate(flags));
-  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-  __ subl(scratch, name);
-  __ addl(scratch, Immediate(flags));
-  __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
-  // Probe the secondary table.
-  ProbeTable(masm, flags, kSecondary, name, scratch);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Builtins::Name storage_extend,
-                                      JSObject* object,
-                                      int index,
-                                      Map* transition,
-                                      Register receiver_reg,
-                                      Register name_reg,
-                                      Register scratch,
-                                      Label* miss_label) {
-  // Check that the object isn't a smi.
-  __ testl(receiver_reg, Immediate(kSmiTagMask));
-  __ j(zero, miss_label);
-
-  // Check that the map of the object hasn't changed.
-  __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-         Handle<Map>(object->map()));
-  __ j(not_equal, miss_label);
-
-  // Perform global security token check if needed.
-  if (object->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
-  }
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
-    // The properties must be extended before we can store the value.
-    // We jump to a runtime call that extends the properties array.
-    __ Move(rcx, Handle<Map>(transition));
-    Handle<Code> ic(Builtins::builtin(storage_extend));
-    __ Jump(ic, RelocInfo::CODE_TARGET);
-    return;
-  }
-
-  if (transition != NULL) {
-    // Update the map of the object; no write barrier updating is
-    // needed because the map is never in new space.
-    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-            Handle<Map>(transition));
-  }
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties is not going to change.
-  index -= object->map()->inobject_properties();
-
-  if (index < 0) {
-    // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    __ movq(FieldOperand(receiver_reg, offset), rax);
-
-    // Update the write barrier for the array address.
-    // Pass the value being stored in the now unused name_reg.
-    __ movq(name_reg, rax);
-    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
-  } else {
-    // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    // Get the properties array (optimistically).
-    __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ movq(FieldOperand(scratch, offset), rax);
-
-    // Update the write barrier for the array address.
-    // Pass the value being stored in the now unused name_reg.
-    __ movq(name_reg, rax);
-    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
-  }
-
-  // Return the value (register rax).
-  __ ret(0);
-}
-
-
-#undef __
-
-
 } }  // namespace v8::internal
index fa33d32..4fc2f3a 100644 (file)
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -118,6 +118,7 @@ test-api/HugeConsStringOutOfMemory: CRASH || FAIL
 test-api/OutOfMemory: CRASH || FAIL
 test-api/OutOfMemoryNested: CRASH || FAIL
 test-api/Threading: CRASH || FAIL
+test-api/Threading2: PASS || TIMEOUT
 test-api/TryCatchSourceInfo: CRASH || FAIL
 test-api/RegExpInterruption: PASS || TIMEOUT
 test-api/RegExpStringModification: PASS || TIMEOUT