Track JS allocations as they arrive with no effect on performance when tracking...
author     yurys@chromium.org <yurys@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 14 Oct 2013 12:41:28 +0000 (12:41 +0000)
committer  yurys@chromium.org <yurys@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 14 Oct 2013 12:41:28 +0000 (12:41 +0000)
BUG=277984
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/22852024

Patch from Alexandra Mikhaylova <amikhaylova@google.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17191 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
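
The feature is exposed through two new public methods on v8::HeapProfiler.
A minimal embedder-side sketch (assuming an initialized and entered
v8::Isolate; error handling omitted):

    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    // Drops compiled code and starts recording each JS allocation as it
    // happens, along with heap object population statistics.
    profiler->StartRecordingHeapAllocations();
    // ... run the JavaScript to be profiled ...
    // Stops recording and clears the collected population statistics.
    profiler->StopRecordingHeapAllocations();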

21 files changed:
include/v8-profiler.h
src/api.cc
src/assembler.cc
src/assembler.h
src/builtins.cc
src/heap-profiler.cc
src/heap-profiler.h
src/heap-snapshot-generator.cc
src/heap-snapshot-generator.h
src/heap.cc
src/mark-compact.cc
src/objects.cc
src/serialize.cc
src/serialize.h
src/spaces-inl.h
src/spaces.h
src/x64/code-stubs-x64.cc
src/x64/macro-assembler-x64.cc
src/x64/macro-assembler-x64.h
test/cctest/cctest.h
test/cctest/test-heap-profiler.cc

index 7016a79b81e5dd0e5755dadae0a8d1abdc9d726f..0882d6452732373293bf6deb2a4e3ba92c3eff0e 100644 (file)
@@ -475,6 +475,19 @@ class V8_EXPORT HeapProfiler {
    */
   void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
 
+  /**
+   * Starts recording JS allocations immediately as they arrive and begins
+   * tracking heap object population statistics.
+   */
+  void StartRecordingHeapAllocations();
+
+  /**
+   * Stops recording JS allocations and tracking of heap object population
+   * statistics, and clears all collected population statistics data.
+   */
+  void StopRecordingHeapAllocations();
+
+
  private:
   HeapProfiler();
   ~HeapProfiler();
index 32a3db643c950cdc8c7be0ce28d259d73ddb9a5b..469c7a1814df85e35e1e409684be5c22efa3437c 100644 (file)
@@ -7300,6 +7300,16 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
 }
 
 
+void HeapProfiler::StartRecordingHeapAllocations() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StartHeapAllocationsRecording();
+}
+
+
+void HeapProfiler::StopRecordingHeapAllocations() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StopHeapAllocationsRecording();
+}
+
+
 v8::Testing::StressType internal::Testing::stress_type_ =
     v8::Testing::kStressTypeOpt;
 
index c6228eb353eaad7da8a18f10f8c5a84c07215f3e..54ee69a25dca84ac47dc383c784562fdf15016a6 100644 (file)
@@ -1333,6 +1333,14 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
 }
 
 
+ExternalReference ExternalReference::record_object_allocation_function(
+  Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate,
+               FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
+}
+
+
 #ifndef V8_INTERPRETED_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state(
index 3bba98bb7dded9408319160478a39be062846a16..9888ac2ab5483e8a1fd61e5105a535331cf0a1a2 100644 (file)
@@ -728,6 +728,9 @@ class ExternalReference BASE_EMBEDDED {
 
   static ExternalReference get_make_code_young_function(Isolate* isolate);
 
+  // Support for tracking new heap object allocations.
+  static ExternalReference record_object_allocation_function(Isolate* isolate);
+
   // Deoptimization support.
   static ExternalReference new_deoptimizer_function(Isolate* isolate);
   static ExternalReference compute_output_frames_function(Isolate* isolate);
index 518c89372e6fdb04c89028a778e3c27f365d51ab..aaa8bc4d0cf2af17292b562712514dd143139f47 100644 (file)
@@ -274,7 +274,8 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
   }
 
   HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
-                                     elms->address() + size_delta));
+                                     elms->address() + size_delta,
+                                     elms->Size()));
   return FixedArrayBase::cast(HeapObject::FromAddress(
       elms->address() + to_trim * entry_size));
 }
index e66af3364d865b018bdb8110af24e1087555e8b9..6b159a98a338c7b2704ed924a59bc401f8425663 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "v8.h"
 
+#include "deoptimizer.h"
 #include "heap-profiler.h"
 #include "heap-snapshot-generator-inl.h"
 
@@ -35,7 +36,8 @@ namespace internal {
 
 HeapProfiler::HeapProfiler(Heap* heap)
     : snapshots_(new HeapSnapshotsCollection(heap)),
-      next_snapshot_uid_(1) {
+      next_snapshot_uid_(1),
+      is_tracking_allocations_(false) {
 }
 
 
@@ -132,14 +134,86 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
 }
 
 
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
-  snapshots_->ObjectMoveEvent(from, to);
+void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
+  snapshots_->ObjectMoveEvent(from, to, size);
 }
 
+
+void HeapProfiler::NewObjectEvent(Address addr, int size) {
+  snapshots_->NewObjectEvent(addr, size);
+}
+
+
+void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
+  snapshots_->UpdateObjectSizeEvent(addr, size);
+}
+
+
 void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
                                          RetainedObjectInfo* info) {
   // TODO(yurys, marja): Don't route this information through GlobalHandles.
   heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info);
 }
 
+
+void HeapProfiler::StartHeapAllocationsRecording() {
+  StartHeapObjectsTracking();
+  is_tracking_allocations_ = true;
+  DropCompiledCode();
+  snapshots_->UpdateHeapObjectsMap();
+}
+
+
+void HeapProfiler::StopHeapAllocationsRecording() {
+  StopHeapObjectsTracking();
+  is_tracking_allocations_ = false;
+  DropCompiledCode();
+}
+
+
+void HeapProfiler::RecordObjectAllocationFromMasm(Isolate* isolate,
+                                                  Address obj,
+                                                  int size) {
+  isolate->heap_profiler()->NewObjectEvent(obj, size);
+}
+
+
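+// Whether allocation-recording calls are emitted is decided at code
+// generation time (see MacroAssembler::Allocate), so code compiled before the
+// tracking mode changed can never match the new mode: deoptimize everything
+// and reset functions to lazy compilation so they are recompiled on demand.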
+void HeapProfiler::DropCompiledCode() {
+  Isolate* isolate = heap()->isolate();
+  HandleScope scope(isolate);
+
+  if (FLAG_concurrent_recompilation) {
+    isolate->optimizing_compiler_thread()->Flush();
+  }
+
+  Deoptimizer::DeoptimizeAll(isolate);
+
+  Handle<Code> lazy_compile =
+      Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile));
+
+  heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                            "switch allocations tracking");
+
+  DisallowHeapAllocation no_allocation;
+
+  HeapIterator iterator(heap());
+  HeapObject* obj = NULL;
+  while ((obj = iterator.next()) != NULL) {
+    if (obj->IsJSFunction()) {
+      JSFunction* function = JSFunction::cast(obj);
+      SharedFunctionInfo* shared = function->shared();
+
+      if (!shared->allows_lazy_compilation()) continue;
+      if (!shared->script()->IsScript()) continue;
+
+      Code::Kind kind = function->code()->kind();
+      if (kind == Code::FUNCTION || kind == Code::BUILTIN) {
+        function->set_code(*lazy_compile);
+        shared->set_code(*lazy_compile);
+      }
+    }
+  }
+}
+
+
 } }  // namespace v8::internal
index 5ae60fa923449458ab95670c902f3c99c63f251b..96ae273902ed604fb20a217220275148f68f5bbf 100644 (file)
@@ -63,13 +63,22 @@ class HeapProfiler {
 
   void StartHeapObjectsTracking();
   void StopHeapObjectsTracking();
+
+  static void RecordObjectAllocationFromMasm(Isolate* isolate,
+                                             Address obj,
+                                             int size);
+
   SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
   int GetSnapshotsCount();
   HeapSnapshot* GetSnapshot(int index);
   SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
   void DeleteAllSnapshots();
 
-  void ObjectMoveEvent(Address from, Address to);
+  void ObjectMoveEvent(Address from, Address to, int size);
+
+  void NewObjectEvent(Address addr, int size);
+
+  void UpdateObjectSizeEvent(Address addr, int size);
 
   void DefineWrapperClass(
       uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
@@ -82,12 +91,26 @@ class HeapProfiler {
 
   void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
 
+  bool is_tracking_allocations() {
+    return is_tracking_allocations_;
+  }
+
+  void StartHeapAllocationsRecording();
+  void StopHeapAllocationsRecording();
+
+  int FindUntrackedObjects() {
+    return snapshots_->FindUntrackedObjects();
+  }
+
+  void DropCompiledCode();
+
  private:
   Heap* heap() const { return snapshots_->heap(); }
 
   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
   List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
+  bool is_tracking_allocations_;
 };
 
 } }  // namespace v8::internal
index 5570362b8c1726f434bb3e715335f82c6145202d..444bebf8917405aacb2ea7b185b0d890492fb748 100644 (file)
@@ -397,7 +397,7 @@ void HeapObjectsMap::SnapshotGenerationFinished() {
 }
 
 
-void HeapObjectsMap::MoveObject(Address from, Address to) {
+void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
   ASSERT(to != NULL);
   ASSERT(from != NULL);
   if (from == to) return;
@@ -428,11 +428,26 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
     int from_entry_info_index =
         static_cast<int>(reinterpret_cast<intptr_t>(from_value));
     entries_.at(from_entry_info_index).addr = to;
+    // The size of an object can change during its lifetime, so to keep the
+    // information about the object in entries_ consistent we have to adjust
+    // its size when the object is migrated.
+    entries_.at(from_entry_info_index).size = object_size;
     to_entry->value = from_value;
   }
 }
 
 
+void HeapObjectsMap::NewObject(Address addr, int size) {
+  ASSERT(addr != NULL);
+  FindOrAddEntry(addr, size, false);
+}
+
+
+void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
+  FindOrAddEntry(addr, size, false);
+}
+
+
 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
   HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
                                               false);
@@ -445,7 +460,8 @@ SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
 
 
 SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
-                                                unsigned int size) {
+                                                unsigned int size,
+                                                bool accessed) {
   ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
   HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
                                               true);
@@ -453,14 +469,14 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
     int entry_index =
         static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
     EntryInfo& entry_info = entries_.at(entry_index);
-    entry_info.accessed = true;
+    entry_info.accessed = accessed;
     entry_info.size = size;
     return entry_info.id;
   }
   entry->value = reinterpret_cast<void*>(entries_.length());
   SnapshotObjectId id = next_id_;
   next_id_ += kObjectIdStep;
-  entries_.Add(EntryInfo(id, addr, size));
+  entries_.Add(EntryInfo(id, addr, size, accessed));
   ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
   return id;
 }
@@ -484,6 +500,27 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
 }
 
 
+int HeapObjectsMap::FindUntrackedObjects() {
+  HeapIterator iterator(heap_);
+  int untracked = 0;
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next()) {
+    HashMap::Entry* entry = entries_map_.Lookup(
+        obj->address(), ComputePointerHash(obj->address()), false);
+    if (entry == NULL) {
+      untracked++;
+    } else {
+      int entry_index = static_cast<int>(
+          reinterpret_cast<intptr_t>(entry->value));
+      EntryInfo& entry_info = entries_.at(entry_index);
+      CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+    }
+  }
+  return untracked;
+}
+
+
 SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
   UpdateHeapObjectsMap();
   time_intervals_.Add(TimeInterval(next_id_));
index c323f3cde282de350f88da721630bbbe685d3aee..602d6fde8f9126010c06f62227345c5ba3660cea 100644 (file)
@@ -227,8 +227,12 @@ class HeapObjectsMap {
 
   void SnapshotGenerationFinished();
   SnapshotObjectId FindEntry(Address addr);
-  SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
-  void MoveObject(Address from, Address to);
+  SnapshotObjectId FindOrAddEntry(Address addr,
+                                  unsigned int size,
+                                  bool accessed = true);
+  void MoveObject(Address from, Address to, int size);
+  void NewObject(Address addr, int size);
+  void UpdateObjectSize(Address addr, int size);
   SnapshotObjectId last_assigned_id() const {
     return next_id_ - kObjectIdStep;
   }
@@ -247,6 +251,10 @@ class HeapObjectsMap {
   static const SnapshotObjectId kGcRootsFirstSubrootId;
   static const SnapshotObjectId kFirstAvailableObjectId;
 
+  int FindUntrackedObjects();
+
+  void UpdateHeapObjectsMap();
+
  private:
   struct EntryInfo {
   EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
@@ -265,7 +273,6 @@ class HeapObjectsMap {
     uint32_t count;
   };
 
-  void UpdateHeapObjectsMap();
   void RemoveDeadEntries();
 
   SnapshotObjectId next_id_;
@@ -306,12 +313,22 @@ class HeapSnapshotsCollection {
     return ids_.FindOrAddEntry(object_addr, object_size);
   }
   Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
-  void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+  void ObjectMoveEvent(Address from, Address to, int size) {
+    ids_.MoveObject(from, to, size);
+  }
+  void NewObjectEvent(Address addr, int size) { ids_.NewObject(addr, size); }
+  void UpdateObjectSizeEvent(Address addr, int size) {
+    ids_.UpdateObjectSize(addr, size);
+  }
   SnapshotObjectId last_assigned_id() const {
     return ids_.last_assigned_id();
   }
   size_t GetUsedMemorySize() const;
 
+  int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
+
+  void UpdateHeapObjectsMap() { ids_.UpdateHeapObjectsMap(); }
+
  private:
   bool is_tracking_objects_;  // Whether tracking object moves is needed.
   List<HeapSnapshot*> snapshots_;
index b0335d0efdd5da06accdc84acf018f626cf8fd66..20fca9b512b13d080fa63fda61259f8a368ee0af 100644 (file)
@@ -2101,7 +2101,8 @@ class ScavengingVisitor : public StaticVisitorBase {
     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
       // Update NewSpace stats if necessary.
       RecordCopiedObject(heap, target);
-      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
+      HEAP_PROFILE(heap,
+                   ObjectMoveEvent(source->address(), target->address(), size));
       Isolate* isolate = heap->isolate();
       if (isolate->logger()->is_logging_code_events() ||
           isolate->cpu_profiler()->is_profiling()) {
@@ -4927,6 +4928,13 @@ MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
       alloc_memento->set_map_no_write_barrier(allocation_memento_map());
       ASSERT(site->map() == allocation_site_map());
       alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+      HeapProfiler* profiler = isolate()->heap_profiler();
+      if (profiler->is_tracking_allocations()) {
+        profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
+                                        object_size);
+        profiler->NewObjectEvent(alloc_memento->address(),
+                                 AllocationMemento::kSize);
+      }
     }
   }
 
index 65d838cdf95b895a8c86430f452f9b8e1715dcf2..2f8f5d74799652614098324fbcf98f6df2d5e455 100644 (file)
@@ -2759,7 +2759,7 @@ void MarkCompactCollector::MigrateObject(Address dst,
                                          Address src,
                                          int size,
                                          AllocationSpace dest) {
-  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst, size));
   ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
   ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
   if (dest == OLD_POINTER_SPACE) {
@@ -2942,7 +2942,9 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
   ASSERT(target_space == heap()->old_pointer_space() ||
          target_space == heap()->old_data_space());
   Object* result;
-  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
+  MaybeObject* maybe_result = target_space->AllocateRaw(
+      object_size,
+      PagedSpace::MOVE_OBJECT);
   if (maybe_result->ToObject(&result)) {
     HeapObject* target = HeapObject::cast(result);
     MigrateObject(target->address(),
@@ -3015,7 +3017,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
 
       int size = object->Size();
 
-      MaybeObject* target = space->AllocateRaw(size);
+      MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
       if (target->IsFailure()) {
         // OS refused to give us memory.
         V8::FatalProcessOutOfMemory("Evacuation");
index f39b760402e1f05a937dbaac7d278aa277ca2855..530fb6d804d5d7eb5a4bccd380eaa40b60733ff3 100644 (file)
@@ -2284,6 +2284,13 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
       MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
     }
   }
+
+  // The array may not move during GC, but its recorded size still has to
+  // be adjusted after trimming.
+  HeapProfiler* profiler = heap->isolate()->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
+  }
 }
 
 
index ef1461f1e9b6b5b634055cea1be441934495c90f..e7dbbe5afbee2dd88a2bba4f87d6cbac609f1994 100644 (file)
@@ -581,6 +581,10 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
       UNCLASSIFIED,
       63,
       "Heap::allocation_sites_list_address()");
+  Add(ExternalReference::record_object_allocation_function(isolate).address(),
+      UNCLASSIFIED,
+      64,
+      "HeapProfiler::RecordObjectAllocationFromMasm");
 
   // Add a small set of deopt entry addresses to encoder without generating the
   // deopt table code, which isn't possible at deserialization time.
@@ -591,7 +595,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
         entry,
         Deoptimizer::LAZY,
         Deoptimizer::CALCULATE_ENTRY_ADDRESS);
-    Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
+    Add(address, LAZY_DEOPTIMIZATION, 65 + entry, "lazy_deopt");
   }
 }
 
index 020a744fc0ea3f8e78acb9ce3fbfb36027dd0861..9d6685be0e6579e468fb5a0ec1efc77b279af0cb 100644 (file)
@@ -366,6 +366,10 @@ class Deserializer: public SerializerDeserializer {
   Address Allocate(int space_index, int size) {
     Address address = high_water_[space_index];
     high_water_[space_index] = address + size;
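+    // Objects materialized by the deserializer bypass the normal allocation
+    // paths, so report them to the heap profiler explicitly.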
+    HeapProfiler* profiler = isolate_->heap_profiler();
+    if (profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(address, size);
+    }
     return address;
   }
 
index be2ae2a57db248234ab9f92726beec0168a49709..7178b5783b94be7a063171fcc15445bd47d3c0f4 100644 (file)
@@ -28,6 +28,7 @@
 #ifndef V8_SPACES_INL_H_
 #define V8_SPACES_INL_H_
 
+#include "heap-profiler.h"
 #include "isolate.h"
 #include "spaces.h"
 #include "v8memory.h"
@@ -273,12 +274,18 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
 
 
 // Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
+                                     AllocationType event) {
+  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+
   HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
+    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(object->address(), size_in_bytes);
+    }
     return object;
   }
 
@@ -291,6 +298,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
+    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(object->address(), size_in_bytes);
+    }
     return object;
   }
 
@@ -299,6 +309,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
+    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(object->address(), size_in_bytes);
+    }
     return object;
   }
 
@@ -332,10 +345,15 @@ MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }
 
-  Object* obj = HeapObject::FromAddress(old_top);
+  HeapObject* obj = HeapObject::FromAddress(old_top);
   allocation_info_.top += size_in_bytes;
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
+  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+  if (profiler != NULL && profiler->is_tracking_allocations()) {
+    profiler->NewObjectEvent(obj->address(), size_in_bytes);
+  }
+
   return obj;
 }
 
index 43f44a5c707d0c41632215943f2a553296a0f611..6144c95d14d689876dc2008c5c506ecfd37faa51 100644 (file)
@@ -1714,9 +1714,16 @@ class PagedSpace : public Space {
   Address* allocation_top_address() { return &allocation_info_.top; }
   Address* allocation_limit_address() { return &allocation_info_.limit; }
 
+  // Distinguishes fresh allocations, which are reported to the heap profiler
+  // when allocation tracking is on, from objects moved by the GC, which are
+  // reported through ObjectMoveEvent instead.
+  enum AllocationType {
+    NEW_OBJECT,
+    MOVE_OBJECT
+  };
+
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
-  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT inline MaybeObject* AllocateRaw(
+      int size_in_bytes,
+      AllocationType event = NEW_OBJECT);
 
   virtual bool ReserveSpace(int bytes);
 
index 1bdfda992e1e60e2a428d2aa1dd8b2300fe3e632..7e6b63c56143ca9680820ccb16fa551dfc48036e 100644 (file)
@@ -4226,9 +4226,15 @@ void SubStringStub::Generate(MacroAssembler* masm) {
     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ testb(rbx, Immediate(kStringEncodingMask));
-    __ j(zero, &two_byte_slice, Label::kNear);
+    // Use far jumps when allocation tracking is on: the call to
+    // RecordObjectAllocation in MacroAssembler::Allocate makes the generated
+    // code too large for near jumps.
+    Label::Distance jump_distance =
+        masm->isolate()->heap_profiler()->is_tracking_allocations()
+        ? Label::kFar
+        : Label::kNear;
+    __ j(zero, &two_byte_slice, jump_distance);
     __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
-    __ jmp(&set_slice_header, Label::kNear);
+    __ jmp(&set_slice_header, jump_distance);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
     __ bind(&set_slice_header);
index b301f29e31f804cdca1817216414d1b3f354fa5e..b3336e6eabbce0f7b9d49033035e622b2150cc37 100644 (file)
@@ -4093,6 +4093,10 @@ void MacroAssembler::Allocate(int object_size,
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  if (isolate()->heap_profiler()->is_tracking_allocations()) {
+    RecordObjectAllocation(isolate(), result, object_size);
+  }
+
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
@@ -4172,6 +4176,10 @@ void MacroAssembler::Allocate(Register object_size,
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  if (isolate()->heap_profiler()->is_tracking_allocations()) {
+    RecordObjectAllocation(isolate(), result, object_size);
+  }
+
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
@@ -4933,6 +4941,38 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
 }
 
 
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+                                            Register object,
+                                            Register object_size) {
+  FrameScope frame(this, StackFrame::EXIT);
+  PushSafepointRegisters();
+  PrepareCallCFunction(3);
+  // The object register may be rdx, which is written below, so stash it first.
+  movq(kScratchRegister, object);
+  movq(arg_reg_3, object_size);
+  movq(arg_reg_2, kScratchRegister);
+  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+  CallCFunction(
+      ExternalReference::record_object_allocation_function(isolate), 3);
+  PopSafepointRegisters();
+}
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+                                            Register object,
+                                            int object_size) {
+  FrameScope frame(this, StackFrame::EXIT);
+  PushSafepointRegisters();
+  PrepareCallCFunction(3);
+  movq(arg_reg_2, object);
+  movq(arg_reg_3, Immediate(object_size));
+  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+  CallCFunction(
+      ExternalReference::record_object_allocation_function(isolate), 3);
+  PopSafepointRegisters();
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
index 3d5a4560bb0227789e7092031bb0a35e12588b7e..931a4eb12fac3baf70b1ff86f5311174832991b3 100644 (file)
@@ -1115,6 +1115,15 @@ class MacroAssembler: public Assembler {
                 Label* gc_required,
                 AllocationFlags flags);
 
+  // Record a JS object allocation if allocations tracking mode is on.
+  void RecordObjectAllocation(Isolate* isolate,
+                              Register object,
+                              Register object_size);
+
+  void RecordObjectAllocation(Isolate* isolate,
+                              Register object,
+                              int object_size);
+
   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. Make sure that no pointers are left to the
   // object(s) no longer allocated as they would be invalid when allocation is
index bc800399abb656997748c8d92a66a8f2f9ac5be6..7f84c259f0c4cb1b135ae6a73d465d631d858e31 100644 (file)
@@ -348,4 +348,26 @@ static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
 }
 
 
+// Helper class for tracking and checking JS heap allocations.
+// To enable allocation tracking checks in a test, simply create an instance
+// of this class in the test body.
+class HeapObjectsTracker {
+ public:
+  HeapObjectsTracker() {
+    heap_profiler_ = i::Isolate::Current()->heap_profiler();
+    CHECK_NE(NULL, heap_profiler_);
+    heap_profiler_->StartHeapAllocationsRecording();
+  }
+
+  ~HeapObjectsTracker() {
+    i::Isolate::Current()->heap()->CollectAllAvailableGarbage();
+    CHECK_EQ(0, heap_profiler_->FindUntrackedObjects());
+    heap_profiler_->StopHeapAllocationsRecording();
+  }
+
+ private:
+  i::HeapProfiler* heap_profiler_;
+};
+
+
 #endif  // ifndef CCTEST_H_
index 59f7a258b1094fa5a0ddb2b2e42a2170f2560f55..6b452960b5ecb3c6746459583213b4e6e653ead3 100644 (file)
@@ -2005,3 +2005,19 @@ TEST(JSFunctionHasCodeLink) {
       GetProperty(foo_func, v8::HeapGraphEdge::kInternal, "code");
   CHECK_NE(NULL, code);
 }
+
+
+// Example of using JS allocation tracking checks in a test.
+TEST(HeapObjectsTracker) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  HeapObjectsTracker tracker;
+  CompileRun("var a = 1.2");
+  CompileRun("var a = 1.2; var b = 1.0; var c = 1.0;");
+  CompileRun(
+    "var a = [];"
+    "for (var i = 0; i < 5; ++i)"
+    "    a[i] = i;\n"
+    "for (var i = 0; i < 3; ++i)"
+    "    a.shift();\n");
+}