Use an allocation site scratchpad to speed up allocation site processing during GC.
author hpayer@chromium.org <hpayer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 18 Dec 2013 21:23:56 +0000 (21:23 +0000)
committer hpayer@chromium.org <hpayer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 18 Dec 2013 21:23:56 +0000 (21:23 +0000)
BUG=
R=mvstanton@chromium.org

Review URL: https://codereview.chromium.org/99133017

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18367 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/heap-inl.h
src/heap.cc
src/heap.h
src/mark-compact.cc
src/objects-inl.h
src/objects.h

diff --git a/src/heap-inl.h b/src/heap-inl.h
index 3094016..3dce348 100644
@@ -490,7 +490,15 @@ void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
         object, true);
     if (memento != NULL) {
       ASSERT(memento->IsValid());
-      memento->GetAllocationSite()->IncrementMementoFoundCount();
+      bool add_to_scratchpad =
+          memento->GetAllocationSite()->IncrementMementoFoundCount();
+      Heap* heap = object->GetIsolate()->heap();
+      if (add_to_scratchpad && heap->allocation_sites_scratchpad_length <
+              kAllocationSiteScratchpadSize) {
+        heap->allocation_sites_scratchpad[
+            heap->allocation_sites_scratchpad_length++] =
+                memento->GetAllocationSite();
+      }
     }
   }
 }
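
Note: the following is a minimal standalone sketch of the recording side of the scratchpad, using hypothetical stand-ins (Site, kScratchpadSize, RecordFeedback) for AllocationSite, kAllocationSiteScratchpadSize, and the UpdateAllocationSiteFeedback logic above; it illustrates the pattern, not V8's actual code.

struct Site {
  int memento_found_count = 0;
  Site* weak_next = nullptr;
  // Returns true only for the first memento seen at this site.
  bool IncrementMementoFoundCount() { return memento_found_count++ == 0; }
};

constexpr int kScratchpadSize = 256;
Site* scratchpad[kScratchpadSize];
int scratchpad_length = 0;

void RecordFeedback(Site* site) {
  // A site is appended at most once (on its first sighting only); once the
  // scratchpad is full, further sites are skipped and processing later
  // falls back to walking the full allocation site list.
  if (site->IncrementMementoFoundCount() &&
      scratchpad_length < kScratchpadSize) {
    scratchpad[scratchpad_length++] = site;
  }
}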
diff --git a/src/heap.cc b/src/heap.cc
index f6c6ae6..c42c445 100644
@@ -148,6 +148,7 @@ Heap::Heap()
 #ifdef VERIFY_HEAP
       no_weak_object_verification_scope_depth_(0),
 #endif
+      allocation_sites_scratchpad_length(0),
       promotion_queue_(this),
       configured_(false),
       chunks_queued_for_free_(NULL),
@@ -503,25 +504,45 @@ void Heap::RepairFreeListsAfterBoot() {
 }
 
 
-void Heap::GarbageCollectionEpilogue() {
+void Heap::ProcessPretenuringFeedback() {
   if (FLAG_allocation_site_pretenuring) {
     int tenure_decisions = 0;
     int dont_tenure_decisions = 0;
     int allocation_mementos_found = 0;
-
-    Object* cur = allocation_sites_list();
-    while (cur->IsAllocationSite()) {
-      AllocationSite* casted = AllocationSite::cast(cur);
-      allocation_mementos_found += casted->memento_found_count()->value();
-      if (casted->DigestPretenuringFeedback()) {
-        if (casted->GetPretenureMode() == TENURED) {
+    int allocation_sites = 0;
+    int active_allocation_sites = 0;
+
+    // If the scratchpad overflowed, we have to iterate over the allocation
+    // sites list.
+    bool use_scratchpad =
+        allocation_sites_scratchpad_length < kAllocationSiteScratchpadSize;
+
+    int i = 0;
+    Object* list_element = allocation_sites_list();
+    while (use_scratchpad ?
+              i < allocation_sites_scratchpad_length :
+              list_element->IsAllocationSite()) {
+      AllocationSite* site = use_scratchpad ?
+        allocation_sites_scratchpad[i] : AllocationSite::cast(list_element);
+      allocation_mementos_found += site->memento_found_count()->value();
+      if (site->memento_found_count()->value() > 0) {
+        active_allocation_sites++;
+      }
+      if (site->DigestPretenuringFeedback()) {
+        if (site->GetPretenureMode() == TENURED) {
           tenure_decisions++;
         } else {
           dont_tenure_decisions++;
         }
       }
-      cur = casted->weak_next();
+      allocation_sites++;
+      if (use_scratchpad) {
+        i++;
+      } else {
+        list_element = site->weak_next();
+      }
     }
+    allocation_sites_scratchpad_length = 0;
 
     // TODO(mvstanton): Pretenure decisions are only made once for an allocation
     // site. Find a sane way to decide about revisiting the decision later.
@@ -530,14 +551,21 @@ void Heap::GarbageCollectionEpilogue() {
         (allocation_mementos_found > 0 ||
          tenure_decisions > 0 ||
          dont_tenure_decisions > 0)) {
-      PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
-             "(%d, %d, %d)\n",
+      PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
+             "#mementos, #tenure decisions, #donttenure decisions) "
+             "(%s, %d, %d, %d, %d, %d)\n",
+             use_scratchpad ? "use scratchpad" : "use list",
+             allocation_sites,
+             active_allocation_sites,
              allocation_mementos_found,
              tenure_decisions,
              dont_tenure_decisions);
     }
   }
+}
+
 
+void Heap::GarbageCollectionEpilogue() {
   store_buffer()->GCEpilogue();
 
   // In release mode, we only zap the from space under heap verification.
@@ -1564,6 +1592,8 @@ void Heap::Scavenge() {
   IncrementYoungSurvivorsCounter(static_cast<int>(
       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
 
+  ProcessPretenuringFeedback();
+
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
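
Continuing the sketch above (same hypothetical Site type): the processing side picks its traversal source the same way the diff does. Note that use_scratchpad is also false when the scratchpad filled up exactly, since a full scratchpad may have dropped sites, so the conservative full-list walk is used.

Site* site_list_head = nullptr;  // head of the weakly linked site list

void ProcessFeedback() {
  bool use_scratchpad = scratchpad_length < kScratchpadSize;
  int i = 0;
  Site* element = site_list_head;
  while (use_scratchpad ? i < scratchpad_length : element != nullptr) {
    Site* site = use_scratchpad ? scratchpad[i] : element;
    // ... digest site->memento_found_count here ...
    if (use_scratchpad) {
      i++;
    } else {
      element = element->weak_next;
    }
  }
  scratchpad_length = 0;  // reset for the next GC cycle
}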
diff --git a/src/heap.h b/src/heap.h
index 871c4d8..d05f019 100644
@@ -2057,6 +2057,11 @@ class Heap {
   void GarbageCollectionPrologue();
   void GarbageCollectionEpilogue();
 
+  // Pretenuring decisions are made based on feedback collected during new
+  // space evacuation. Note that between feedback collection and calling this
+  // method, objects in old space must not move.
+  void ProcessPretenuringFeedback();
+
   // Checks whether a global GC is necessary
   GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                           const char** reason);
@@ -2383,6 +2388,11 @@ class Heap {
   int no_weak_object_verification_scope_depth_;
 #endif
 
+
+  static const int kAllocationSiteScratchpadSize = 256;
+  int allocation_sites_scratchpad_length;
+  AllocationSite* allocation_sites_scratchpad[kAllocationSiteScratchpadSize];
+
   static const int kMaxMarkSweepsInIdleRound = 7;
   static const int kIdleScavengeThreshold = 5;
 
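Sizing note: the scratchpad is a fixed array embedded in Heap, so with 8-byte pointers it adds 256 * 8 = 2048 bytes (plus the length field) regardless of how many sites are live. An illustrative compile-time check, assuming a 64-bit build:

static_assert(256 * sizeof(void*) == 2048,
              "scratchpad payload is 2 KB with 8-byte pointers");
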
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index c0e1039..1e78093 100644
@@ -3361,6 +3361,13 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     EvacuateNewSpace();
   }
 
+  // We have to traverse our allocation sites scratchpad, which contains raw
+  // pointers, before we move objects. During new space evacuation we
+  // gathered pretenuring statistics. The recorded allocation sites may not
+  // be valid after compacting old space.
+  heap()->ProcessPretenuringFeedback();
+
+
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuatePages();
   }
index 994c991..311afc0 100644 (file)
@@ -1370,9 +1370,12 @@ inline DependentCode::DependencyGroup AllocationSite::ToDependencyGroup(
 }
 
 
-inline void AllocationSite::IncrementMementoFoundCount() {
+inline bool AllocationSite::IncrementMementoFoundCount() {
+  if (IsZombie()) return false;
+
   int value = memento_found_count()->value();
   set_memento_found_count(Smi::FromInt(value + 1));
+  return value == 0;
 }
 
 
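For illustration, the new boolean contract can be exercised with the hypothetical Site type from the first sketch (the real V8 version additionally returns false for zombie sites, so they are never counted or added to the scratchpad):

#include <cassert>

int main() {
  Site s;
  assert(s.IncrementMementoFoundCount());   // first memento: caller records s
  assert(!s.IncrementMementoFoundCount());  // later mementos: no re-record
  return 0;
}
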
diff --git a/src/objects.h b/src/objects.h
index 52d15af..0fa8304 100644
@@ -8136,7 +8136,9 @@ class AllocationSite: public Struct {
   class UnusedBits:             public BitField<int,          15, 14> {};
   class DoNotInlineBit:         public BitField<bool,         29,  1> {};
 
-  inline void IncrementMementoFoundCount();
+  // Increments the memento found counter and returns true if this is the
+  // first memento found for the given allocation site.
+  inline bool IncrementMementoFoundCount();
 
   inline void IncrementMementoCreateCount();