--verify-predictable mode added for ensuring that GC behaves deterministically.
author     ishell@chromium.org <ishell@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Fri, 20 Jun 2014 07:35:48 +0000 (07:35 +0000)
committer  ishell@chromium.org <ishell@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Fri, 20 Jun 2014 07:35:48 +0000 (07:35 +0000)
To use it, pass verifypredictable=on to the make tool or specify v8_enable_verify_predictable=1 in GYP_DEFINES.
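
For example, a minimal sketch of both options (assuming a standard V8 checkout and the usual x64.release make target; exact targets and output paths may differ in your setup):

  make x64.release verifypredictable=on
  # or, when generating build files through GYP directly:
  GYP_DEFINES="v8_enable_verify_predictable=1" make x64.release

A binary built this way honors --verify-predictable: on heap teardown d8 prints the running allocations digest ("### Allocations = ..., hash = ..."), and --dump_allocations_digest_at_alloc=N additionally dumps it every N-th allocation, so two runs of the same script can be compared for GC divergence.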

R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/325553002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21892 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

14 files changed:
Makefile
build/features.gypi
src/builtins.cc
src/d8.cc
src/flag-definitions.h
src/heap-inl.h
src/heap.cc
src/heap.h
src/mark-compact.cc
src/objects-printer.cc
src/runtime.cc
src/serialize.cc
src/serialize.h
src/spaces-inl.h

index f49be61..d5ce783 100644
--- a/Makefile
+++ b/Makefile
@@ -70,6 +70,10 @@ ifeq ($(backtrace), off)
 else
   GYPFLAGS += -Dv8_enable_backtrace=1
 endif
+# verifypredictable=on
+ifeq ($(verifypredictable), on)
+  GYPFLAGS += -Dv8_enable_verify_predictable=1
+endif
 # snapshot=off
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
index e8f5b2f..5d53c95 100644
--- a/build/features.gypi
+++ b/build/features.gypi
@@ -41,6 +41,8 @@
 
     'v8_use_snapshot%': 'true',
 
+    'v8_enable_verify_predictable%': 0,
+
     # With post mortem support enabled, metadata is embedded into libv8 that
     # describes various parameters of the VM for use by debuggers. See
     # tools/gen-postmortem-metadata.py for details.
@@ -74,6 +76,9 @@
       ['v8_enable_verify_heap==1', {
         'defines': ['VERIFY_HEAP',],
       }],
+      ['v8_enable_verify_predictable==1', {
+        'defines': ['VERIFY_PREDICTABLE',],
+      }],
       ['v8_interpreted_regexp==1', {
         'defines': ['V8_INTERPRETED_REGEXP',],
       }],
index 503d9a6..08ca122 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -239,12 +239,8 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
 
   FixedArrayBase* new_elms =
       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
-  HeapProfiler* profiler = heap->isolate()->heap_profiler();
-  if (profiler->is_tracking_object_moves()) {
-    profiler->ObjectMoveEvent(elms->address(),
-                              new_elms->address(),
-                              new_elms->Size());
-  }
+
+  heap->OnMoveEvent(new_elms, elms, new_elms->Size());
   return new_elms;
 }
 
index 03356e1..ccdb9e7 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -290,9 +290,18 @@ int PerIsolateData::RealmIndexOrThrow(
 
 #ifndef V8_SHARED
 // performance.now() returns a time stamp as double, measured in milliseconds.
+// When FLAG_verify_predictable mode is enabled it returns current value
+// of Heap::allocations_count().
 void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
-  args.GetReturnValue().Set(delta.InMillisecondsF());
+  if (i::FLAG_verify_predictable) {
+    Isolate* v8_isolate = args.GetIsolate();
+    i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap();
+    args.GetReturnValue().Set(heap->synthetic_time());
+
+  } else {
+    i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
+    args.GetReturnValue().Set(delta.InMillisecondsF());
+  }
 }
 #endif  // !V8_SHARED
 
index 1d83481..14bea60 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -866,6 +866,24 @@ DEFINE_implication(print_all_code, trace_codegen)
 #endif
 #endif
 
+
+//
+// VERIFY_PREDICTABLE related flags
+//
+#undef FLAG
+
+#ifdef VERIFY_PREDICTABLE
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+
+DEFINE_bool(verify_predictable, false,
+            "this mode is used for checking that V8 behaves predictably")
+DEFINE_int(dump_allocations_digest_at_alloc, 0,
+          "dump allocations digest each n-th allocation")
+
+
 //
 // Read-only flags
 //
index 2e80452..2de7616 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -7,6 +7,7 @@
 
 #include <cmath>
 
+#include "src/cpu-profiler.h"
 #include "src/heap.h"
 #include "src/heap-profiler.h"
 #include "src/isolate.h"
@@ -180,7 +181,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
   ASSERT(AllowHandleAllocation::IsAllowed());
   ASSERT(AllowHeapAllocation::IsAllowed());
   ASSERT(gc_state_ == NOT_IN_GC);
-  HeapProfiler* profiler = isolate_->heap_profiler();
 #ifdef DEBUG
   if (FLAG_gc_interval >= 0 &&
       AllowAllocationFailure::IsAllowed(isolate_) &&
@@ -200,8 +200,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
         retry_space != NEW_SPACE) {
       space = retry_space;
     } else {
-      if (profiler->is_tracking_allocations() && allocation.To(&object)) {
-        profiler->AllocationEvent(object->address(), size_in_bytes);
+      if (allocation.To(&object)) {
+        OnAllocationEvent(object, size_in_bytes);
       }
       return allocation;
     }
@@ -212,7 +212,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
   } else if (OLD_DATA_SPACE == space) {
     allocation = old_data_space_->AllocateRaw(size_in_bytes);
   } else if (CODE_SPACE == space) {
-    allocation = code_space_->AllocateRaw(size_in_bytes);
+    if (size_in_bytes <= code_space()->AreaSize()) {
+      allocation = code_space_->AllocateRaw(size_in_bytes);
+    } else {
+      // Large code objects are allocated in large object space.
+      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+    }
   } else if (LO_SPACE == space) {
     allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
   } else if (CELL_SPACE == space) {
@@ -223,14 +228,99 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
     ASSERT(MAP_SPACE == space);
     allocation = map_space_->AllocateRaw(size_in_bytes);
   }
-  if (allocation.IsRetry()) old_gen_exhausted_ = true;
-  if (profiler->is_tracking_allocations() && allocation.To(&object)) {
-    profiler->AllocationEvent(object->address(), size_in_bytes);
+  if (allocation.To(&object)) {
+    OnAllocationEvent(object, size_in_bytes);
+  } else {
+    old_gen_exhausted_ = true;
   }
   return allocation;
 }
 
 
+void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
+  HeapProfiler* profiler = isolate_->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->AllocationEvent(object->address(), size_in_bytes);
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(object);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAlloctionsHash();
+    }
+  }
+}
+
+
+void Heap::OnMoveEvent(HeapObject* target,
+                       HeapObject* source,
+                       int size_in_bytes) {
+  HeapProfiler* heap_profiler = isolate_->heap_profiler();
+  if (heap_profiler->is_tracking_object_moves()) {
+    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+                                   size_in_bytes);
+  }
+
+  if (isolate_->logger()->is_logging_code_events() ||
+      isolate_->cpu_profiler()->is_profiling()) {
+    if (target->IsSharedFunctionInfo()) {
+      PROFILE(isolate_, SharedFunctionInfoMoveEvent(
+          source->address(), target->address()));
+    }
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(source);
+    UpdateAllocationsHash(target);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAlloctionsHash();
+    }
+  }
+}
+
+
+void Heap::UpdateAllocationsHash(HeapObject* object) {
+  Address object_address = object->address();
+  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
+  AllocationSpace allocation_space = memory_chunk->owner()->identity();
+
+  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
+  uint32_t value =
+      static_cast<uint32_t>(object_address - memory_chunk->address()) |
+      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
+
+  UpdateAllocationsHash(value);
+}
+
+
+void Heap::UpdateAllocationsHash(uint32_t value) {
+  uint16_t c1 = static_cast<uint16_t>(value);
+  uint16_t c2 = static_cast<uint16_t>(value >> 16);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
+}
+
+
+void Heap::PrintAlloctionsHash() {
+  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash);
+}
+
+
 void Heap::FinalizeExternalString(String* string) {
   ASSERT(string->IsExternalString());
   v8::String::ExternalStringResourceBase** resource_addr =
index 7260e7a..1f55485 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -64,7 +64,6 @@ Heap::Heap()
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
-      linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
       global_ic_age_(0),
       flush_monomorphic_ics_(false),
@@ -79,6 +78,9 @@ Heap::Heap()
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
+      allocations_count_(0),
+      raw_allocations_hash_(0),
+      dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
       ms_count_(0),
       gc_count_(0),
       remembered_unmapped_pages_index_(0),
@@ -1957,19 +1959,7 @@ class ScavengingVisitor : public StaticVisitorBase {
     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
       // Update NewSpace stats if necessary.
       RecordCopiedObject(heap, target);
-      Isolate* isolate = heap->isolate();
-      HeapProfiler* heap_profiler = isolate->heap_profiler();
-      if (heap_profiler->is_tracking_object_moves()) {
-        heap_profiler->ObjectMoveEvent(source->address(), target->address(),
-                                       size);
-      }
-      if (isolate->logger()->is_logging_code_events() ||
-          isolate->cpu_profiler()->is_profiling()) {
-        if (target->IsSharedFunctionInfo()) {
-          PROFILE(isolate, SharedFunctionInfoMoveEvent(
-              source->address(), target->address()));
-        }
-      }
+      heap->OnMoveEvent(target, source, size);
     }
 
     if (marks_handling == TRANSFER_MARKS) {
@@ -2224,6 +2214,7 @@ static void InitializeScavengingVisitorsTables() {
 
 void Heap::SelectScavengingVisitorsTable() {
   bool logging_and_profiling =
+      FLAG_verify_predictable ||
       isolate()->logger()->is_logging() ||
       isolate()->cpu_profiler()->is_profiling() ||
       (isolate()->heap_profiler() != NULL &&
@@ -3338,29 +3329,28 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
 }
 
 
-AllocationResult Heap::AllocateCode(int object_size,
-                                bool immovable) {
+AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
   ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
-  AllocationResult allocation;
-  // Large code objects and code objects which should stay at a fixed address
-  // are allocated in large object space.
+  AllocationResult allocation =
+      AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
+
   HeapObject* result;
-  bool force_lo_space = object_size > code_space()->AreaSize();
-  if (force_lo_space) {
-    allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
-  } else {
-    allocation = AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
-  }
   if (!allocation.To(&result)) return allocation;
 
-  if (immovable && !force_lo_space &&
-     // Objects on the first page of each space are never moved.
-     !code_space_->FirstPage()->Contains(result->address())) {
-    // Discard the first code allocation, which was on a page where it could be
-    // moved.
-    CreateFillerObjectAt(result->address(), object_size);
-    allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
-    if (!allocation.To(&result)) return allocation;
+  if (immovable) {
+    Address address = result->address();
+    // Code objects which should stay at a fixed address are allocated either
+    // in the first page of code space (objects on the first page of each space
+    // are never moved) or in large object space.
+    if (!code_space_->FirstPage()->Contains(address) &&
+        MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
+      // Discard the first code allocation, which was on a page where it could
+      // be moved.
+      CreateFillerObjectAt(result->address(), object_size);
+      allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+      if (!allocation.To(&result)) return allocation;
+      OnAllocationEvent(result, object_size);
+    }
   }
 
   result->set_map_no_write_barrier(code_map());
@@ -3387,15 +3377,10 @@ AllocationResult Heap::CopyCode(Code* code) {
     new_constant_pool = empty_constant_pool_array();
   }
 
+  HeapObject* result;
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
-  if (obj_size > code_space()->AreaSize()) {
-    allocation = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
-  } else {
-    allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
-  }
-
-  HeapObject* result;
+  allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   // Copy code object.
@@ -3445,14 +3430,9 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
   size_t relocation_offset =
       static_cast<size_t>(code->instruction_end() - old_addr);
 
-  AllocationResult allocation;
-  if (new_obj_size > code_space()->AreaSize()) {
-    allocation = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
-  } else {
-    allocation = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
-  }
-
   HeapObject* result;
+  AllocationResult allocation =
+      AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   // Copy code object.
@@ -5259,6 +5239,10 @@ void Heap::TearDown() {
     PrintF("\n\n");
   }
 
+  if (FLAG_verify_predictable) {
+    PrintAlloctionsHash();
+  }
+
   TearDownArrayBuffers();
 
   isolate_->global_handles()->TearDown();
index 97de93e..cf1d0ab 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -657,9 +657,6 @@ class Heap {
   Address always_allocate_scope_depth_address() {
     return reinterpret_cast<Address>(&always_allocate_scope_depth_);
   }
-  bool linear_allocation() {
-    return linear_allocation_scope_depth_ != 0;
-  }
 
   Address* NewSpaceAllocationTopAddress() {
     return new_space_.allocation_top_address();
@@ -977,6 +974,13 @@ class Heap {
 #endif
   }
 
+  // Number of "runtime allocations" done so far.
+  uint32_t allocations_count() { return allocations_count_; }
+
+  // Returns deterministic "time" value in ms. Works only with
+  // FLAG_verify_predictable.
+  double synthetic_time() { return allocations_count_ / 100.0; }
+
   // Print short heap statistics.
   void PrintShortHeapStatistics();
 
@@ -1437,6 +1441,17 @@ class Heap {
   static void FatalProcessOutOfMemory(const char* location,
                                       bool take_snapshot = false);
 
+  // This event is triggered after successful allocation of a new object made
+  // by runtime. Allocations of target space for object evacuation do not
+  // trigger the event. In order to track ALL allocations one must turn off
+  // FLAG_inline_new and FLAG_use_allocation_folding.
+  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+
+  // This event is triggered after object is moved to a new place.
+  inline void OnMoveEvent(HeapObject* target,
+                          HeapObject* source,
+                          int size_in_bytes);
+
  protected:
   // Methods made available to tests.
 
@@ -1526,7 +1541,6 @@ class Heap {
   int sweep_generation_;
 
   int always_allocate_scope_depth_;
-  int linear_allocation_scope_depth_;
 
   // For keeping track of context disposals.
   int contexts_disposed_;
@@ -1552,8 +1566,20 @@ class Heap {
   // Returns the amount of external memory registered since last global gc.
   int64_t PromotedExternalMemorySize();
 
-  unsigned int ms_count_;  // how many mark-sweep collections happened
-  unsigned int gc_count_;  // how many gc happened
+  // How many "runtime allocations" happened.
+  uint32_t allocations_count_;
+
+  // Running hash over allocations performed.
+  uint32_t raw_allocations_hash_;
+
+  // Countdown counter, dumps allocation hash when 0.
+  uint32_t dump_allocations_hash_countdown_;
+
+  // How many mark-sweep collections happened.
+  unsigned int ms_count_;
+
+  // How many gc happened.
+  unsigned int gc_count_;
 
   // For post mortem debugging.
   static const int kRememberedUnmappedPages = 128;
@@ -2064,6 +2090,10 @@ class Heap {
     return &weak_object_to_code_table_;
   }
 
+  inline void UpdateAllocationsHash(HeapObject* object);
+  inline void UpdateAllocationsHash(uint32_t value);
+  inline void PrintAlloctionsHash();
+
   static const int kInitialStringTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
   static const int kInitialNumberStringCacheSize = 256;
index 61b1b54..17b3c34 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2806,10 +2806,6 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst,
                                          AllocationSpace dest) {
   Address dst_addr = dst->address();
   Address src_addr = src->address();
-  HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
-  if (heap_profiler->is_tracking_object_moves()) {
-    heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
-  }
   ASSERT(heap()->AllowedToBeMigrated(src, dest));
   ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
   if (dest == OLD_POINTER_SPACE) {
@@ -2876,6 +2872,7 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst,
     ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
     heap()->MoveBlock(dst_addr, src_addr, size);
   }
+  heap()->OnMoveEvent(dst, src, size);
   Memory::Address_at(src_addr) = dst_addr;
 }
 
index 54a7b55..1901009 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -415,7 +415,7 @@ void JSObject::PrintTransitions(FILE* out) {
 
 
 void JSObject::JSObjectPrint(FILE* out) {
-  PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
+  HeapObject::PrintHeader(out, "JSObject");
   PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
   // Don't call GetElementsKind, its validation code can cause the printer to
   // fail when debugging.
index b97af64..9a9454e 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -9652,7 +9652,13 @@ RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
   // the number in a Date object representing a particular instant in
   // time is milliseconds. Therefore, we floor the result of getting
   // the OS time.
-  double millis = std::floor(OS::TimeCurrentMillis());
+  double millis;
+  if (FLAG_verify_predictable) {
+    millis = 1388534400000.0;  // Jan 1 2014 00:00:00 GMT+0000
+    millis += std::floor(isolate->heap()->synthetic_time());
+  } else {
+    millis = std::floor(OS::TimeCurrentMillis());
+  }
   return *isolate->factory()->NewNumber(millis);
 }
 
index 4e5699c..bf00da0 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -849,6 +849,7 @@ void Deserializer::ReadObject(int space_number,
   int size = source_->GetInt() << kObjectAlignmentBits;
   Address address = Allocate(space_number, size);
   HeapObject* obj = HeapObject::FromAddress(address);
+  isolate_->heap()->OnAllocationEvent(obj, size);
   *write_back = obj;
   Object** current = reinterpret_cast<Object**>(address);
   Object** limit = current + (size >> kPointerSizeLog2);
index 9e3cc88..99b9854 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -334,10 +334,6 @@ class Deserializer: public SerializerDeserializer {
   Address Allocate(int space_index, int size) {
     Address address = high_water_[space_index];
     high_water_[space_index] = address + size;
-    HeapProfiler* profiler = isolate_->heap_profiler();
-    if (profiler->is_tracking_allocations()) {
-      profiler->AllocationEvent(address, size);
-    }
     return address;
   }
 
index e863b51..5f024e4 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -253,26 +253,14 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
 // Raw allocation.
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
 
-  ASSERT(!heap()->linear_allocation() ||
-         (anchor_.next_chunk() == &anchor_ &&
-          anchor_.prev_chunk() == &anchor_));
-
-  object = free_list_.Allocate(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
+  if (object == NULL) {
+    object = free_list_.Allocate(size_in_bytes);
+    if (object == NULL) {
+      object = SlowAllocateRaw(size_in_bytes);
     }
-    return object;
   }
 
-  object = SlowAllocateRaw(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);