Move a bunch of GC related files to heap/ subdirectory
author	jochen@chromium.org <jochen@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Tue, 5 Aug 2014 08:18:22 +0000 (08:18 +0000)
committer	jochen@chromium.org <jochen@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Tue, 5 Aug 2014 08:18:22 +0000 (08:18 +0000)
BUG=none
R=hpayer@chromium.org
LOG=n

Review URL: https://codereview.chromium.org/437993003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22850 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

41 files changed:
BUILD.gn
src/builtins.cc
src/contexts.h
src/elements.h
src/frames.cc
src/handles-inl.h
src/heap/gc-tracer.cc [moved from src/gc-tracer.cc with 99% similarity]
src/heap/gc-tracer.h [moved from src/gc-tracer.h with 99% similarity]
src/heap/heap-inl.h [moved from src/heap-inl.h with 85% similarity]
src/heap/heap.cc [moved from src/heap.cc with 82% similarity]
src/heap/heap.h [moved from src/heap.h with 86% similarity]
src/heap/incremental-marking-inl.h [moved from src/incremental-marking-inl.h with 83% similarity]
src/heap/incremental-marking.cc [moved from src/incremental-marking.cc with 91% similarity]
src/heap/incremental-marking.h [moved from src/incremental-marking.h with 84% similarity]
src/heap/mark-compact-inl.h [moved from src/mark-compact-inl.h with 85% similarity]
src/heap/mark-compact.cc [moved from src/mark-compact.cc with 87% similarity]
src/heap/mark-compact.h [moved from src/mark-compact.h with 90% similarity]
src/heap/spaces-inl.h [moved from src/spaces-inl.h with 90% similarity]
src/heap/spaces.cc [moved from src/spaces.cc with 89% similarity]
src/heap/spaces.h [moved from src/spaces.h with 88% similarity]
src/heap/sweeper-thread.cc [moved from src/sweeper-thread.cc with 77% similarity]
src/heap/sweeper-thread.h [moved from src/sweeper-thread.h with 81% similarity]
src/ia32/codegen-ia32.cc
src/isolate.cc
src/isolate.h
src/json-parser.h
src/jsregexp-inl.h
src/messages.cc
src/objects-inl.h
src/objects.cc
src/runtime-profiler.cc
src/safepoint-table.h
src/transitions.h
src/v8.h
src/x64/macro-assembler-x64.cc
src/x87/codegen-x87.cc
test/cctest/test-decls.cc
test/cctest/test-serialize.cc
test/cctest/test-weaktypedarrays.cc
tools/gyp/v8.gyp
tools/presubmit.py

index 139a895..b1be576 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -632,8 +632,6 @@ source_set("v8_base") {
     "src/full-codegen.h",
     "src/func-name-inferrer.cc",
     "src/func-name-inferrer.h",
-    "src/gc-tracer.cc",
-    "src/gc-tracer.h",
     "src/gdb-jit.cc",
     "src/gdb-jit.h",
     "src/global-handles.cc",
@@ -643,14 +641,26 @@ source_set("v8_base") {
     "src/handles.cc",
     "src/handles.h",
     "src/hashmap.h",
-    "src/heap-inl.h",
     "src/heap-profiler.cc",
     "src/heap-profiler.h",
     "src/heap-snapshot-generator-inl.h",
     "src/heap-snapshot-generator.cc",
     "src/heap-snapshot-generator.h",
-    "src/heap.cc",
-    "src/heap.h",
+    "src/heap/gc-tracer.cc",
+    "src/heap/gc-tracer.h",
+    "src/heap/heap-inl.h",
+    "src/heap/heap.cc",
+    "src/heap/heap.h",
+    "src/heap/incremental-marking.cc",
+    "src/heap/incremental-marking.h",
+    "src/heap/mark-compact-inl.h",
+    "src/heap/mark-compact.cc",
+    "src/heap/mark-compact.h",
+    "src/heap/spaces-inl.h",
+    "src/heap/spaces.cc",
+    "src/heap/spaces.h",
+    "src/heap/sweeper-thread.h",
+    "src/heap/sweeper-thread.cc",
     "src/hydrogen-alias-analysis.h",
     "src/hydrogen-bce.cc",
     "src/hydrogen-bce.h",
@@ -710,8 +720,6 @@ source_set("v8_base") {
     "src/ic-inl.h",
     "src/ic.cc",
     "src/ic.h",
-    "src/incremental-marking.cc",
-    "src/incremental-marking.h",
     "src/interface.cc",
     "src/interface.h",
     "src/interpreter-irregexp.cc",
@@ -742,8 +750,6 @@ source_set("v8_base") {
     "src/lookup.cc",
     "src/lookup.h",
     "src/macro-assembler.h",
-    "src/mark-compact.cc",
-    "src/mark-compact.h",
     "src/messages.cc",
     "src/messages.h",
     "src/msan.h",
@@ -811,9 +817,6 @@ source_set("v8_base") {
     "src/snapshot-source-sink.cc",
     "src/snapshot-source-sink.h",
     "src/snapshot.h",
-    "src/spaces-inl.h",
-    "src/spaces.cc",
-    "src/spaces.h",
     "src/store-buffer-inl.h",
     "src/store-buffer.cc",
     "src/store-buffer.h",
@@ -825,8 +828,6 @@ source_set("v8_base") {
     "src/strtod.h",
     "src/stub-cache.cc",
     "src/stub-cache.h",
-    "src/sweeper-thread.h",
-    "src/sweeper-thread.cc",
     "src/token.cc",
     "src/token.h",
     "src/transitions-inl.h",
index 4739ec0..e764eaf 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -11,9 +11,9 @@
 #include "src/builtins.h"
 #include "src/cpu-profiler.h"
 #include "src/gdb-jit.h"
+#include "src/heap/mark-compact.h"
 #include "src/heap-profiler.h"
 #include "src/ic-inl.h"
-#include "src/mark-compact.h"
 #include "src/prototype.h"
 #include "src/stub-cache.h"
 #include "src/vm-state-inl.h"
index b33baae..d2f9714 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -5,7 +5,7 @@
 #ifndef V8_CONTEXTS_H_
 #define V8_CONTEXTS_H_
 
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/objects.h"
 
 namespace v8 {
index 0be045c..3496a64 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -6,7 +6,7 @@
 #define V8_ELEMENTS_H_
 
 #include "src/elements-kind.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/isolate.h"
 #include "src/objects.h"
 
index dba69da..e892f80 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -8,7 +8,7 @@
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen.h"
-#include "src/mark-compact.h"
+#include "src/heap/mark-compact.h"
 #include "src/safepoint-table.h"
 #include "src/scopeinfo.h"
 #include "src/string-stream.h"
index e9ce79a..65b78c5 100644
--- a/src/handles-inl.h
+++ b/src/handles-inl.h
@@ -8,7 +8,7 @@
 
 #include "src/api.h"
 #include "src/handles.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/isolate.h"
 
 namespace v8 {
similarity index 99%
rename from src/gc-tracer.cc
rename to src/heap/gc-tracer.cc
index 4d9b6d6..0368f40 100644
@@ -4,7 +4,7 @@
 
 #include "src/v8.h"
 
-#include "src/gc-tracer.h"
+#include "src/heap/gc-tracer.h"
 
 namespace v8 {
 namespace internal {
similarity index 99%
rename from src/gc-tracer.h
rename to src/heap/gc-tracer.h
index ea8e718..38843d1 100644
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_GC_TRACER_H_
-#define V8_GC_TRACER_H_
+#ifndef V8_HEAP_GC_TRACER_H_
+#define V8_HEAP_GC_TRACER_H_
 
 namespace v8 {
 namespace internal {
@@ -340,4 +340,4 @@ class GCTracer BASE_EMBEDDED {
 }
 }  // namespace v8::internal
 
-#endif  // V8_GC_TRACER_H_
+#endif  // V8_HEAP_GC_TRACER_H_
similarity index 85%
rename from src/heap-inl.h
rename to src/heap/heap-inl.h
index b82c16e..03a8927 100644
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_HEAP_INL_H_
-#define V8_HEAP_INL_H_
+#ifndef V8_HEAP_HEAP_INL_H_
+#define V8_HEAP_HEAP_INL_H_
 
 #include <cmath>
 
 #include "src/base/platform/platform.h"
 #include "src/cpu-profiler.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/heap-profiler.h"
 #include "src/isolate.h"
 #include "src/list-inl.h"
@@ -47,7 +47,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
 
   *(--rear_) = reinterpret_cast<intptr_t>(target);
   *(--rear_) = size;
-  // Assert no overflow into live objects.
+// Assert no overflow into live objects.
 #ifdef DEBUG
   SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
                               reinterpret_cast<Address>(rear_));
@@ -57,12 +57,12 @@ void PromotionQueue::insert(HeapObject* target, int size) {
 
 void PromotionQueue::ActivateGuardIfOnTheSamePage() {
   guard_ = guard_ ||
-      heap_->new_space()->active_space()->current_page()->address() ==
-      GetHeadPage()->address();
+           heap_->new_space()->active_space()->current_page()->address() ==
+               GetHeadPage()->address();
 }
 
 
-template<>
+template <>
 bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
   // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
   // ASCII only check.
@@ -70,7 +70,7 @@ bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
 }
 
 
-template<>
+template <>
 bool inline Heap::IsOneByte(String* str, int chars) {
   return str->IsOneByteRepresentation();
 }
@@ -79,16 +79,16 @@ bool inline Heap::IsOneByte(String* str, int chars) {
 AllocationResult Heap::AllocateInternalizedStringFromUtf8(
     Vector<const char> str, int chars, uint32_t hash_field) {
   if (IsOneByte(str, chars)) {
-    return AllocateOneByteInternalizedString(
-        Vector<const uint8_t>::cast(str), hash_field);
+    return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
+                                             hash_field);
   }
   return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
 }
 
 
-template<typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(
-    T t, int chars, uint32_t hash_field) {
+template <typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+                                                      uint32_t hash_field) {
   if (IsOneByte(t, chars)) {
     return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
   }
@@ -97,8 +97,7 @@ AllocationResult Heap::AllocateInternalizedStringImpl(
 
 
 AllocationResult Heap::AllocateOneByteInternalizedString(
-    Vector<const uint8_t> str,
-    uint32_t hash_field) {
+    Vector<const uint8_t> str, uint32_t hash_field) {
   CHECK_GE(String::kMaxLength, str.length());
   // Compute map and object size.
   Map* map = ascii_internalized_string_map();
@@ -107,7 +106,8 @@ AllocationResult Heap::AllocateOneByteInternalizedString(
 
   // Allocate string.
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -138,7 +138,8 @@ AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
 
   // Allocate string.
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -175,15 +176,13 @@ AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
 }
 
 
-AllocationResult Heap::AllocateRaw(int size_in_bytes,
-                                   AllocationSpace space,
+AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
                                    AllocationSpace retry_space) {
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK(gc_state_ == NOT_IN_GC);
 #ifdef DEBUG
-  if (FLAG_gc_interval >= 0 &&
-      AllowAllocationFailure::IsAllowed(isolate_) &&
+  if (FLAG_gc_interval >= 0 && AllowAllocationFailure::IsAllowed(isolate_) &&
       Heap::allocation_timeout_-- <= 0) {
     return AllocationResult::Retry(space);
   }
@@ -195,9 +194,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
   AllocationResult allocation;
   if (NEW_SPACE == space) {
     allocation = new_space_.AllocateRaw(size_in_bytes);
-    if (always_allocate() &&
-        allocation.IsRetry() &&
-        retry_space != NEW_SPACE) {
+    if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
       space = retry_space;
     } else {
       if (allocation.To(&object)) {
@@ -258,8 +255,7 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
 }
 
 
-void Heap::OnMoveEvent(HeapObject* target,
-                       HeapObject* source,
+void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
                        int size_in_bytes) {
   HeapProfiler* heap_profiler = isolate_->heap_profiler();
   if (heap_profiler->is_tracking_object_moves()) {
@@ -270,8 +266,8 @@ void Heap::OnMoveEvent(HeapObject* target,
   if (isolate_->logger()->is_logging_code_events() ||
       isolate_->cpu_profiler()->is_profiling()) {
     if (target->IsSharedFunctionInfo()) {
-      PROFILE(isolate_, SharedFunctionInfoMoveEvent(
-          source->address(), target->address()));
+      PROFILE(isolate_, SharedFunctionInfoMoveEvent(source->address(),
+                                                    target->address()));
     }
   }
 
@@ -325,8 +321,7 @@ void Heap::FinalizeExternalString(String* string) {
   DCHECK(string->IsExternalString());
   v8::String::ExternalStringResourceBase** resource_addr =
       reinterpret_cast<v8::String::ExternalStringResourceBase**>(
-          reinterpret_cast<byte*>(string) +
-          ExternalString::kResourceOffset -
+          reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
           kHeapObjectTag);
 
   // Dispose of the C++ object if it has not already been disposed.
@@ -339,16 +334,14 @@ void Heap::FinalizeExternalString(String* string) {
 
 bool Heap::InNewSpace(Object* object) {
   bool result = new_space_.Contains(object);
-  DCHECK(!result ||                  // Either not in new space
-         gc_state_ != NOT_IN_GC ||   // ... or in the middle of GC
-         InToSpace(object));         // ... or in to-space (where we allocate).
+  DCHECK(!result ||                 // Either not in new space
+         gc_state_ != NOT_IN_GC ||  // ... or in the middle of GC
+         InToSpace(object));        // ... or in to-space (where we allocate).
   return result;
 }
 
 
-bool Heap::InNewSpace(Address address) {
-  return new_space_.Contains(address);
-}
+bool Heap::InNewSpace(Address address) { return new_space_.Contains(address); }
 
 
 bool Heap::InFromSpace(Object* object) {
@@ -391,7 +384,7 @@ bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   NewSpacePage* page = NewSpacePage::FromAddress(old_address);
   Address age_mark = new_space_.age_mark();
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
-      (!page->ContainsLimit(age_mark) || old_address < age_mark);
+         (!page->ContainsLimit(age_mark) || old_address < age_mark);
 }
 
 
@@ -412,9 +405,7 @@ void Heap::RecordWrites(Address address, int start, int len) {
 OldSpace* Heap::TargetSpace(HeapObject* object) {
   InstanceType type = object->map()->instance_type();
   AllocationSpace space = TargetSpaceId(type);
-  return (space == OLD_POINTER_SPACE)
-      ? old_pointer_space_
-      : old_data_space_;
+  return (space == OLD_POINTER_SPACE) ? old_pointer_space_ : old_data_space_;
 }
 
 
@@ -438,8 +429,8 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
     // strings, cons strings, and sliced strings.
     // Only the latter two contain non-map-word pointers to heap objects.
     return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
-        ? OLD_POINTER_SPACE
-        : OLD_DATA_SPACE;
+               ? OLD_POINTER_SPACE
+               : OLD_DATA_SPACE;
   } else {
     return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
   }
@@ -490,8 +481,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
 
 
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
-  CopyWords(reinterpret_cast<Object**>(dst),
-            reinterpret_cast<Object**>(src),
+  CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
             static_cast<size_t>(byte_size / kPointerSize));
 }
 
@@ -515,9 +505,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
 }
 
 
-void Heap::ScavengePointer(HeapObject** p) {
-  ScavengeObject(p, *p);
-}
+void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); }
 
 
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
@@ -527,8 +515,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   Address object_address = object->address();
   Address memento_address = object_address + object->Size();
   Address last_memento_word_address = memento_address + kPointerSize;
-  if (!NewSpacePage::OnSamePage(object_address,
-                                last_memento_word_address)) {
+  if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
     return NULL;
   }
 
@@ -561,7 +548,8 @@ void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
   DCHECK(heap->InFromSpace(object));
 
   if (!FLAG_allocation_site_pretenuring ||
-      !AllocationSite::CanTrack(object->map()->instance_type())) return;
+      !AllocationSite::CanTrack(object->map()->instance_type()))
+    return;
 
   AllocationMemento* memento = heap->FindAllocationMemento(object);
   if (memento == NULL) return;
@@ -599,8 +587,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
 }
 
 
-bool Heap::CollectGarbage(AllocationSpace space,
-                          const char* gc_reason,
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
                           const v8::GCCallbackFlags callbackFlags) {
   const char* collector_reason = NULL;
   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
@@ -609,7 +596,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
 
 
 Isolate* Heap::isolate() {
-  return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
+  return reinterpret_cast<Isolate*>(
+      reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
 }
 
@@ -621,49 +609,44 @@ Isolate* Heap::isolate() {
 // Warning: Do not use the identifiers __object__, __maybe_object__ or
 // __scope__ in a call to this macro.
 
-#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                      \
-  if (__allocation__.To(&__object__)) {                                        \
-    DCHECK(__object__ != (ISOLATE)->heap()->exception());                      \
-    RETURN_VALUE;                                                              \
-  }
-
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)     \
-  do {                                                                         \
-    AllocationResult __allocation__ = FUNCTION_CALL;                           \
-    Object* __object__ = NULL;                                                 \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                          \
-    (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),             \
-                                      "allocation failure");                   \
-    __allocation__ = FUNCTION_CALL;                                            \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                          \
-    (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();         \
-    (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");           \
-    {                                                                          \
-      AlwaysAllocateScope __scope__(ISOLATE);                                  \
-      __allocation__ = FUNCTION_CALL;                                          \
-    }                                                                          \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                          \
-      /* TODO(1181417): Fix this. */                                           \
-    v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);  \
-    RETURN_EMPTY;                                                              \
+#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+  if (__allocation__.To(&__object__)) {                   \
+    DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
+    RETURN_VALUE;                                         \
+  }
+
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)    \
+  do {                                                                        \
+    AllocationResult __allocation__ = FUNCTION_CALL;                          \
+    Object* __object__ = NULL;                                                \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),            \
+                                      "allocation failure");                  \
+    __allocation__ = FUNCTION_CALL;                                           \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();        \
+    (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");          \
+    {                                                                         \
+      AlwaysAllocateScope __scope__(ISOLATE);                                 \
+      __allocation__ = FUNCTION_CALL;                                         \
+    }                                                                         \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    /* TODO(1181417): Fix this. */                                            \
+    v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+    RETURN_EMPTY;                                                             \
   } while (false)
 
-#define CALL_AND_RETRY_OR_DIE(                                             \
-     ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)                   \
-  CALL_AND_RETRY(                                                          \
-      ISOLATE,                                                             \
-      FUNCTION_CALL,                                                       \
-      RETURN_VALUE,                                                        \
-      RETURN_EMPTY)
+#define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
+                              RETURN_EMPTY)                         \
+  CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)
 
 #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
-  CALL_AND_RETRY_OR_DIE(ISOLATE,                                              \
-                        FUNCTION_CALL,                                        \
+  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL,                               \
                         return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
-                        return Handle<TYPE>())                                \
+                        return Handle<TYPE>())
 
 
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL)  \
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
   CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
 
 
@@ -776,9 +759,7 @@ GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
 }
 
 
-GCCallbacksScope::~GCCallbacksScope() {
-  heap_->gc_callbacks_depth_--;
-}
+GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
 
 
 bool GCCallbacksScope::CheckReenter() {
@@ -799,11 +780,10 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
 
 void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
-     CHECK((*current)->IsSmi());
+    CHECK((*current)->IsSmi());
   }
 }
+}
+}  // namespace v8::internal
 
-
-} }  // namespace v8::internal
-
-#endif  // V8_HEAP_INL_H_
+#endif  // V8_HEAP_HEAP_INL_H_
similarity index 82%
rename from src/heap.cc
rename to src/heap/heap.cc
index c313399..b391041 100644
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
 #include "src/heap-profiler.h"
-#include "src/incremental-marking.h"
 #include "src/isolate-inl.h"
-#include "src/mark-compact.h"
 #include "src/natives.h"
 #include "src/objects-visiting-inl.h"
 #include "src/objects-visiting.h"
 #include "src/vm-state-inl.h"
 
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"  // NOLINT
+#include "src/regexp-macro-assembler.h"          // NOLINT
 #include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
 #endif
 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"  // NOLINT
+#include "src/regexp-macro-assembler.h"            // NOLINT
 #include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
 #endif
 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
@@ -137,9 +137,9 @@ Heap::Heap()
       external_string_table_(this),
       chunks_queued_for_free_(NULL),
       gc_callbacks_depth_(0) {
-  // Allow build-time customization of the max semispace size. Building
-  // V8 with snapshots and a non-default max semispace size is much
-  // easier if you can define it as part of the build environment.
+// Allow build-time customization of the max semispace size. Building
+// V8 with snapshots and a non-default max semispace size is much
+// easier if you can define it as part of the build environment.
 #if defined(V8_MAX_SEMISPACE_SIZE)
   max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
 #endif
@@ -163,27 +163,20 @@ Heap::Heap()
 intptr_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.Capacity() +
-      old_pointer_space_->Capacity() +
-      old_data_space_->Capacity() +
-      code_space_->Capacity() +
-      map_space_->Capacity() +
-      cell_space_->Capacity() +
-      property_cell_space_->Capacity();
+  return new_space_.Capacity() + old_pointer_space_->Capacity() +
+         old_data_space_->Capacity() + code_space_->Capacity() +
+         map_space_->Capacity() + cell_space_->Capacity() +
+         property_cell_space_->Capacity();
 }
 
 
 intptr_t Heap::CommittedMemory() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.CommittedMemory() +
-      old_pointer_space_->CommittedMemory() +
-      old_data_space_->CommittedMemory() +
-      code_space_->CommittedMemory() +
-      map_space_->CommittedMemory() +
-      cell_space_->CommittedMemory() +
-      property_cell_space_->CommittedMemory() +
-      lo_space_->Size();
+  return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
+         old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
+         map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
+         property_cell_space_->CommittedMemory() + lo_space_->Size();
 }
 
 
@@ -191,13 +184,13 @@ size_t Heap::CommittedPhysicalMemory() {
   if (!HasBeenSetUp()) return 0;
 
   return new_space_.CommittedPhysicalMemory() +
-      old_pointer_space_->CommittedPhysicalMemory() +
-      old_data_space_->CommittedPhysicalMemory() +
-      code_space_->CommittedPhysicalMemory() +
-      map_space_->CommittedPhysicalMemory() +
-      cell_space_->CommittedPhysicalMemory() +
-      property_cell_space_->CommittedPhysicalMemory() +
-      lo_space_->CommittedPhysicalMemory();
+         old_pointer_space_->CommittedPhysicalMemory() +
+         old_data_space_->CommittedPhysicalMemory() +
+         code_space_->CommittedPhysicalMemory() +
+         map_space_->CommittedPhysicalMemory() +
+         cell_space_->CommittedPhysicalMemory() +
+         property_cell_space_->CommittedPhysicalMemory() +
+         lo_space_->CommittedPhysicalMemory();
 }
 
 
@@ -221,24 +214,17 @@ void Heap::UpdateMaximumCommitted() {
 intptr_t Heap::Available() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.Available() +
-      old_pointer_space_->Available() +
-      old_data_space_->Available() +
-      code_space_->Available() +
-      map_space_->Available() +
-      cell_space_->Available() +
-      property_cell_space_->Available();
+  return new_space_.Available() + old_pointer_space_->Available() +
+         old_data_space_->Available() + code_space_->Available() +
+         map_space_->Available() + cell_space_->Available() +
+         property_cell_space_->Available();
 }
 
 
 bool Heap::HasBeenSetUp() {
-  return old_pointer_space_ != NULL &&
-         old_data_space_ != NULL &&
-         code_space_ != NULL &&
-         map_space_ != NULL &&
-         cell_space_ != NULL &&
-         property_cell_space_ != NULL &&
-         lo_space_ != NULL;
+  return old_pointer_space_ != NULL && old_data_space_ != NULL &&
+         code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
+         property_cell_space_ != NULL && lo_space_ != NULL;
 }
 
 
@@ -273,8 +259,9 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
 
   // Have allocation in OLD and LO failed?
   if (old_gen_exhausted_) {
-    isolate_->counters()->
-        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    isolate_->counters()
+        ->gc_compactor_caused_by_oldspace_exhaustion()
+        ->Increment();
     *reason = "old generations exhausted";
     return MARK_COMPACTOR;
   }
@@ -289,8 +276,9 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
-    isolate_->counters()->
-        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    isolate_->counters()
+        ->gc_compactor_caused_by_oldspace_exhaustion()
+        ->Increment();
     *reason = "scavenge might not succeed";
     return MARK_COMPACTOR;
   }
@@ -304,9 +292,9 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
 void Heap::ReportStatisticsBeforeGC() {
-  // Heap::ReportHeapStatistics will also log NewSpace statistics when
-  // compiled --log-gc is set.  The following logic is used to avoid
-  // double logging.
+// Heap::ReportHeapStatistics will also log NewSpace statistics when
+// compiled --log-gc is set.  The following logic is used to avoid
+// double logging.
 #ifdef DEBUG
   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
   if (FLAG_heap_stats) {
@@ -327,63 +315,76 @@ void Heap::ReportStatisticsBeforeGC() {
 
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB\n",
+  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB\n",
            isolate_->memory_allocator()->Size() / KB,
            isolate_->memory_allocator()->Available() / KB);
-  PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           new_space_.Size() / KB,
-           new_space_.Available() / KB,
+  PrintPID("New space,          used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           new_space_.Size() / KB, new_space_.Available() / KB,
            new_space_.CommittedMemory() / KB);
-  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
            old_pointer_space_->SizeOfObjects() / KB,
            old_pointer_space_->Available() / KB,
            old_pointer_space_->CommittedMemory() / KB);
-  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
            old_data_space_->SizeOfObjects() / KB,
            old_data_space_->Available() / KB,
            old_data_space_->CommittedMemory() / KB);
-  PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           code_space_->SizeOfObjects() / KB,
-           code_space_->Available() / KB,
+  PrintPID("Code space,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
            code_space_->CommittedMemory() / KB);
-  PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           map_space_->SizeOfObjects() / KB,
-           map_space_->Available() / KB,
+  PrintPID("Map space,          used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
            map_space_->CommittedMemory() / KB);
-  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           cell_space_->SizeOfObjects() / KB,
-           cell_space_->Available() / KB,
+  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
            cell_space_->CommittedMemory() / KB);
-  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
            property_cell_space_->SizeOfObjects() / KB,
            property_cell_space_->Available() / KB,
            property_cell_space_->CommittedMemory() / KB);
-  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           lo_space_->SizeOfObjects() / KB,
-           lo_space_->Available() / KB,
+  PrintPID("Large object space, used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
            lo_space_->CommittedMemory() / KB);
-  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           this->SizeOfObjects() / KB,
-           this->Available() / KB,
+  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           this->SizeOfObjects() / KB, this->Available() / KB,
            this->CommittedMemory() / KB);
   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
@@ -394,8 +395,8 @@ void Heap::PrintShortHeapStatistics() {
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
 void Heap::ReportStatisticsAfterGC() {
-  // Similar to the before GC, we use some complicated logic to ensure that
-  // NewSpace statistics are logged exactly once when --log-gc is turned on.
+// Similar to the before GC, we use some complicated logic to ensure that
+// NewSpace statistics are logged exactly once when --log-gc is turned on.
 #if defined(DEBUG)
   if (FLAG_heap_stats) {
     new_space_.CollectStatistics();
@@ -410,7 +411,8 @@ void Heap::ReportStatisticsAfterGC() {
 
 
 void Heap::GarbageCollectionPrologue() {
-  {  AllowHeapAllocation for_the_first_part_of_prologue;
+  {
+    AllowHeapAllocation for_the_first_part_of_prologue;
     ClearJSFunctionResultCaches();
     gc_count_++;
     unflattened_strings_length_ = 0;
@@ -484,8 +486,7 @@ void Heap::ClearAllICsByKind(Code::Kind kind) {
 
 void Heap::RepairFreeListsAfterBoot() {
   PagedSpaces spaces(this);
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
+  for (PagedSpace* space = spaces.next(); space != NULL;
        space = spaces.next()) {
     space->RepairFreeListsAfterBoot();
   }
@@ -508,19 +509,19 @@ void Heap::ProcessPretenuringFeedback() {
     // in a seperate data structure if this is a performance problem.
     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
     bool use_scratchpad =
-         allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
-         !deopt_maybe_tenured;
+        allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
+        !deopt_maybe_tenured;
 
     int i = 0;
     Object* list_element = allocation_sites_list();
     bool trigger_deoptimization = false;
     bool maximum_size_scavenge = MaximumSizeScavenge();
-    while (use_scratchpad ?
-              i < allocation_sites_scratchpad_length_ :
-              list_element->IsAllocationSite()) {
-      AllocationSite* site = use_scratchpad ?
-          AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
-          AllocationSite::cast(list_element);
+    while (use_scratchpad ? i < allocation_sites_scratchpad_length_
+                          : list_element->IsAllocationSite()) {
+      AllocationSite* site =
+          use_scratchpad
+              ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
+              : AllocationSite::cast(list_element);
       allocation_mementos_found += site->memento_found_count();
       if (site->memento_found_count() > 0) {
         active_allocation_sites++;
@@ -554,18 +555,15 @@ void Heap::ProcessPretenuringFeedback() {
     FlushAllocationSitesScratchpad();
 
     if (FLAG_trace_pretenuring_statistics &&
-        (allocation_mementos_found > 0 ||
-         tenure_decisions > 0 ||
+        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
          dont_tenure_decisions > 0)) {
-      PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
-             "#mementos, #tenure decisions, #donttenure decisions) "
-             "(%s, %d, %d, %d, %d, %d)\n",
-             use_scratchpad ? "use scratchpad" : "use list",
-             allocation_sites,
-             active_allocation_sites,
-             allocation_mementos_found,
-             tenure_decisions,
-             dont_tenure_decisions);
+      PrintF(
+          "GC: (mode, #visited allocation sites, #active allocation sites, "
+          "#mementos, #tenure decisions, #donttenure decisions) "
+          "(%s, %d, %d, %d, %d, %d)\n",
+          use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
+          active_allocation_sites, allocation_mementos_found, tenure_decisions,
+          dont_tenure_decisions);
     }
   }
 }
@@ -580,8 +578,7 @@ void Heap::DeoptMarkedAllocationSites() {
     AllocationSite* site = AllocationSite::cast(list_element);
     if (site->deopt_dependent_code()) {
       site->dependent_code()->MarkCodeForDeoptimization(
-          isolate_,
-          DependentCode::kAllocationSiteTenuringChangedGroup);
+          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
       site->set_deopt_dependent_code(false);
     }
     list_element = site->weak_next();
@@ -638,41 +635,35 @@ void Heap::GarbageCollectionEpilogue() {
   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
-            (crankshaft_codegen_bytes_generated_
-            + full_codegen_bytes_generated_)));
+                         (crankshaft_codegen_bytes_generated_ +
+                          full_codegen_bytes_generated_)));
   }
 
   if (CommittedMemory() > 0) {
     isolate_->counters()->external_fragmentation_total()->AddSample(
         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
 
-    isolate_->counters()->heap_fraction_new_space()->
-        AddSample(static_cast<int>(
-            (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+    isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
+        (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
-        static_cast<int>(
-            (old_pointer_space()->CommittedMemory() * 100.0) /
-            CommittedMemory()));
+        static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
-        static_cast<int>(
-            (old_data_space()->CommittedMemory() * 100.0) /
-            CommittedMemory()));
-    isolate_->counters()->heap_fraction_code_space()->
-        AddSample(static_cast<int>(
-            (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_map_space()->AddSample(
-        static_cast<int>(
-            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+        static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_code_space()->AddSample(
+        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
+        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
     isolate_->counters()->heap_fraction_cell_space()->AddSample(
-        static_cast<int>(
-            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_property_cell_space()->
-        AddSample(static_cast<int>(
-            (property_cell_space()->CommittedMemory() * 100.0) /
-            CommittedMemory()));
-    isolate_->counters()->heap_fraction_lo_space()->
-        AddSample(static_cast<int>(
-            (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+        static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
+        static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
+        (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
 
     isolate_->counters()->heap_sample_total_committed()->AddSample(
         static_cast<int>(CommittedMemory() / KB));
@@ -682,10 +673,10 @@ void Heap::GarbageCollectionEpilogue() {
         static_cast<int>(map_space()->CommittedMemory() / KB));
     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
         static_cast<int>(cell_space()->CommittedMemory() / KB));
-    isolate_->counters()->
-        heap_sample_property_cell_space_committed()->
-            AddSample(static_cast<int>(
-                property_cell_space()->CommittedMemory() / KB));
+    isolate_->counters()
+        ->heap_sample_property_cell_space_committed()
+        ->AddSample(
+            static_cast<int>(property_cell_space()->CommittedMemory() / KB));
     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
         static_cast<int>(code_space()->CommittedMemory() / KB));
 
@@ -693,21 +684,22 @@ void Heap::GarbageCollectionEpilogue() {
         static_cast<int>(MaximumCommittedMemory() / KB));
   }
 
-#define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
-  isolate_->counters()->space##_bytes_available()->Set(                        \
-      static_cast<int>(space()->Available()));                                 \
-  isolate_->counters()->space##_bytes_committed()->Set(                        \
-      static_cast<int>(space()->CommittedMemory()));                           \
-  isolate_->counters()->space##_bytes_used()->Set(                             \
+#define UPDATE_COUNTERS_FOR_SPACE(space)                \
+  isolate_->counters()->space##_bytes_available()->Set( \
+      static_cast<int>(space()->Available()));          \
+  isolate_->counters()->space##_bytes_committed()->Set( \
+      static_cast<int>(space()->CommittedMemory()));    \
+  isolate_->counters()->space##_bytes_used()->Set(      \
       static_cast<int>(space()->SizeOfObjects()));
-#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
-  if (space()->CommittedMemory() > 0) {                                        \
-    isolate_->counters()->external_fragmentation_##space()->AddSample(         \
-        static_cast<int>(100 -                                                 \
-            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
-  }
-#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
-  UPDATE_COUNTERS_FOR_SPACE(space)                                             \
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
+  if (space()->CommittedMemory() > 0) {                                \
+    isolate_->counters()->external_fragmentation_##space()->AddSample( \
+        static_cast<int>(100 -                                         \
+                         (space()->SizeOfObjects() * 100.0) /          \
+                             space()->CommittedMemory()));             \
+  }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+  UPDATE_COUNTERS_FOR_SPACE(space)                         \
   UPDATE_FRAGMENTATION_FOR_SPACE(space)
 
   UPDATE_COUNTERS_FOR_SPACE(new_space)
@@ -732,8 +724,7 @@ void Heap::GarbageCollectionEpilogue() {
 }
 
 
-void Heap::CollectAllGarbage(int flags,
-                             const char* gc_reason,
+void Heap::CollectAllGarbage(int flags, const char* gc_reason,
                              const v8::GCCallbackFlags gc_callback_flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
@@ -794,8 +785,7 @@ void Heap::EnsureFillerObjectAtTop() {
 }
 
 
-bool Heap::CollectGarbage(GarbageCollector collector,
-                          const char* gc_reason,
+bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
                           const char* collector_reason,
                           const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
@@ -860,8 +850,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
   // generator needs incremental marking to stay off after it aborted.
   if (!mark_compact_collector()->abort_incremental_marking() &&
       incremental_marking()->IsStopped() &&
-      incremental_marking()->WorthActivating() &&
-      NextGCIsLikelyToBeFull()) {
+      incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
     incremental_marking()->Start();
   }
 
@@ -880,9 +869,7 @@ int Heap::NotifyContextDisposed() {
 }
 
 
-void Heap::MoveElements(FixedArray* array,
-                        int dst_index,
-                        int src_index,
+void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                         int len) {
   if (len == 0) return;
 
@@ -926,9 +913,7 @@ static void VerifyStringTable(Heap* heap) {
 
 
 static bool AbortIncrementalMarkingAndCollectGarbage(
-    Heap* heap,
-    AllocationSpace space,
-    const char* gc_reason = NULL) {
+    Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
   bool result = heap->CollectGarbage(space, gc_reason);
   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
@@ -936,7 +921,7 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
 }
 
 
-void Heap::ReserveSpace(int *sizes, Address *locations_out) {
+void Heap::ReserveSpace(int* sizes, Address* locations_out) {
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
@@ -958,8 +943,7 @@ void Heap::ReserveSpace(int *sizes, Address *locations_out) {
                                  "failed to reserve space in the new space");
           } else {
             AbortIncrementalMarkingAndCollectGarbage(
-                this,
-                static_cast<AllocationSpace>(space),
+                this, static_cast<AllocationSpace>(space),
                 "failed to reserve space in paged space");
           }
           gc_performed = true;
@@ -1036,13 +1020,12 @@ void Heap::ClearNormalizedMapCaches() {
 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   if (start_new_space_size == 0) return;
 
-  promotion_rate_ =
-        (static_cast<double>(promoted_objects_size_) /
-            static_cast<double>(start_new_space_size) * 100);
+  promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
+                     static_cast<double>(start_new_space_size) * 100);
 
   semi_space_copied_rate_ =
-        (static_cast<double>(semi_space_copied_object_size_) /
-            static_cast<double>(start_new_space_size) * 100);
+      (static_cast<double>(semi_space_copied_object_size_) /
+       static_cast<double>(start_new_space_size) * 100);
 
   double survival_rate = promotion_rate_ + semi_space_copied_rate_;
 
@@ -1054,8 +1037,7 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
 }
 
 bool Heap::PerformGarbageCollection(
-    GarbageCollector collector,
-    const v8::GCCallbackFlags gc_callback_flags) {
+    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
   int freed_global_handles = 0;
 
   if (collector != SCAVENGER) {
@@ -1071,7 +1053,8 @@ bool Heap::PerformGarbageCollection(
   GCType gc_type =
       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
 
-  { GCCallbacksScope scope(this);
+  {
+    GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
@@ -1115,7 +1098,8 @@ bool Heap::PerformGarbageCollection(
   DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
 
   gc_post_processing_depth_++;
-  { AllowHeapAllocation allow_allocation;
+  {
+    AllowHeapAllocation allow_allocation;
     GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
     freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
@@ -1131,12 +1115,12 @@ bool Heap::PerformGarbageCollection(
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
-                                     freed_global_handles);
+    old_generation_allocation_limit_ = OldGenerationAllocationLimit(
+        PromotedSpaceSizeOfObjects(), freed_global_handles);
   }
 
-  { GCCallbacksScope scope(this);
+  {
+    GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
@@ -1184,8 +1168,7 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
         callback(gc_type, gc_callback_flags);
       } else {
         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
-        gc_epilogue_callbacks_[i].callback(
-            isolate, gc_type, gc_callback_flags);
+        gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
       }
     }
   }
@@ -1243,7 +1226,7 @@ void Heap::MarkCompactPrologue() {
 
 
 // Helper class for copying HeapObjects
-class ScavengeVisitor: public ObjectVisitor {
+class ScavengeVisitor : public ObjectVisitor {
  public:
   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
 
@@ -1269,10 +1252,10 @@ class ScavengeVisitor: public ObjectVisitor {
 #ifdef VERIFY_HEAP
 // Visitor class to verify pointers in code or data space do not point into
 // new space.
-class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
+class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
  public:
   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
-  void VisitPointers(Object** start, Object**end) {
+  void VisitPointers(Object** start, Object** end) {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
@@ -1290,16 +1273,16 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
   // do not expect them.
   VerifyNonPointerSpacePointersVisitor v(heap);
   HeapObjectIterator code_it(heap->code_space());
-  for (HeapObject* object = code_it.Next();
-       object != NULL; object = code_it.Next())
+  for (HeapObject* object = code_it.Next(); object != NULL;
+       object = code_it.Next())
     object->Iterate(&v);
 
   // The old data space was normally swept conservatively so that the iterator
   // doesn't work, so we normally skip the next bit.
   if (heap->old_data_space()->swept_precisely()) {
     HeapObjectIterator data_it(heap->old_data_space());
-    for (HeapObject* object = data_it.Next();
-         object != NULL; object = data_it.Next())
+    for (HeapObject* object = data_it.Next(); object != NULL;
+         object = data_it.Next())
       object->Iterate(&v);
   }
 }
@@ -1320,14 +1303,12 @@ void Heap::CheckNewSpaceExpansionCriteria() {
 
 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
   return heap->InNewSpace(*p) &&
-      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
 }
 
 
-void Heap::ScavengeStoreBufferCallback(
-    Heap* heap,
-    MemoryChunk* page,
-    StoreBufferEvent event) {
+void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+                                       StoreBufferEvent event) {
   heap->store_buffer_rebuilder_.Callback(page, event);
 }
 
@@ -1386,8 +1367,8 @@ void PromotionQueue::Initialize() {
   // Assumes that a NewSpacePage exactly fits a number of promotion queue
   // entries (where each is a pair of intptr_t). This allows us to simplify
   // the test fpr when to switch pages.
-  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
-         == 0);
+  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
+         0);
   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
   front_ = rear_ =
       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
@@ -1401,8 +1382,7 @@ void PromotionQueue::RelocateQueueHead() {
 
   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
   intptr_t* head_start = rear_;
-  intptr_t* head_end =
-      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+  intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
 
   int entries_count =
       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
@@ -1420,7 +1400,7 @@ void PromotionQueue::RelocateQueueHead() {
 
 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
  public:
-  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
+  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
 
   virtual Object* RetainAs(Object* object) {
     if (!heap_->InFromSpace(object)) {
@@ -1496,8 +1476,7 @@ void Heap::Scavenge() {
 
   // Copy objects reachable from the old generation.
   {
-    StoreBufferRebuildScope scope(this,
-                                  store_buffer(),
+    StoreBufferRebuildScope scope(this, store_buffer(),
                                   &ScavengeStoreBufferCallback);
     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
   }
@@ -1505,8 +1484,7 @@ void Heap::Scavenge() {
   // Copy objects reachable from simple cells by scavenging cell values
   // directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* heap_object = cell_iterator.Next();
-       heap_object != NULL;
+  for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
        heap_object = cell_iterator.Next()) {
     if (heap_object->IsCell()) {
       Cell* cell = Cell::cast(heap_object);
@@ -1638,7 +1616,6 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
 
 void Heap::UpdateReferencesInExternalStringTable(
     ExternalStringTableUpdaterCallback updater_func) {
-
   // Update old space string references.
   if (external_string_table_.old_space_strings_.length() > 0) {
     Object** start = &external_string_table_.old_space_strings_[0];
@@ -1713,7 +1690,7 @@ void Heap::EvaluateOldSpaceLocalPretenuring(
   uint64_t size_of_objects_after_gc = SizeOfObjects();
   double old_generation_survival_rate =
       (static_cast<double>(size_of_objects_after_gc) * 100) /
-          static_cast<double>(size_of_objects_before_gc);
+      static_cast<double>(size_of_objects_before_gc);
 
   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
     // Too many objects died in the old generation, pretenuring of wrong
@@ -1722,8 +1699,10 @@ void Heap::EvaluateOldSpaceLocalPretenuring(
     // our pretenuring decisions.
     ResetAllAllocationSitesDependentCode(TENURED);
     if (FLAG_trace_pretenuring) {
-      PrintF("Deopt all allocation sites dependent code due to low survival "
-             "rate in the old generation %f\n", old_generation_survival_rate);
+      PrintF(
+          "Deopt all allocation sites dependent code due to low survival "
+          "rate in the old generation %f\n",
+          old_generation_survival_rate);
     }
   }
 }
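// A worked example of the survival-rate computation above (the sizes are
// made up for illustration; kOldSurvivalRateLowThreshold is the threshold
// name used above, its value is not assumed here):
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t size_of_objects_before_gc = 100ull * 1024 * 1024;  // 100 MB
  const uint64_t size_of_objects_after_gc = 8ull * 1024 * 1024;     // 8 MB
  const double old_generation_survival_rate =
      (static_cast<double>(size_of_objects_after_gc) * 100) /
      static_cast<double>(size_of_objects_before_gc);
  // 8% of the old generation survived; if that is below the threshold, the
  // code above deopts allocation-site dependent code so pretenuring
  // decisions can be made afresh.
  std::printf("old generation survival rate: %.1f%%\n",
              old_generation_survival_rate);
  return 0;
}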
@@ -1736,14 +1715,16 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
    public:
     explicit ExternalStringTableVisitorAdapter(
-        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
+        v8::ExternalResourceVisitor* visitor)
+        : visitor_(visitor) {}
     virtual void VisitPointers(Object** start, Object** end) {
       for (Object** p = start; p < end; p++) {
         DCHECK((*p)->IsExternalString());
-        visitor_->VisitExternalString(Utils::ToLocal(
-            Handle<String>(String::cast(*p))));
+        visitor_->VisitExternalString(
+            Utils::ToLocal(Handle<String>(String::cast(*p))));
       }
     }
+
    private:
     v8::ExternalResourceVisitor* visitor_;
   } external_string_table_visitor(visitor);
@@ -1774,7 +1755,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
       if (!NewSpacePage::IsAtEnd(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         new_space_front +=
-          NewSpaceScavenger::IterateBody(object->map(), object);
+            NewSpaceScavenger::IterateBody(object->map(), object);
       } else {
         new_space_front =
             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
@@ -1783,8 +1764,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
 
     // Promote and process all the to-be-promoted objects.
     {
-      StoreBufferRebuildScope scope(this,
-                                    store_buffer(),
+      StoreBufferRebuildScope scope(this, store_buffer(),
                                     &ScavengeStoreBufferCallback);
       while (!promotion_queue()->is_empty()) {
         HeapObject* target;
@@ -1796,9 +1776,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
         // for pointers to from semispace instead of looking for pointers
         // to new space.
         DCHECK(!target->IsMap());
-        IterateAndMarkPointersToFromSpace(target->address(),
-                                          target->address() + size,
-                                          &ScavengeObject);
+        IterateAndMarkPointersToFromSpace(
+            target->address(), target->address() + size, &ScavengeObject);
       }
     }
 
@@ -1810,20 +1789,18 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
 }
 
 
-STATIC_ASSERT((FixedDoubleArray::kHeaderSize &
-               kDoubleAlignmentMask) == 0);  // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset &
-               kDoubleAlignmentMask) == 0);  // NOLINT
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
+              0);  // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
+              0);  // NOLINT
 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
                kDoubleAlignmentMask) == 0);  // NOLINT
 
 
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
-                                              HeapObject* object,
+INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
                                               int size));
 
-static HeapObject* EnsureDoubleAligned(Heap* heap,
-                                       HeapObject* object,
+static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
                                        int size) {
   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
     heap->CreateFillerObjectAt(object->address(), kPointerSize);
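// A minimal sketch of the double-alignment fix-up above: when an address is
// not 8-byte aligned, a one-word filler is placed first and the object
// starts one pointer later. The constants are assumptions for a 32-bit
// layout (4-byte pointers), not values taken from the V8 headers.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kSketchPointerSize = 4;
  const uintptr_t kSketchDoubleAlignmentMask = 8 - 1;

  uintptr_t address = 0x1000 + kSketchPointerSize;  // misaligned by one word
  if ((address & kSketchDoubleAlignmentMask) != 0) {
    // Where the real code writes a one-word filler object, the sketch just
    // skips over that word.
    address += kSketchPointerSize;
  }
  assert((address & kSketchDoubleAlignmentMask) == 0);
  return 0;
}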
@@ -1845,8 +1822,8 @@ enum LoggingAndProfiling {
 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
 
-template<MarksHandling marks_handling,
-         LoggingAndProfiling logging_and_profiling_mode>
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1859,65 +1836,63 @@ class ScavengingVisitor : public StaticVisitorBase {
     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
 
-    table_.Register(kVisitNativeContext,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<Context::kSize>);
+    table_.Register(
+        kVisitNativeContext,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Context::kSize>);
 
-    table_.Register(kVisitConsString,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<ConsString::kSize>);
+    table_.Register(
+        kVisitConsString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            ConsString::kSize>);
 
-    table_.Register(kVisitSlicedString,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<SlicedString::kSize>);
+    table_.Register(
+        kVisitSlicedString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SlicedString::kSize>);
 
-    table_.Register(kVisitSymbol,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<Symbol::kSize>);
+    table_.Register(
+        kVisitSymbol,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Symbol::kSize>);
 
-    table_.Register(kVisitSharedFunctionInfo,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<SharedFunctionInfo::kSize>);
+    table_.Register(
+        kVisitSharedFunctionInfo,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SharedFunctionInfo::kSize>);
 
     table_.Register(kVisitJSWeakCollection,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
 
     table_.Register(kVisitJSArrayBuffer,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
 
     table_.Register(kVisitJSTypedArray,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
 
     table_.Register(kVisitJSDataView,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
 
     table_.Register(kVisitJSRegExp,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
 
     if (marks_handling == IGNORE_MARKS) {
-      table_.Register(kVisitJSFunction,
-                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                          template VisitSpecialized<JSFunction::kSize>);
+      table_.Register(
+          kVisitJSFunction,
+          &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+              JSFunction::kSize>);
     } else {
       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
     }
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
-                                   kVisitDataObject,
-                                   kVisitDataObjectGeneric>();
+                                   kVisitDataObject, kVisitDataObjectGeneric>();
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
-                                   kVisitJSObject,
-                                   kVisitJSObjectGeneric>();
+                                   kVisitJSObject, kVisitJSObjectGeneric>();
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
-                                   kVisitStruct,
-                                   kVisitStructGeneric>();
+                                   kVisitStruct, kVisitStructGeneric>();
   }
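// A self-contained sketch of the dispatch-table pattern registered above: a
// fixed-size table of callbacks indexed by a visitor id, filled once at
// initialization and consulted per object during the scavenge. The ids and
// callbacks below are invented for the sketch; they are not the V8 ones.
#include <array>
#include <cstdio>

enum SketchVisitorId {
  kSketchVisitSeqString,
  kSketchVisitFixedArray,
  kSketchVisitCount
};
using SketchCallback = void (*)(const char* what);

static std::array<SketchCallback, kSketchVisitCount> g_sketch_table;

static void EvacuateDataObject(const char* what) {
  std::printf("copy the bytes of %s; it holds no pointers to update\n", what);
}

static void EvacuatePointerObject(const char* what) {
  std::printf("copy %s and revisit its pointer fields afterwards\n", what);
}

int main() {
  g_sketch_table[kSketchVisitSeqString] = &EvacuateDataObject;
  g_sketch_table[kSketchVisitFixedArray] = &EvacuatePointerObject;
  g_sketch_table[kSketchVisitFixedArray]("a FixedArray");  // dispatch by id
  return 0;
}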
 
   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
@@ -1925,7 +1900,7 @@ class ScavengingVisitor : public StaticVisitorBase {
   }
 
  private:
-  enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
+  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
 
   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
     bool should_record = false;
@@ -1945,22 +1920,20 @@ class ScavengingVisitor : public StaticVisitorBase {
   // Helper function used by CopyObject to copy a source object to an
   // allocated target object and update the forwarding pointer in the source
   // object.  Returns the target object.
-  INLINE(static void MigrateObject(Heap* heap,
-                                   HeapObject* source,
-                                   HeapObject* target,
-                                   int size)) {
+  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
+                                   HeapObject* target, int size)) {
     // If we migrate into to-space, then the to-space top pointer should be
     // right after the target object. Incorporate double alignment
     // over-allocation.
     DCHECK(!heap->InToSpace(target) ||
-        target->address() + size == heap->new_space()->top() ||
-        target->address() + size + kPointerSize == heap->new_space()->top());
+           target->address() + size == heap->new_space()->top() ||
+           target->address() + size + kPointerSize == heap->new_space()->top());
 
     // Make sure that we do not overwrite the promotion queue which is at
     // the end of to-space.
     DCHECK(!heap->InToSpace(target) ||
-        heap->promotion_queue()->IsBelowPromotionQueue(
-            heap->new_space()->top()));
+           heap->promotion_queue()->IsBelowPromotionQueue(
+               heap->new_space()->top()));
 
     // Copy the content of source to target.
     heap->CopyBlock(target->address(), source->address(), size);
@@ -1981,11 +1954,9 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
   }
 
-  template<int alignment>
-  static inline bool SemiSpaceCopyObject(Map* map,
-                                         HeapObject** slot,
-                                         HeapObject* object,
-                                         int object_size) {
+  template <int alignment>
+  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+                                         HeapObject* object, int object_size) {
     Heap* heap = map->GetHeap();
 
     int allocation_size = object_size;
@@ -2022,11 +1993,9 @@ class ScavengingVisitor : public StaticVisitorBase {
   }
 
 
-  template<ObjectContents object_contents, int alignment>
-  static inline bool PromoteObject(Map* map,
-                                   HeapObject** slot,
-                                   HeapObject* object,
-                                   int object_size) {
+  template <ObjectContents object_contents, int alignment>
+  static inline bool PromoteObject(Map* map, HeapObject** slot,
+                                   HeapObject* object, int object_size) {
     Heap* heap = map->GetHeap();
 
     int allocation_size = object_size;
@@ -2058,8 +2027,8 @@ class ScavengingVisitor : public StaticVisitorBase {
 
       if (object_contents == POINTER_OBJECT) {
         if (map->instance_type() == JS_FUNCTION_TYPE) {
-          heap->promotion_queue()->insert(
-              target, JSFunction::kNonWeakFieldsEndOffset);
+          heap->promotion_queue()->insert(target,
+                                          JSFunction::kNonWeakFieldsEndOffset);
         } else {
           heap->promotion_queue()->insert(target, object_size);
         }
@@ -2071,11 +2040,9 @@ class ScavengingVisitor : public StaticVisitorBase {
   }
 
 
-  template<ObjectContents object_contents, int alignment>
-  static inline void EvacuateObject(Map* map,
-                                    HeapObject** slot,
-                                    HeapObject* object,
-                                    int object_size) {
+  template <ObjectContents object_contents, int alignment>
+  static inline void EvacuateObject(Map* map, HeapObject** slot,
+                                    HeapObject* object, int object_size) {
     SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
     SLOW_DCHECK(object->Size() == object_size);
     Heap* heap = map->GetHeap();
@@ -2088,8 +2055,8 @@ class ScavengingVisitor : public StaticVisitorBase {
       }
     }
 
-    if (PromoteObject<object_contents, alignment>(
-        map, slot, object, object_size)) {
+    if (PromoteObject<object_contents, alignment>(map, slot, object,
+                                                  object_size)) {
       return;
     }
 
@@ -2100,11 +2067,10 @@ class ScavengingVisitor : public StaticVisitorBase {
   }
 
 
-  static inline void EvacuateJSFunction(Map* map,
-                                        HeapObject** slot,
+  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
                                         HeapObject* object) {
-    ObjectEvacuationStrategy<POINTER_OBJECT>::
-        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
+    ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+        JSFunction::kSize>(map, slot, object);
 
     HeapObject* target = *slot;
     MarkBit mark_bit = Marking::MarkBitFrom(target);
@@ -2116,88 +2082,79 @@ class ScavengingVisitor : public StaticVisitorBase {
       Address code_entry_slot =
           target->address() + JSFunction::kCodeEntryOffset;
       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
-      map->GetHeap()->mark_compact_collector()->
-          RecordCodeEntrySlot(code_entry_slot, code);
+      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
+          code_entry_slot, code);
     }
   }
 
 
-  static inline void EvacuateFixedArray(Map* map,
-                                        HeapObject** slot,
+  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
                                         HeapObject* object) {
     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+                                                     object_size);
   }
 
 
-  static inline void EvacuateFixedDoubleArray(Map* map,
-                                              HeapObject** slot,
+  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
                                               HeapObject* object) {
     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
     int object_size = FixedDoubleArray::SizeFor(length);
-    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+                                                  object_size);
   }
 
 
-  static inline void EvacuateFixedTypedArray(Map* map,
-                                             HeapObject** slot,
+  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
                                              HeapObject* object) {
     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
   }
 
 
-  static inline void EvacuateFixedFloat64Array(Map* map,
-                                               HeapObject** slot,
+  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
                                                HeapObject* object) {
     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
-    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+                                                  object_size);
   }
 
 
-  static inline void EvacuateByteArray(Map* map,
-                                       HeapObject** slot,
+  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
                                        HeapObject* object) {
     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
   }
 
 
-  static inline void EvacuateSeqOneByteString(Map* map,
-                                            HeapObject** slot,
-                                            HeapObject* object) {
-    int object_size = SeqOneByteString::cast(object)->
-        SeqOneByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
+  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int object_size = SeqOneByteString::cast(object)
+                          ->SeqOneByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
   }
 
 
-  static inline void EvacuateSeqTwoByteString(Map* map,
-                                              HeapObject** slot,
+  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
                                               HeapObject* object) {
-    int object_size = SeqTwoByteString::cast(object)->
-        SeqTwoByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
+    int object_size = SeqTwoByteString::cast(object)
+                          ->SeqTwoByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
   }
 
 
-  static inline void EvacuateShortcutCandidate(Map* map,
-                                               HeapObject** slot,
+  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
                                                HeapObject* object) {
     DCHECK(IsShortcutCandidate(map->instance_type()));
 
     Heap* heap = map->GetHeap();
 
     if (marks_handling == IGNORE_MARKS &&
-        ConsString::cast(object)->unchecked_second() ==
-        heap->empty_string()) {
+        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
       HeapObject* first =
           HeapObject::cast(ConsString::cast(object)->unchecked_first());
 
@@ -2223,27 +2180,24 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
 
     int object_size = ConsString::kSize;
-    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
+    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+                                                     object_size);
   }
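// A minimal sketch of the shortcut taken above: when the right-hand side of
// a cons string is the empty string, the slot can be forwarded straight to
// the left-hand side instead of evacuating the wrapper. The types here are
// invented stand-ins, not the V8 string classes.
#include <cassert>
#include <string>

struct SketchConsString {
  std::string first;
  std::string second;  // an empty second contributes nothing
};

const std::string* ShortcutIfPossible(const SketchConsString& cons) {
  if (cons.second.empty()) return &cons.first;  // skip the wrapper entirely
  return nullptr;  // keep the cons object and evacuate it normally
}

int main() {
  SketchConsString flattened{"payload", ""};
  assert(ShortcutIfPossible(flattened) == &flattened.first);
  SketchConsString real_cons{"left", "right"};
  assert(ShortcutIfPossible(real_cons) == nullptr);
  return 0;
}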
 
-  template<ObjectContents object_contents>
+  template <ObjectContents object_contents>
   class ObjectEvacuationStrategy {
    public:
-    template<int object_size>
-    static inline void VisitSpecialized(Map* map,
-                                        HeapObject** slot,
+    template <int object_size>
+    static inline void VisitSpecialized(Map* map, HeapObject** slot,
                                         HeapObject* object) {
-      EvacuateObject<object_contents, kObjectAlignment>(
-          map, slot, object, object_size);
+      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+                                                        object_size);
     }
 
-    static inline void Visit(Map* map,
-                             HeapObject** slot,
-                             HeapObject* object) {
+    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
       int object_size = map->instance_size();
-      EvacuateObject<object_contents, kObjectAlignment>(
-          map, slot, object, object_size);
+      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+                                                        object_size);
     }
   };
 
@@ -2251,8 +2205,8 @@ class ScavengingVisitor : public StaticVisitorBase {
 };
 
 
-template<MarksHandling marks_handling,
-         LoggingAndProfiling logging_and_profiling_mode>
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
 VisitorDispatchTable<ScavengingCallback>
     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
 
@@ -2269,31 +2223,26 @@ static void InitializeScavengingVisitorsTables() {
 
 void Heap::SelectScavengingVisitorsTable() {
   bool logging_and_profiling =
-      FLAG_verify_predictable ||
-      isolate()->logger()->is_logging() ||
+      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
       isolate()->cpu_profiler()->is_profiling() ||
       (isolate()->heap_profiler() != NULL &&
        isolate()->heap_profiler()->is_tracking_object_moves());
 
   if (!incremental_marking()->IsMarking()) {
     if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
-                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
-                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
   } else {
     if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
-                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
-                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
 
     if (incremental_marking()->IsCompacting()) {
@@ -2330,7 +2279,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
   reinterpret_cast<Map*>(result)->set_visitor_id(
-        StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
@@ -2377,11 +2326,11 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
 }
 
 
-AllocationResult Heap::AllocateFillerObject(int size,
-                                            bool double_align,
+AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
                                             AllocationSpace space) {
   HeapObject* obj;
-  { AllocationResult allocation = AllocateRaw(size, space, space);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, space);
     if (!allocation.To(&obj)) return allocation;
   }
 #ifdef DEBUG
@@ -2394,32 +2343,36 @@ AllocationResult Heap::AllocateFillerObject(int size,
 
 
 const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
-  {type, size, k##camel_name##MapRootIndex},
-  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
+#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
+  { type, size, k##camel_name##MapRootIndex }             \
+  ,
+    STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
 #undef STRING_TYPE_ELEMENT
 };
 
 
 const Heap::ConstantStringTable Heap::constant_string_table[] = {
-#define CONSTANT_STRING_ELEMENT(name, contents)                                \
-  {contents, k##name##RootIndex},
-  INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#define CONSTANT_STRING_ELEMENT(name, contents) \
+  { contents, k##name##RootIndex }              \
+  ,
+    INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
 #undef CONSTANT_STRING_ELEMENT
 };
 
 
 const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
-  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
-  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
+#define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
+  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
+  ,
+    STRUCT_LIST(STRUCT_TABLE_ELEMENT)
 #undef STRUCT_TABLE_ELEMENT
 };
 
 
 bool Heap::CreateInitialMaps() {
   HeapObject* obj;
-  { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
+  {
+    AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
     if (!allocation.To(&obj)) return false;
   }
   // Map::cast cannot be used due to uninitialized map field.
@@ -2428,11 +2381,12 @@ bool Heap::CreateInitialMaps() {
   new_meta_map->set_map(new_meta_map);
 
   {  // Partial map allocation
-#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                  \
-    { Map* map;                                                                \
-      if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
-      set_##field_name##_map(map);                                             \
-    }
+#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
+  {                                                                          \
+    Map* map;                                                                \
+    if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
+    set_##field_name##_map(map);                                             \
+  }
 
     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
@@ -2444,18 +2398,21 @@ bool Heap::CreateInitialMaps() {
   }
 
   // Allocate the empty array.
-  { AllocationResult allocation = AllocateEmptyFixedArray();
+  {
+    AllocationResult allocation = AllocateEmptyFixedArray();
     if (!allocation.To(&obj)) return false;
   }
   set_empty_fixed_array(FixedArray::cast(obj));
 
-  { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
+  {
+    AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
     if (!allocation.To(&obj)) return false;
   }
   set_null_value(Oddball::cast(obj));
   Oddball::cast(obj)->set_kind(Oddball::kNull);
 
-  { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
+  {
+    AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
     if (!allocation.To(&obj)) return false;
   }
   set_undefined_value(Oddball::cast(obj));
@@ -2466,13 +2423,15 @@ bool Heap::CreateInitialMaps() {
   set_exception(null_value());
 
   // Allocate the empty descriptor array.
-  { AllocationResult allocation = AllocateEmptyFixedArray();
+  {
+    AllocationResult allocation = AllocateEmptyFixedArray();
     if (!allocation.To(&obj)) return false;
   }
   set_empty_descriptor_array(DescriptorArray::cast(obj));
 
   // Allocate the constant pool array.
-  { AllocationResult allocation = AllocateEmptyConstantPoolArray();
+  {
+    AllocationResult allocation = AllocateEmptyConstantPoolArray();
     if (!allocation.To(&obj)) return false;
   }
   set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
@@ -2522,22 +2481,23 @@ bool Heap::CreateInitialMaps() {
   constant_pool_array_map()->set_constructor(null_value());
 
   {  // Map allocation
-#define ALLOCATE_MAP(instance_type, size, field_name)                          \
-    { Map* map;                                                                \
-      if (!AllocateMap((instance_type), size).To(&map)) return false;          \
-      set_##field_name##_map(map);                                             \
-    }
+#define ALLOCATE_MAP(instance_type, size, field_name)               \
+  {                                                                 \
+    Map* map;                                                       \
+    if (!AllocateMap((instance_type), size).To(&map)) return false; \
+    set_##field_name##_map(map);                                    \
+  }
 
-#define ALLOCATE_VARSIZE_MAP(instance_type, field_name)                        \
-    ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+  ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
     DCHECK(fixed_array_map() != fixed_cow_array_map());
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
-    ALLOCATE_MAP(
-        MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize, mutable_heap_number)
+    ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
+                 mutable_heap_number)
     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
 
@@ -2551,7 +2511,8 @@ bool Heap::CreateInitialMaps() {
 
     for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
       const StringTypeTable& entry = string_type_table[i];
-      { AllocationResult allocation = AllocateMap(entry.type, entry.size);
+      {
+        AllocationResult allocation = AllocateMap(entry.type, entry.size);
         if (!allocation.To(&obj)) return false;
       }
       // Mark cons string maps as unstable, because their objects can change
@@ -2571,18 +2532,17 @@ bool Heap::CreateInitialMaps() {
     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
 
-#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)            \
-    ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize,   \
-        external_##type##_array)
+#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)        \
+  ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
+               external_##type##_array)
 
-     TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
+    TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
 
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)         \
-    ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE,                           \
-        fixed_##type##_array)
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+  ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
 
-     TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+    TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
@@ -2598,8 +2558,7 @@ bool Heap::CreateInitialMaps() {
     for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
       const StructTable& entry = struct_table[i];
       Map* map;
-      if (!AllocateMap(entry.type, entry.size).To(&map))
-        return false;
+      if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
       roots_[entry.index] = map;
     }
 
@@ -2619,39 +2578,40 @@ bool Heap::CreateInitialMaps() {
         StaticVisitorBase::kVisitNativeContext);
 
     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
-        shared_function_info)
+                 shared_function_info)
 
-    ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
-        message_object)
-    ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
-        external)
+    ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
+    ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
     external_map()->set_is_extensible(false);
 #undef ALLOCATE_VARSIZE_MAP
 #undef ALLOCATE_MAP
   }
 
-  { // Empty arrays
-    { ByteArray* byte_array;
+  {  // Empty arrays
+    {
+      ByteArray* byte_array;
       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
       set_empty_byte_array(byte_array);
     }
 
-#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)           \
-    { ExternalArray* obj;                                                      \
-      if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj))        \
-          return false;                                                        \
-      set_empty_external_##type##_array(obj);                                  \
-    }
+#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
+  {                                                                   \
+    ExternalArray* obj;                                               \
+    if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
+      return false;                                                   \
+    set_empty_external_##type##_array(obj);                           \
+  }
 
     TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
 
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size)        \
-    { FixedTypedArrayBase* obj;                                                \
-      if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj))      \
-          return false;                                                        \
-      set_empty_fixed_##type##_array(obj);                                     \
-    }
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+  {                                                                     \
+    FixedTypedArrayBase* obj;                                           \
+    if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+      return false;                                                     \
+    set_empty_fixed_##type##_array(obj);                                \
+  }
 
     TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
@@ -2661,8 +2621,7 @@ bool Heap::CreateInitialMaps() {
 }
 
 
-AllocationResult Heap::AllocateHeapNumber(double value,
-                                          MutableMode mode,
+AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
                                           PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
@@ -2672,7 +2631,8 @@ AllocationResult Heap::AllocateHeapNumber(double value,
   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
 
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -2688,7 +2648,8 @@ AllocationResult Heap::AllocateCell(Object* value) {
   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
 
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   result->set_map_no_write_barrier(cell_map());
@@ -2799,62 +2760,45 @@ void Heap::CreateInitialObjects() {
   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
 
   // Finish initializing oddballs after creating the string table.
-  Oddball::Initialize(isolate(),
-                      factory->undefined_value(),
-                      "undefined",
-                      factory->nan_value(),
-                      Oddball::kUndefined);
+  Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
+                      factory->nan_value(), Oddball::kUndefined);
 
   // Initialize the null_value.
-  Oddball::Initialize(isolate(),
-                      factory->null_value(),
-                      "null",
-                      handle(Smi::FromInt(0), isolate()),
-                      Oddball::kNull);
-
-  set_true_value(*factory->NewOddball(factory->boolean_map(),
-                                      "true",
+  Oddball::Initialize(isolate(), factory->null_value(), "null",
+                      handle(Smi::FromInt(0), isolate()), Oddball::kNull);
+
+  set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
                                       handle(Smi::FromInt(1), isolate()),
                                       Oddball::kTrue));
 
-  set_false_value(*factory->NewOddball(factory->boolean_map(),
-                                       "false",
+  set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
                                        handle(Smi::FromInt(0), isolate()),
                                        Oddball::kFalse));
 
-  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(),
-                                          "hole",
+  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
                                           handle(Smi::FromInt(-1), isolate()),
                                           Oddball::kTheHole));
 
-  set_uninitialized_value(
-      *factory->NewOddball(factory->uninitialized_map(),
-                           "uninitialized",
-                           handle(Smi::FromInt(-1), isolate()),
-                           Oddball::kUninitialized));
-
-  set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(),
-                                            "arguments_marker",
-                                            handle(Smi::FromInt(-4), isolate()),
-                                            Oddball::kArgumentMarker));
-
-  set_no_interceptor_result_sentinel(
-      *factory->NewOddball(factory->no_interceptor_result_sentinel_map(),
-                           "no_interceptor_result_sentinel",
-                           handle(Smi::FromInt(-2), isolate()),
-                           Oddball::kOther));
-
-  set_termination_exception(
-      *factory->NewOddball(factory->termination_exception_map(),
-                           "termination_exception",
-                           handle(Smi::FromInt(-3), isolate()),
-                           Oddball::kOther));
-
-  set_exception(
-      *factory->NewOddball(factory->exception_map(),
-                           "exception",
-                           handle(Smi::FromInt(-5), isolate()),
-                           Oddball::kException));
+  set_uninitialized_value(*factory->NewOddball(
+      factory->uninitialized_map(), "uninitialized",
+      handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
+
+  set_arguments_marker(*factory->NewOddball(
+      factory->arguments_marker_map(), "arguments_marker",
+      handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
+
+  set_no_interceptor_result_sentinel(*factory->NewOddball(
+      factory->no_interceptor_result_sentinel_map(),
+      "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
+      Oddball::kOther));
+
+  set_termination_exception(*factory->NewOddball(
+      factory->termination_exception_map(), "termination_exception",
+      handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
+
+  set_exception(*factory->NewOddball(factory->exception_map(), "exception",
+                                     handle(Smi::FromInt(-5), isolate()),
+                                     Oddball::kException));
 
   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
     Handle<String> str =
@@ -2894,12 +2838,12 @@ void Heap::CreateInitialObjects() {
   Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
   set_intrinsic_function_names(*intrinsic_names);
 
-  set_number_string_cache(*factory->NewFixedArray(
-      kInitialNumberStringCacheSize * 2, TENURED));
+  set_number_string_cache(
+      *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
 
   // Allocate cache for single character one byte strings.
-  set_single_character_string_cache(*factory->NewFixedArray(
-      String::kMaxOneByteCharCode + 1, TENURED));
+  set_single_character_string_cache(
+      *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
 
   // Allocate cache for string split and regexp-multiple.
   set_string_split_cache(*factory->NewFixedArray(
@@ -2908,8 +2852,8 @@ void Heap::CreateInitialObjects() {
       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
 
   // Allocate cache for external strings pointing to native source code.
-  set_natives_source_cache(*factory->NewFixedArray(
-      Natives::GetBuiltinsCount()));
+  set_natives_source_cache(
+      *factory->NewFixedArray(Natives::GetBuiltinsCount()));
 
   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
 
@@ -2944,8 +2888,8 @@ void Heap::CreateInitialObjects() {
   // Handling of script id generation is in Factory::NewScript.
   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
 
-  set_allocation_sites_scratchpad(*factory->NewFixedArray(
-      kAllocationSiteScratchpadSize, TENURED));
+  set_allocation_sites_scratchpad(
+      *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
   InitializeAllocationSitesScratchpad();
 
   // Initialize keyed lookup cache.
@@ -2964,28 +2908,27 @@ void Heap::CreateInitialObjects() {
 
 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
   RootListIndex writable_roots[] = {
-    kStoreBufferTopRootIndex,
-    kStackLimitRootIndex,
-    kNumberStringCacheRootIndex,
-    kInstanceofCacheFunctionRootIndex,
-    kInstanceofCacheMapRootIndex,
-    kInstanceofCacheAnswerRootIndex,
-    kCodeStubsRootIndex,
-    kNonMonomorphicCacheRootIndex,
-    kPolymorphicCodeCacheRootIndex,
-    kLastScriptIdRootIndex,
-    kEmptyScriptRootIndex,
-    kRealStackLimitRootIndex,
-    kArgumentsAdaptorDeoptPCOffsetRootIndex,
-    kConstructStubDeoptPCOffsetRootIndex,
-    kGetterStubDeoptPCOffsetRootIndex,
-    kSetterStubDeoptPCOffsetRootIndex,
-    kStringTableRootIndex,
+      kStoreBufferTopRootIndex,
+      kStackLimitRootIndex,
+      kNumberStringCacheRootIndex,
+      kInstanceofCacheFunctionRootIndex,
+      kInstanceofCacheMapRootIndex,
+      kInstanceofCacheAnswerRootIndex,
+      kCodeStubsRootIndex,
+      kNonMonomorphicCacheRootIndex,
+      kPolymorphicCodeCacheRootIndex,
+      kLastScriptIdRootIndex,
+      kEmptyScriptRootIndex,
+      kRealStackLimitRootIndex,
+      kArgumentsAdaptorDeoptPCOffsetRootIndex,
+      kConstructStubDeoptPCOffsetRootIndex,
+      kGetterStubDeoptPCOffsetRootIndex,
+      kSetterStubDeoptPCOffsetRootIndex,
+      kStringTableRootIndex,
   };
 
   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
-    if (root_index == writable_roots[i])
-      return true;
+    if (root_index == writable_roots[i]) return true;
   }
   return false;
 }
@@ -2993,14 +2936,12 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
 
 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
   return !RootCanBeWrittenAfterInitialization(root_index) &&
-      !InNewSpace(roots_array_start()[root_index]);
+         !InNewSpace(roots_array_start()[root_index]);
 }
 
 
-Object* RegExpResultsCache::Lookup(Heap* heap,
-                                   String* key_string,
-                                   Object* key_pattern,
-                                   ResultsCacheType type) {
+Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
+                                   Object* key_pattern, ResultsCacheType type) {
   FixedArray* cache;
   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
   if (type == STRING_SPLIT_SUBSTRINGS) {
@@ -3015,7 +2956,7 @@ Object* RegExpResultsCache::Lookup(Heap* heap,
 
   uint32_t hash = key_string->Hash();
   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
-      ~(kArrayEntriesPerCacheEntry - 1));
+                    ~(kArrayEntriesPerCacheEntry - 1));
   if (cache->get(index + kStringOffset) == key_string &&
       cache->get(index + kPatternOffset) == key_pattern) {
     return cache->get(index + kArrayOffset);
@@ -3030,8 +2971,7 @@ Object* RegExpResultsCache::Lookup(Heap* heap,
 }
 
 
-void RegExpResultsCache::Enter(Isolate* isolate,
-                               Handle<String> key_string,
+void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
                                Handle<Object> key_pattern,
                                Handle<FixedArray> value_array,
                                ResultsCacheType type) {
@@ -3050,7 +2990,7 @@ void RegExpResultsCache::Enter(Isolate* isolate,
 
   uint32_t hash = key_string->Hash();
   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
-      ~(kArrayEntriesPerCacheEntry - 1));
+                    ~(kArrayEntriesPerCacheEntry - 1));
   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
     cache->set(index + kStringOffset, *key_string);
     cache->set(index + kPatternOffset, *key_pattern);
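// A worked example of the two-step masking used for the cache index above,
// with illustrative constants (the real kRegExpResultsCacheSize and
// kArrayEntriesPerCacheEntry values are not assumed here, only that both are
// powers of two): the first mask folds the hash into the table, the second
// rounds down to the first slot of a cache entry.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kSketchCacheSize = 0x100;         // assumed power of two
  const uint32_t kSketchEntriesPerCacheEntry = 4;  // assumed power of two
  const uint32_t hash = 0x1234ABCDu;

  const uint32_t index = ((hash & (kSketchCacheSize - 1)) &
                          ~(kSketchEntriesPerCacheEntry - 1));
  assert(index == 0xCC);  // 0xCD folded into the table, rounded down to 0xCC
  assert(index % kSketchEntriesPerCacheEntry == 0);
  return 0;
}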
@@ -3137,8 +3077,8 @@ void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
     // We cannot use the normal write-barrier because slots need to be
     // recorded with non-incremental marking as well. We have to explicitly
     // record the slot to take evacuation candidates into account.
-    allocation_sites_scratchpad()->set(
-        allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
+    allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
+                                       site, SKIP_WRITE_BARRIER);
     Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
         allocation_sites_scratchpad_length_);
 
@@ -3147,8 +3087,8 @@ void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
       // candidates are not part of the global list of old space pages and
       // releasing an evacuation candidate due to a slots buffer overflow
       // results in lost pages.
-      mark_compact_collector()->RecordSlot(
-          slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
+      mark_compact_collector()->RecordSlot(slot, slot, *slot,
+                                           SlotsBuffer::IGNORE_OVERFLOW);
     }
     allocation_sites_scratchpad_length_++;
   }
@@ -3163,9 +3103,9 @@ Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
     ExternalArrayType array_type) {
   switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
-    case kExternal##Type##Array:                                              \
-      return kExternal##Type##ArrayMapRootIndex;
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                                  \
+    return kExternal##Type##ArrayMapRootIndex;
 
     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
 #undef ARRAY_TYPE_TO_ROOT_INDEX
@@ -3185,9 +3125,9 @@ Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
     ExternalArrayType array_type) {
   switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
-    case kExternal##Type##Array:                                              \
-      return kFixed##Type##ArrayMapRootIndex;
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                                  \
+    return kFixed##Type##ArrayMapRootIndex;
 
     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
 #undef ARRAY_TYPE_TO_ROOT_INDEX
@@ -3202,9 +3142,9 @@ Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
     ElementsKind elementsKind) {
   switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
-    case EXTERNAL_##TYPE##_ELEMENTS:                                          \
-      return kEmptyExternal##Type##ArrayRootIndex;
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case EXTERNAL_##TYPE##_ELEMENTS:                                \
+    return kEmptyExternal##Type##ArrayRootIndex;
 
     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
 #undef ELEMENT_KIND_TO_ROOT_INDEX
@@ -3219,9 +3159,9 @@ Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
     ElementsKind elementsKind) {
   switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
-    case TYPE##_ELEMENTS:                                                     \
-      return kEmptyFixed##Type##ArrayRootIndex;
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case TYPE##_ELEMENTS:                                           \
+    return kEmptyFixed##Type##ArrayRootIndex;
 
     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
 #undef ELEMENT_KIND_TO_ROOT_INDEX
@@ -3264,7 +3204,8 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
   int size = ByteArray::SizeFor(length);
   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -3320,38 +3261,37 @@ void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
 
 
 AllocationResult Heap::AllocateExternalArray(int length,
-                                         ExternalArrayType array_type,
-                                         void* external_pointer,
-                                         PretenureFlag pretenure) {
+                                             ExternalArrayType array_type,
+                                             void* external_pointer,
+                                             PretenureFlag pretenure) {
   int size = ExternalArray::kAlignedSize;
   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
-  result->set_map_no_write_barrier(
-      MapForExternalArrayType(array_type));
+  result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
   ExternalArray::cast(result)->set_length(length);
   ExternalArray::cast(result)->set_external_pointer(external_pointer);
   return result;
 }
 
-static void ForFixedTypedArray(ExternalArrayType array_type,
-                               int* element_size,
+static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
                                ElementsKind* element_kind) {
   switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                       \
-    case kExternal##Type##Array:                                              \
-      *element_size = size;                                                   \
-      *element_kind = TYPE##_ELEMENTS;                                        \
-      return;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    *element_size = size;                               \
+    *element_kind = TYPE##_ELEMENTS;                    \
+    return;
 
     TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
 
     default:
-      *element_size = 0;  // Bogus
+      *element_size = 0;               // Bogus
       *element_kind = UINT8_ELEMENTS;  // Bogus
       UNREACHABLE();
   }
@@ -3364,8 +3304,8 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
   int element_size;
   ElementsKind elements_kind;
   ForFixedTypedArray(array_type, &element_size, &elements_kind);
-  int size = OBJECT_POINTER_ALIGN(
-      length * element_size + FixedTypedArrayBase::kDataOffset);
+  int size = OBJECT_POINTER_ALIGN(length * element_size +
+                                  FixedTypedArrayBase::kDataOffset);
 #ifndef V8_HOST_ARCH_64_BIT
   if (array_type == kExternalFloat64Array) {
     size += kPointerSize;
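// A worked example of the size computation above with assumed constants
// (the data offset, pointer size and alignment below are placeholders, not
// the values from the V8 headers): on a 32-bit host a Float64Array gets one
// extra word so its 8-byte elements can later be double-aligned.
#include <cassert>

int main() {
  const int kSketchPointerSize = 4;  // assumed 32-bit host
  const int kSketchObjectAlignment = kSketchPointerSize;
  const int kSketchDataOffset = 16;  // assumed header size
  auto ObjectPointerAlign = [&](int s) {
    return (s + kSketchObjectAlignment - 1) & ~(kSketchObjectAlignment - 1);
  };

  const int length = 3;
  const int element_size = 8;  // Float64
  int size = ObjectPointerAlign(length * element_size + kSketchDataOffset);
  size += kSketchPointerSize;  // 32-bit only: room to double-align the data
  assert(size == 44);          // 3 * 8 + 16 = 40, already aligned, + 4 = 44
  return 0;
}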
@@ -3415,8 +3355,7 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
 
   result->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
-  DCHECK(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
          isolate_->code_range()->contains(code->address()));
   code->set_gc_metadata(Smi::FromInt(0));
   code->set_ic_age(global_ic_age_);
@@ -3453,8 +3392,7 @@ AllocationResult Heap::CopyCode(Code* code) {
   new_code->set_constant_pool(new_constant_pool);
 
   // Relocate the copy.
-  DCHECK(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
          isolate_->code_range()->contains(code->address()));
   new_code->Relocate(new_addr - old_addr);
   return new_code;
@@ -3465,7 +3403,8 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
   // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
   // do not risk leaving uninitialized Code object (and breaking the heap).
   ByteArray* reloc_info_array;
-  { AllocationResult allocation =
+  {
+    AllocationResult allocation =
         AllocateByteArray(reloc_info.length(), TENURED);
     if (!allocation.To(&reloc_info_array)) return allocation;
   }
@@ -3474,8 +3413,7 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
       code->constant_pool() != empty_constant_pool_array()) {
     // Copy the constant pool, since edits to the copied code may modify
     // the constant pool.
-    AllocationResult allocation =
-        CopyConstantPoolArray(code->constant_pool());
+    AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
     if (!allocation.To(&new_constant_pool)) return allocation;
   } else {
     new_constant_pool = empty_constant_pool_array();
@@ -3508,13 +3446,11 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
   new_code->set_constant_pool(new_constant_pool);
 
   // Copy patched rinfo.
-  CopyBytes(new_code->relocation_start(),
-            reloc_info.start(),
+  CopyBytes(new_code->relocation_start(), reloc_info.start(),
             static_cast<size_t>(reloc_info.length()));
 
   // Relocate the copy.
-  DCHECK(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
          isolate_->code_range()->contains(code->address()));
   new_code->Relocate(new_addr - old_addr);
 
@@ -3537,7 +3473,7 @@ void Heap::InitializeAllocationMemento(AllocationMemento* memento,
 
 
 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
-                            AllocationSite* allocation_site) {
+                                AllocationSite* allocation_site) {
   DCHECK(gc_state_ == NOT_IN_GC);
   DCHECK(map->instance_type() != MAP_TYPE);
   // If allocation failures are disallowed, we may allocate in a different
@@ -3562,8 +3498,7 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
 }
 
 
-void Heap::InitializeJSObjectFromMap(JSObject* obj,
-                                     FixedArray* properties,
+void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                      Map* map) {
   obj->set_properties(properties);
   obj->initialize_elements();
@@ -3582,8 +3517,8 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
   // so that object accesses before the constructor completes (e.g. in the
   // debugger) will not cause a crash.
   if (map->constructor()->IsJSFunction() &&
-      JSFunction::cast(map->constructor())->
-          IsInobjectSlackTrackingInProgress()) {
+      JSFunction::cast(map->constructor())
+          ->IsInobjectSlackTrackingInProgress()) {
     // We might want to shrink the object later.
     DCHECK(obj->GetInternalFieldCount() == 0);
     filler = Heap::one_pointer_filler_map();
@@ -3595,9 +3530,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
 
 
 AllocationResult Heap::AllocateJSObjectFromMap(
-    Map* map,
-    PretenureFlag pretenure,
-    bool allocate_properties,
+    Map* map, PretenureFlag pretenure, bool allocate_properties,
     AllocationSite* allocation_site) {
   // JSFunctions should be allocated using AllocateFunction to be
   // properly initialized.
@@ -3613,7 +3546,8 @@ AllocationResult Heap::AllocateJSObjectFromMap(
   if (allocate_properties) {
     int prop_size = map->InitialPropertiesLength();
     DCHECK(prop_size >= 0);
-    { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
+    {
+      AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
       if (!allocation.To(&properties)) return allocation;
     }
   } else {
@@ -3629,8 +3563,7 @@ AllocationResult Heap::AllocateJSObjectFromMap(
 
   // Initialize the JSObject.
   InitializeJSObjectFromMap(js_obj, properties, map);
-  DCHECK(js_obj->HasFastElements() ||
-         js_obj->HasExternalArrayElements() ||
+  DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
          js_obj->HasFixedTypedArrayElements());
   return js_obj;
 }
@@ -3670,34 +3603,30 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
-    { AllocationResult allocation =
+    {
+      AllocationResult allocation =
           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
       if (!allocation.To(&clone)) return allocation;
     }
     Address clone_address = clone->address();
-    CopyBlock(clone_address,
-              source->address(),
-              object_size);
+    CopyBlock(clone_address, source->address(), object_size);
     // Update write barrier for all fields that lie beyond the header.
-    RecordWrites(clone_address,
-                 JSObject::kHeaderSize,
+    RecordWrites(clone_address, JSObject::kHeaderSize,
                  (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
     wb_mode = SKIP_WRITE_BARRIER;
 
-    { int adjusted_object_size = site != NULL
-          ? object_size + AllocationMemento::kSize
-          : object_size;
-    AllocationResult allocation =
+    {
+      int adjusted_object_size =
+          site != NULL ? object_size + AllocationMemento::kSize : object_size;
+      AllocationResult allocation =
           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
       if (!allocation.To(&clone)) return allocation;
     }
     SLOW_DCHECK(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
-    CopyBlock(clone->address(),
-              source->address(),
-              object_size);
+    CopyBlock(clone->address(), source->address(), object_size);
 
     if (site != NULL) {
       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
@@ -3706,14 +3635,15 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
     }
   }
 
-  SLOW_DCHECK(
-      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+  SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
+              source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
   if (elements->length() > 0) {
     FixedArrayBase* elem;
-    { AllocationResult allocation;
+    {
+      AllocationResult allocation;
       if (elements->map() == fixed_cow_array_map()) {
         allocation = FixedArray::cast(elements);
       } else if (source->HasFastDoubleElements()) {
@@ -3728,7 +3658,8 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
   // Update properties if necessary.
   if (properties->length() > 0) {
     FixedArray* prop;
-    { AllocationResult allocation = CopyFixedArray(properties);
+    {
+      AllocationResult allocation = CopyFixedArray(properties);
       if (!allocation.To(&prop)) return allocation;
     }
     JSObject::cast(clone)->set_properties(prop, wb_mode);
@@ -3738,16 +3669,14 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
 }
 
 
-static inline void WriteOneByteData(Vector<const char> vector,
-                                    uint8_t* chars,
+static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
                                     int len) {
   // Only works for ascii.
   DCHECK(vector.length() == len);
   MemCopy(chars, vector.start(), len);
 }
 
-static inline void WriteTwoByteData(Vector<const char> vector,
-                                    uint16_t* chars,
+static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
                                     int len) {
   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
   unsigned stream_length = vector.length();
@@ -3786,9 +3715,9 @@ static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
 }
 
 
-template<bool is_one_byte, typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(
-    T t, int chars, uint32_t hash_field) {
+template <bool is_one_byte, typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+                                                      uint32_t hash_field) {
   DCHECK(chars >= 0);
   // Compute map and object size.
   int size;
@@ -3807,7 +3736,8 @@ AllocationResult Heap::AllocateInternalizedStringImpl(
 
   // Allocate string.
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -3829,14 +3759,13 @@ AllocationResult Heap::AllocateInternalizedStringImpl(
 
 
 // Need explicit instantiations.
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<true>(
-    String*, int, uint32_t);
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<false>(
-    String*, int, uint32_t);
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<false>(
+template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
+                                                                     int,
+                                                                     uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
+                                                                      int,
+                                                                      uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
     Vector<const char>, int, uint32_t);
 
 
@@ -3849,7 +3778,8 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
 
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -3872,7 +3802,8 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
 
   HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -3888,7 +3819,8 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
 AllocationResult Heap::AllocateEmptyFixedArray() {
   int size = FixedArray::SizeFor(0);
   HeapObject* result;
-  { AllocationResult allocation =
+  {
+    AllocationResult allocation =
         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -3912,7 +3844,8 @@ AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
 
   int len = src->length();
   HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
+  {
+    AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
     if (!allocation.To(&obj)) return allocation;
   }
   obj->set_map_no_write_barrier(fixed_array_map());
@@ -3941,13 +3874,13 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
   int len = src->length();
   HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
+  {
+    AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
     if (!allocation.To(&obj)) return allocation;
   }
   if (InNewSpace(obj)) {
     obj->set_map_no_write_barrier(map);
-    CopyBlock(obj->address() + kPointerSize,
-              src->address() + kPointerSize,
+    CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
               FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
@@ -3967,14 +3900,14 @@ AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                                    Map* map) {
   int len = src->length();
   HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
+  {
+    AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
     if (!allocation.To(&obj)) return allocation;
   }
   obj->set_map_no_write_barrier(map);
-  CopyBlock(
-      obj->address() + FixedDoubleArray::kLengthOffset,
-      src->address() + FixedDoubleArray::kLengthOffset,
-      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+  CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
+            src->address() + FixedDoubleArray::kLengthOffset,
+            FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
   return obj;
 }
 
@@ -3984,23 +3917,22 @@ AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
   HeapObject* obj;
   if (src->is_extended_layout()) {
     ConstantPoolArray::NumberOfEntries small(src,
-        ConstantPoolArray::SMALL_SECTION);
-    ConstantPoolArray::NumberOfEntries extended(src,
-        ConstantPoolArray::EXTENDED_SECTION);
+                                             ConstantPoolArray::SMALL_SECTION);
+    ConstantPoolArray::NumberOfEntries extended(
+        src, ConstantPoolArray::EXTENDED_SECTION);
     AllocationResult allocation =
         AllocateExtendedConstantPoolArray(small, extended);
     if (!allocation.To(&obj)) return allocation;
   } else {
     ConstantPoolArray::NumberOfEntries small(src,
-        ConstantPoolArray::SMALL_SECTION);
+                                             ConstantPoolArray::SMALL_SECTION);
     AllocationResult allocation = AllocateConstantPoolArray(small);
     if (!allocation.To(&obj)) return allocation;
   }
   obj->set_map_no_write_barrier(map);
-  CopyBlock(
-      obj->address() + ConstantPoolArray::kFirstEntryOffset,
-      src->address() + ConstantPoolArray::kFirstEntryOffset,
-      src->size() - ConstantPoolArray::kFirstEntryOffset);
+  CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
+            src->address() + ConstantPoolArray::kFirstEntryOffset,
+            src->size() - ConstantPoolArray::kFirstEntryOffset);
   return obj;
 }
 
@@ -4026,7 +3958,8 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
 
   DCHECK(!InNewSpace(filler));
   HeapObject* result;
-  { AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
+  {
+    AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -4047,7 +3980,8 @@ AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
   if (length == 0) return empty_fixed_array();
 
   HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
+  {
+    AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
     if (!allocation.To(&obj)) return allocation;
   }
 
@@ -4058,8 +3992,7 @@ AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
 
 
 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
-    int length,
-    PretenureFlag pretenure) {
+    int length, PretenureFlag pretenure) {
   if (length == 0) return empty_fixed_array();
 
   HeapObject* elements;
@@ -4084,7 +4017,8 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
 
   HeapObject* object;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!allocation.To(&object)) return allocation;
   }
 
@@ -4093,7 +4027,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
 
 
 AllocationResult Heap::AllocateConstantPoolArray(
-      const ConstantPoolArray::NumberOfEntries& small) {
+    const ConstantPoolArray::NumberOfEntries& small) {
   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
   int size = ConstantPoolArray::SizeFor(small);
 #ifndef V8_HOST_ARCH_64_BIT
@@ -4102,7 +4036,8 @@ AllocationResult Heap::AllocateConstantPoolArray(
   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
 
   HeapObject* object;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
     if (!allocation.To(&object)) return allocation;
   }
   object = EnsureDoubleAligned(this, object, size);
@@ -4127,7 +4062,8 @@ AllocationResult Heap::AllocateExtendedConstantPoolArray(
   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
 
   HeapObject* object;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
     if (!allocation.To(&object)) return allocation;
   }
   object = EnsureDoubleAligned(this, object, size);
@@ -4144,7 +4080,8 @@ AllocationResult Heap::AllocateEmptyConstantPoolArray() {
   ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
   int size = ConstantPoolArray::SizeFor(small);
   HeapObject* result;
-  { AllocationResult allocation =
+  {
+    AllocationResult allocation =
         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -4174,8 +4111,8 @@ AllocationResult Heap::AllocateSymbol() {
   } while (hash == 0 && attempts < 30);
   if (hash == 0) hash = 1;  // never return 0
 
-  Symbol::cast(result)->set_hash_field(
-      Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
+  Symbol::cast(result)
+      ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
   Symbol::cast(result)->set_name(undefined_value());
   Symbol::cast(result)->set_flags(Smi::FromInt(0));
 
@@ -4188,8 +4125,10 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
   Map* map;
   switch (type) {
 #define MAKE_CASE(NAME, Name, name) \
-    case NAME##_TYPE: map = name##_map(); break;
-STRUCT_LIST(MAKE_CASE)
+  case NAME##_TYPE:                 \
+    map = name##_map();             \
+    break;
+    STRUCT_LIST(MAKE_CASE)
 #undef MAKE_CASE
     default:
       UNREACHABLE();
@@ -4198,7 +4137,8 @@ STRUCT_LIST(MAKE_CASE)
   int size = map->instance_size();
   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
   Struct* result;
-  { AllocationResult allocation = Allocate(map, space);
+  {
+    AllocationResult allocation = Allocate(map, space);
     if (!allocation.To(&result)) return allocation;
   }
   result->InitializeBody(size);
@@ -4264,8 +4204,7 @@ bool Heap::IdleNotification(int hint) {
   // The size factor is in range [5..250]. The numbers here are chosen from
   // experiments. If you change them, make sure to test with
   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
-  intptr_t step_size =
-      size_factor * IncrementalMarking::kAllocatedThreshold;
+  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
 
   isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint);
   HistogramTimerScope idle_notification_scope(
@@ -4306,8 +4245,8 @@ bool Heap::IdleNotification(int hint) {
     }
   }
 
-  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
-                              mark_sweeps_since_idle_round_started_;
+  int remaining_mark_sweeps =
+      kMaxMarkSweepsInIdleRound - mark_sweeps_since_idle_round_started_;
 
   if (incremental_marking()->IsStopped()) {
     // If there are no more than two GCs left in this idle round and we are
@@ -4372,8 +4311,8 @@ void Heap::ReportCodeStatistics(const char* title) {
 // just-completed scavenge collection).
 void Heap::ReportHeapStatistics(const char* title) {
   USE(title);
-  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
-         title, gc_count_);
+  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
+         gc_count_);
   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
          old_generation_allocation_limit_);
 
@@ -4405,22 +4344,18 @@ void Heap::ReportHeapStatistics(const char* title) {
 
 #endif  // DEBUG
 
-bool Heap::Contains(HeapObject* value) {
-  return Contains(value->address());
-}
+bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
 
 
 bool Heap::Contains(Address addr) {
   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
   return HasBeenSetUp() &&
-    (new_space_.ToSpaceContains(addr) ||
-     old_pointer_space_->Contains(addr) ||
-     old_data_space_->Contains(addr) ||
-     code_space_->Contains(addr) ||
-     map_space_->Contains(addr) ||
-     cell_space_->Contains(addr) ||
-     property_cell_space_->Contains(addr) ||
-     lo_space_->SlowContains(addr));
+         (new_space_.ToSpaceContains(addr) ||
+          old_pointer_space_->Contains(addr) ||
+          old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
+          map_space_->Contains(addr) || cell_space_->Contains(addr) ||
+          property_cell_space_->Contains(addr) ||
+          lo_space_->SlowContains(addr));
 }
 
 
@@ -4498,16 +4433,14 @@ void Heap::ZapFromSpace() {
   while (it.has_next()) {
     NewSpacePage* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
-         cursor < limit;
-         cursor += kPointerSize) {
+         cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
     }
   }
 }
 
 
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
-                                             Address end,
+void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
 
@@ -4564,21 +4497,17 @@ bool IsAMapPointerAddress(Object** addr) {
 }
 
 
-bool EverythingsAPointer(Object** addr) {
-  return true;
-}
+bool EverythingsAPointer(Object** addr) { return true; }
 
 
-static void CheckStoreBuffer(Heap* heap,
-                             Object** current,
-                             Object** limit,
+static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
                              Object**** store_buffer_position,
                              Object*** store_buffer_top,
                              CheckStoreBufferFilter filter,
                              Address special_garbage_start,
                              Address special_garbage_end) {
   Map* free_space_map = heap->free_space_map();
-  for ( ; current < limit; current++) {
+  for (; current < limit; current++) {
     Object* o = *current;
     Address current_address = reinterpret_cast<Address>(current);
     // Skip free space.
@@ -4643,13 +4572,8 @@ void Heap::OldPointerSpaceCheckStoreBuffer() {
     Object*** store_buffer_top = store_buffer()->Top();
 
     Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this,
-                     current,
-                     limit,
-                     &store_buffer_position,
-                     store_buffer_top,
-                     &EverythingsAPointer,
-                     space->top(),
+    CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                     store_buffer_top, &EverythingsAPointer, space->top(),
                      space->limit());
   }
 }
@@ -4671,13 +4595,8 @@ void Heap::MapSpaceCheckStoreBuffer() {
     Object*** store_buffer_top = store_buffer()->Top();
 
     Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this,
-                     current,
-                     limit,
-                     &store_buffer_position,
-                     store_buffer_top,
-                     &IsAMapPointerAddress,
-                     space->top(),
+    CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                     store_buffer_top, &IsAMapPointerAddress, space->top(),
                      space->limit());
   }
 }
@@ -4695,14 +4614,8 @@ void Heap::LargeObjectSpaceCheckStoreBuffer() {
       Object** current = reinterpret_cast<Object**>(object->address());
       Object** limit =
           reinterpret_cast<Object**>(object->address() + object->Size());
-      CheckStoreBuffer(this,
-                       current,
-                       limit,
-                       &store_buffer_position,
-                       store_buffer_top,
-                       &EverythingsAPointer,
-                       NULL,
-                       NULL);
+      CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                       store_buffer_top, &EverythingsAPointer, NULL, NULL);
     }
   }
 }
@@ -4718,8 +4631,7 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
   v->Synchronize(VisitorSynchronization::kStringTable);
-  if (mode != VISIT_ALL_IN_SCAVENGE &&
-      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
   }
@@ -4815,10 +4727,8 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
 // TODO(1236194): Since the heap size is configurable on the command line
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semi_space_size,
-                         int max_old_space_size,
-                         int max_executable_size,
-                         size_t code_range_size) {
+bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+                         int max_executable_size, size_t code_range_size) {
   if (HasBeenSetUp()) return false;
 
   // Overwrite default configuration.
@@ -4883,8 +4793,10 @@ bool Heap::ConfigureHeap(int max_semi_space_size,
     if (initial_semispace_size > max_semi_space_size_) {
       initial_semispace_size_ = max_semi_space_size_;
       if (FLAG_trace_gc) {
-        PrintPID("Min semi-space size cannot be more than the maximum"
-                 "semi-space size of %d MB\n", max_semi_space_size_);
+        PrintPID(
+            "Min semi-space size cannot be more than the maximum"
+            "semi-space size of %d MB\n",
+            max_semi_space_size_);
       }
     } else {
       initial_semispace_size_ = initial_semispace_size;
@@ -4912,9 +4824,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size,
 }
 
 
-bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(0, 0, 0, 0);
-}
+bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
 
 
 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
@@ -4941,11 +4851,10 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
       isolate()->memory_allocator()->Size() +
       isolate()->memory_allocator()->Available();
   *stats->os_error = base::OS::GetLastError();
-      isolate()->memory_allocator()->Available();
+  isolate()->memory_allocator()->Available();
   if (take_snapshot) {
     HeapIterator iterator(this);
-    for (HeapObject* obj = iterator.next();
-         obj != NULL;
+    for (HeapObject* obj = iterator.next(); obj != NULL;
          obj = iterator.next()) {
       InstanceType type = obj->map()->instance_type();
       DCHECK(0 <= type && type <= LAST_TYPE);
@@ -4957,21 +4866,19 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
 
 
 intptr_t Heap::PromotedSpaceSizeOfObjects() {
-  return old_pointer_space_->SizeOfObjects()
-      + old_data_space_->SizeOfObjects()
-      + code_space_->SizeOfObjects()
-      + map_space_->SizeOfObjects()
-      + cell_space_->SizeOfObjects()
-      + property_cell_space_->SizeOfObjects()
-      + lo_space_->SizeOfObjects();
+  return old_pointer_space_->SizeOfObjects() +
+         old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
+         map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
+         property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
 }
 
 
 int64_t Heap::PromotedExternalMemorySize() {
-  if (amount_of_external_allocated_memory_
-      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
-  return amount_of_external_allocated_memory_
-      - amount_of_external_allocated_memory_at_last_global_gc_;
+  if (amount_of_external_allocated_memory_ <=
+      amount_of_external_allocated_memory_at_last_global_gc_)
+    return 0;
+  return amount_of_external_allocated_memory_ -
+         amount_of_external_allocated_memory_at_last_global_gc_;
 }
 
 
@@ -5000,7 +4907,7 @@ intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
     // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
     factor = max_factor -
              (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
-             (kMaxHandles - kMinHandles);
+                 (kMaxHandles - kMinHandles);
   }
 
   if (FLAG_stress_compaction ||
@@ -5034,8 +4941,7 @@ void Heap::DisableInlineAllocation() {
 
   // Update inline allocation limit for old spaces.
   PagedSpaces spaces(this);
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
+  for (PagedSpace* space = spaces.next(); space != NULL;
        space = spaces.next()) {
     space->EmptyAllocationInfo();
   }
@@ -5074,7 +4980,7 @@ bool Heap::SetUp() {
 
   // Set up memory allocator.
   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
-      return false;
+    return false;
 
   // Set up new space.
   if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
@@ -5083,20 +4989,14 @@ bool Heap::SetUp() {
   new_space_top_after_last_gc_ = new_space()->top();
 
   // Initialize old pointer space.
-  old_pointer_space_ =
-      new OldSpace(this,
-                   max_old_generation_size_,
-                   OLD_POINTER_SPACE,
-                   NOT_EXECUTABLE);
+  old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
+                                    OLD_POINTER_SPACE, NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
   if (!old_pointer_space_->SetUp()) return false;
 
   // Initialize old data space.
-  old_data_space_ =
-      new OldSpace(this,
-                   max_old_generation_size_,
-                   OLD_DATA_SPACE,
-                   NOT_EXECUTABLE);
+  old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
+                                 NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
   if (!old_data_space_->SetUp()) return false;
 
@@ -5179,12 +5079,10 @@ void Heap::SetStackLimits() {
 
   // Set up the special root array entries containing the stack limits.
   // These are actually addresses, but the tag makes the GC ignore it.
-  roots_[kStackLimitRootIndex] =
-      reinterpret_cast<Object*>(
-          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
-  roots_[kRealStackLimitRootIndex] =
-      reinterpret_cast<Object*>(
-          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
+  roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
+  roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
 }
 
 
@@ -5204,8 +5102,7 @@ void Heap::TearDown() {
     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
-    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
-           get_max_alive_after_gc());
+    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
     PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
     PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
     PrintF("\n\n");
@@ -5214,25 +5111,25 @@ void Heap::TearDown() {
   if (FLAG_print_max_heap_committed) {
     PrintF("\n");
     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
-      MaximumCommittedMemory());
+           MaximumCommittedMemory());
     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
-      new_space_.MaximumCommittedMemory());
+           new_space_.MaximumCommittedMemory());
     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
-      old_data_space_->MaximumCommittedMemory());
+           old_data_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
-      old_pointer_space_->MaximumCommittedMemory());
+           old_pointer_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
-      old_pointer_space_->MaximumCommittedMemory());
+           old_pointer_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
-      code_space_->MaximumCommittedMemory());
+           code_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
-      map_space_->MaximumCommittedMemory());
+           map_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
-      cell_space_->MaximumCommittedMemory());
+           cell_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
-      property_cell_space_->MaximumCommittedMemory());
+           property_cell_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
-      lo_space_->MaximumCommittedMemory());
+           lo_space_->MaximumCommittedMemory());
     PrintF("\n\n");
   }
 
@@ -5300,8 +5197,7 @@ void Heap::TearDown() {
 
 
 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
-                                 GCType gc_type,
-                                 bool pass_isolate) {
+                                 GCType gc_type, bool pass_isolate) {
   DCHECK(callback != NULL);
   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
   DCHECK(!gc_prologue_callbacks_.Contains(pair));
@@ -5322,8 +5218,7 @@ void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
 
 
 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
-                                 GCType gc_type,
-                                 bool pass_isolate) {
+                                 GCType gc_type, bool pass_isolate) {
   DCHECK(callback != NULL);
   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
   DCHECK(!gc_epilogue_callbacks_.Contains(pair));
@@ -5372,8 +5267,9 @@ DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
 
 void Heap::EnsureWeakObjectToCodeTable() {
   if (!weak_object_to_code_table()->IsHashTable()) {
-    set_weak_object_to_code_table(*WeakHashTable::New(
-        isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED));
+    set_weak_object_to_code_table(
+        *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
+                            TENURED));
   }
 }
 
@@ -5384,12 +5280,11 @@ void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
 
 #ifdef DEBUG
 
-class PrintHandleVisitor: public ObjectVisitor {
+class PrintHandleVisitor : public ObjectVisitor {
  public:
   void VisitPointers(Object** start, Object** end) {
     for (Object** p = start; p < end; p++)
-      PrintF("  handle %p to %p\n",
-             reinterpret_cast<void*>(p),
+      PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
              reinterpret_cast<void*>(*p));
   }
 };
@@ -5448,7 +5343,6 @@ PagedSpace* PagedSpaces::next() {
 }
 
 
-
 OldSpace* OldSpaces::next() {
   switch (counter_++) {
     case OLD_POINTER_SPACE:
@@ -5467,16 +5361,14 @@ SpaceIterator::SpaceIterator(Heap* heap)
     : heap_(heap),
       current_space_(FIRST_SPACE),
       iterator_(NULL),
-      size_func_(NULL) {
-}
+      size_func_(NULL) {}
 
 
 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
     : heap_(heap),
       current_space_(FIRST_SPACE),
       iterator_(NULL),
-      size_func_(size_func) {
-}
+      size_func_(size_func) {}
 
 
 SpaceIterator::~SpaceIterator() {
@@ -5532,8 +5424,8 @@ ObjectIterator* SpaceIterator::CreateIterator() {
       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
       break;
     case PROPERTY_CELL_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
-                                         size_func_);
+      iterator_ =
+          new HeapObjectIterator(heap_->property_cell_space(), size_func_);
       break;
     case LO_SPACE:
       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
@@ -5628,9 +5520,7 @@ HeapIterator::HeapIterator(Heap* heap,
 }
 
 
-HeapIterator::~HeapIterator() {
-  Shutdown();
-}
+HeapIterator::~HeapIterator() { Shutdown(); }
 
 
 void HeapIterator::Init() {
@@ -5706,14 +5596,13 @@ void HeapIterator::reset() {
 
 Object* const PathTracer::kAnyGlobalObject = NULL;
 
-class PathTracer::MarkVisitor: public ObjectVisitor {
+class PathTracer::MarkVisitor : public ObjectVisitor {
  public:
   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
   void VisitPointers(Object** start, Object** end) {
     // Scan all HeapObject pointers in [start, end)
     for (Object** p = start; !tracer_->found() && (p < end); p++) {
-      if ((*p)->IsHeapObject())
-        tracer_->MarkRecursively(p, this);
+      if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
     }
   }
 
@@ -5722,14 +5611,13 @@ class PathTracer::MarkVisitor: public ObjectVisitor {
 };
 
 
-class PathTracer::UnmarkVisitor: public ObjectVisitor {
+class PathTracer::UnmarkVisitor : public ObjectVisitor {
  public:
   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
   void VisitPointers(Object** start, Object** end) {
     // Scan all HeapObject pointers in [start, end)
     for (Object** p = start; p < end; p++) {
-      if ((*p)->IsHeapObject())
-        tracer_->UnmarkRecursively(p, this);
+      if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
     }
   }
 
@@ -5806,10 +5694,11 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   // Scan the object body.
   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
     // This is specialized to scan Context's properly.
-    Object** start = reinterpret_cast<Object**>(obj->address() +
-                                                Context::kHeaderSize);
-    Object** end = reinterpret_cast<Object**>(obj->address() +
-        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
+    Object** start =
+        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
+    Object** end =
+        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
+                                   Context::FIRST_WEAK_SLOT * kPointerSize);
     mark_visitor->VisitPointers(start, end);
   } else {
     obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
@@ -5883,8 +5772,7 @@ void Heap::TracePathToObject(Object* target) {
 // and finds a path to any global object and prints it. Useful for
 // determining the source for leaks of global objects.
 void Heap::TracePathToGlobal() {
-  PathTracer tracer(PathTracer::kAnyGlobalObject,
-                    PathTracer::FIND_ALL,
+  PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
                     VISIT_ALL);
   IterateRoots(&tracer, VISIT_ONLY_STRONG);
 }
@@ -5929,14 +5817,12 @@ int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
 }
 
 
-void KeyedLookupCache::Update(Handle<Map> map,
-                              Handle<Name> name,
+void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
                               int field_offset) {
   DisallowHeapAllocation no_gc;
   if (!name->IsUniqueName()) {
-    if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
-                                                Handle<String>::cast(name)).
-        ToHandle(&name)) {
+    if (!StringTable::InternalizeStringIfExists(
+             name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
       return;
     }
   }
@@ -5947,7 +5833,7 @@ void KeyedLookupCache::Update(Handle<Map> map,
   int index = (Hash(map, name) & kHashMask);
   // After a GC there will be free slots, so we use them in order (this may
   // help to get the most frequently used one in position 0).
-  for (int i = 0; i< kEntriesPerBucket; i++) {
+  for (int i = 0; i < kEntriesPerBucket; i++) {
     Key& key = keys_[index];
     Object* free_entry_indicator = NULL;
     if (key.map == free_entry_indicator) {
@@ -6056,8 +5942,8 @@ void Heap::FreeQueuedChunks() {
       // If FromAnyPointerAddress encounters a slot that belongs to one of
       // these smaller pieces it will treat it as a slot on a normal Page.
       Address chunk_end = chunk->address() + chunk->size();
-      MemoryChunk* inner = MemoryChunk::FromAddress(
-          chunk->address() + Page::kPageSize);
+      MemoryChunk* inner =
+          MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
       while (inner <= inner_last) {
         // Size of a large chunk is always a multiple of
@@ -6070,8 +5956,7 @@ void Heap::FreeQueuedChunks() {
         inner->set_size(Page::kPageSize);
         inner->set_owner(lo_space());
         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-        inner = MemoryChunk::FromAddress(
-            inner->address() + Page::kPageSize);
+        inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
       }
     }
   }
@@ -6117,14 +6002,14 @@ void Heap::CheckpointObjectStats() {
   base::LockGuard<base::Mutex> lock_guard(
       checkpoint_object_stats_mutex.Pointer());
   Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
-  counters->count_of_##name()->Increment(                                      \
-      static_cast<int>(object_counts_[name]));                                 \
-  counters->count_of_##name()->Decrement(                                      \
-      static_cast<int>(object_counts_last_time_[name]));                       \
-  counters->size_of_##name()->Increment(                                       \
-      static_cast<int>(object_sizes_[name]));                                  \
-  counters->size_of_##name()->Decrement(                                       \
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
+  counters->count_of_##name()->Increment(                \
+      static_cast<int>(object_counts_[name]));           \
+  counters->count_of_##name()->Decrement(                \
+      static_cast<int>(object_counts_last_time_[name])); \
+  counters->size_of_##name()->Increment(                 \
+      static_cast<int>(object_sizes_[name]));            \
+  counters->size_of_##name()->Decrement(                 \
       static_cast<int>(object_sizes_last_time_[name]));
   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT
@@ -6171,5 +6056,5 @@ void Heap::CheckpointObjectStats() {
   MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }
-
-}  // namespace v8::internal
+}
+}  // namespace v8::internal
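
The heap.cc hunks above are almost entirely mechanical re-wrapping; the construct they touch over and over is the brace-scoped allocation check, where AllocateRaw returns an AllocationResult that either yields a HeapObject* via To() or is returned to the caller as a retry/failure. Below is a minimal, self-contained sketch of that control flow for readers outside the V8 tree. The types here (HeapObject, AllocationResult, AllocateRaw, AllocateThing) are simplified stand-ins, not the real V8 declarations.

// Sketch only: toy stand-ins that mirror the pattern reformatted above.
#include <cstdio>

struct HeapObject { int size; };

class AllocationResult {
 public:
  static AllocationResult Retry() { return AllocationResult(nullptr); }
  static AllocationResult Of(HeapObject* obj) { return AllocationResult(obj); }
  // Mirrors the To() calls in the hunks: succeeds iff an object was produced.
  bool To(HeapObject** out) const {
    if (object_ == nullptr) return false;
    *out = object_;
    return true;
  }

 private:
  explicit AllocationResult(HeapObject* obj) : object_(obj) {}
  HeapObject* object_;
};

// Toy allocator: fails for "too large" requests to exercise the retry path.
static AllocationResult AllocateRaw(int size) {
  if (size > 4096) return AllocationResult::Retry();
  return AllocationResult::Of(new HeapObject{size});  // leaked; sketch only
}

// The brace-scoped pattern the reformatted code uses throughout:
//   {
//     AllocationResult allocation = AllocateRaw(size, space, ...);
//     if (!allocation.To(&result)) return allocation;
//   }
static AllocationResult AllocateThing(int size) {
  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size);
    if (!allocation.To(&result)) return allocation;  // propagate the failure
  }
  result->size = size;  // initialize only after a successful raw allocation
  return AllocationResult::Of(result);
}

int main() {
  HeapObject* obj;
  std::printf("small: %s\n", AllocateThing(64).To(&obj) ? "ok" : "retry");
  std::printf("large: %s\n", AllocateThing(1 << 20).To(&obj) ? "ok" : "retry");
  return 0;
}
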
similarity index 86%
rename from src/heap.h
rename to src/heap/heap.h
index 54378fb..2315973 100644 (file)
--- a/src/heap.h
+++ b/src/heap/heap.h
@@ -2,21 +2,21 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_HEAP_H_
-#define V8_HEAP_H_
+#ifndef V8_HEAP_HEAP_H_
+#define V8_HEAP_HEAP_H_
 
 #include <cmath>
 
 #include "src/allocation.h"
 #include "src/assert-scope.h"
 #include "src/counters.h"
-#include "src/gc-tracer.h"
 #include "src/globals.h"
-#include "src/incremental-marking.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/spaces.h"
 #include "src/list.h"
-#include "src/mark-compact.h"
 #include "src/objects-visiting.h"
-#include "src/spaces.h"
 #include "src/splay-tree-inl.h"
 #include "src/store-buffer.h"
 
@@ -190,69 +190,69 @@ namespace internal {
   V(FixedArray, microtask_queue, MicrotaskQueue)
 
 // Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V)                                                       \
-  V(Smi, stack_limit, StackLimit)                                              \
-  V(Smi, real_stack_limit, RealStackLimit)                                     \
-  V(Smi, last_script_id, LastScriptId)                                         \
-  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
-  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)           \
-  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
+#define SMI_ROOT_LIST(V)                                                   \
+  V(Smi, stack_limit, StackLimit)                                          \
+  V(Smi, real_stack_limit, RealStackLimit)                                 \
+  V(Smi, last_script_id, LastScriptId)                                     \
+  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)       \
+  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)             \
   V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
 
-#define ROOT_LIST(V)                                  \
-  STRONG_ROOT_LIST(V)                                 \
-  SMI_ROOT_LIST(V)                                    \
+#define ROOT_LIST(V)  \
+  STRONG_ROOT_LIST(V) \
+  SMI_ROOT_LIST(V)    \
   V(StringTable, string_table, StringTable)
 
 // Heap roots that are known to be immortal immovable, for which we can safely
 // skip write barriers.
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V)   \
-  V(byte_array_map)                       \
-  V(free_space_map)                       \
-  V(one_pointer_filler_map)               \
-  V(two_pointer_filler_map)               \
-  V(undefined_value)                      \
-  V(the_hole_value)                       \
-  V(null_value)                           \
-  V(true_value)                           \
-  V(false_value)                          \
-  V(uninitialized_value)                  \
-  V(cell_map)                             \
-  V(global_property_cell_map)             \
-  V(shared_function_info_map)             \
-  V(meta_map)                             \
-  V(heap_number_map)                      \
-  V(mutable_heap_number_map)              \
-  V(native_context_map)                   \
-  V(fixed_array_map)                      \
-  V(code_map)                             \
-  V(scope_info_map)                       \
-  V(fixed_cow_array_map)                  \
-  V(fixed_double_array_map)               \
-  V(constant_pool_array_map)              \
-  V(no_interceptor_result_sentinel)       \
-  V(hash_table_map)                       \
-  V(ordered_hash_table_map)               \
-  V(empty_fixed_array)                    \
-  V(empty_byte_array)                     \
-  V(empty_descriptor_array)               \
-  V(empty_constant_pool_array)            \
-  V(arguments_marker)                     \
-  V(symbol_map)                           \
-  V(sloppy_arguments_elements_map)        \
-  V(function_context_map)                 \
-  V(catch_context_map)                    \
-  V(with_context_map)                     \
-  V(block_context_map)                    \
-  V(module_context_map)                   \
-  V(global_context_map)                   \
-  V(undefined_map)                        \
-  V(the_hole_map)                         \
-  V(null_map)                             \
-  V(boolean_map)                          \
-  V(uninitialized_map)                    \
-  V(message_object_map)                   \
-  V(foreign_map)                          \
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+  V(byte_array_map)                     \
+  V(free_space_map)                     \
+  V(one_pointer_filler_map)             \
+  V(two_pointer_filler_map)             \
+  V(undefined_value)                    \
+  V(the_hole_value)                     \
+  V(null_value)                         \
+  V(true_value)                         \
+  V(false_value)                        \
+  V(uninitialized_value)                \
+  V(cell_map)                           \
+  V(global_property_cell_map)           \
+  V(shared_function_info_map)           \
+  V(meta_map)                           \
+  V(heap_number_map)                    \
+  V(mutable_heap_number_map)            \
+  V(native_context_map)                 \
+  V(fixed_array_map)                    \
+  V(code_map)                           \
+  V(scope_info_map)                     \
+  V(fixed_cow_array_map)                \
+  V(fixed_double_array_map)             \
+  V(constant_pool_array_map)            \
+  V(no_interceptor_result_sentinel)     \
+  V(hash_table_map)                     \
+  V(ordered_hash_table_map)             \
+  V(empty_fixed_array)                  \
+  V(empty_byte_array)                   \
+  V(empty_descriptor_array)             \
+  V(empty_constant_pool_array)          \
+  V(arguments_marker)                   \
+  V(symbol_map)                         \
+  V(sloppy_arguments_elements_map)      \
+  V(function_context_map)               \
+  V(catch_context_map)                  \
+  V(with_context_map)                   \
+  V(block_context_map)                  \
+  V(module_context_map)                 \
+  V(global_context_map)                 \
+  V(undefined_map)                      \
+  V(the_hole_map)                       \
+  V(null_map)                           \
+  V(boolean_map)                        \
+  V(uninitialized_map)                  \
+  V(message_object_map)                 \
+  V(foreign_map)                        \
   V(neander_map)
 
 #define INTERNALIZED_STRING_LIST(V)                                \
@@ -357,8 +357,7 @@ typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
 class StoreBufferRebuilder {
  public:
   explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer) {
-  }
+      : store_buffer_(store_buffer) {}
 
   void Callback(MemoryChunk* page, StoreBufferEvent event);
 
@@ -375,7 +374,6 @@ class StoreBufferRebuilder {
 };
 
 
-
 // A queue of objects promoted during scavenge. Each object is accompanied
 // by its size to avoid dereferencing a map pointer for scanning.
 class PromotionQueue {
@@ -385,7 +383,7 @@ class PromotionQueue {
         rear_(NULL),
         limit_(NULL),
         emergency_stack_(0),
-        heap_(heap) { }
+        heap_(heap) {}
 
   void Initialize();
 
@@ -430,7 +428,7 @@ class PromotionQueue {
 
   bool is_empty() {
     return (front_ == rear_) &&
-        (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
   }
 
   inline void insert(HeapObject* target, int size);
@@ -448,8 +446,7 @@ class PromotionQueue {
       NewSpacePage* front_page =
           NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
       DCHECK(!front_page->prev_page()->is_anchor());
-      front_ =
-          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
+      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
     }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
@@ -469,7 +466,7 @@ class PromotionQueue {
   static const int kEntrySizeInWords = 2;
 
   struct Entry {
-    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
 
     HeapObject* obj_;
     int size_;
@@ -484,8 +481,7 @@ class PromotionQueue {
 };
 
 
-typedef void (*ScavengingCallback)(Map* map,
-                                   HeapObject** slot,
+typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
                                    HeapObject* object);
 
 
@@ -507,7 +503,7 @@ class ExternalStringTable {
   void TearDown();
 
  private:
-  explicit ExternalStringTable(Heap* heap) : heap_(heap) { }
+  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
 
   friend class Heap;
 
@@ -539,10 +535,8 @@ class Heap {
  public:
   // Configure heap size in MB before setup. Return false if the heap has been
   // set up already.
-  bool ConfigureHeap(int max_semi_space_size,
-                     int max_old_space_size,
-                     int max_executable_size,
-                     size_t code_range_size);
+  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+                     int max_executable_size, size_t code_range_size);
   bool ConfigureHeapDefault();
 
   // Prepares the heap, setting up memory areas that are needed in the isolate
@@ -619,9 +613,7 @@ class Heap {
   OldSpace* code_space() { return code_space_; }
   MapSpace* map_space() { return map_space_; }
   CellSpace* cell_space() { return cell_space_; }
-  PropertyCellSpace* property_cell_space() {
-    return property_cell_space_;
-  }
+  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
   LargeObjectSpace* lo_space() { return lo_space_; }
   PagedSpace* paged_space(int idx) {
     switch (idx) {
@@ -673,8 +665,8 @@ class Heap {
   // Returns a deep copy of the JavaScript object.
   // Properties and elements are copied too.
   // Optionally takes an AllocationSite to be appended in an AllocationMemento.
-  MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
-                                                AllocationSite* site = NULL);
+  MUST_USE_RESULT AllocationResult
+      CopyJSObject(JSObject* source, AllocationSite* site = NULL);
 
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
@@ -685,7 +677,7 @@ class Heap {
   // For use during bootup.
   void RepairFreeListsAfterBoot();
 
-  template<typename T>
+  template <typename T>
   static inline bool IsOneByte(T t, int chars);
 
   // Move len elements within a given array from src_index index to dst_index
@@ -725,8 +717,7 @@ class Heap {
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
   inline bool CollectGarbage(
-      AllocationSpace space,
-      const char* gc_reason = NULL,
+      AllocationSpace space, const char* gc_reason = NULL,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
   static const int kNoGCFlags = 0;
@@ -743,8 +734,7 @@ class Heap {
   // non-zero, then the slower precise sweeper is used, which leaves the heap
   // in a state where we can iterate over the heap visiting all objects.
   void CollectAllGarbage(
-      int flags,
-      const char* gc_reason = NULL,
+      int flags, const char* gc_reason = NULL,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
   // Last hope GC, should try to squeeze as much as possible.
@@ -773,40 +763,33 @@ class Heap {
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
   void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
-                             GCType gc_type_filter,
-                             bool pass_isolate = true);
+                             GCType gc_type_filter, bool pass_isolate = true);
   void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
 
   void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
-                             GCType gc_type_filter,
-                             bool pass_isolate = true);
+                             GCType gc_type_filter, bool pass_isolate = true);
   void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
 
-  // Heap root getters.  We have versions with and without type::cast() here.
-  // You can't use type::cast during GC because the assert fails.
-  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
-  // not corrupt the map.
-#define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  type* name() {                                                               \
-    return type::cast(roots_[k##camel_name##RootIndex]);                       \
-  }                                                                            \
-  type* raw_unchecked_##name() {                                               \
-    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);          \
+// Heap root getters.  We have versions with and without type::cast() here.
+// You can't use type::cast during GC because the assert fails.
+// TODO(1490): Try removing the unchecked accessors, now that GC marking does
+// not corrupt the map.
+#define ROOT_ACCESSOR(type, name, camel_name)                           \
+  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
+  type* raw_unchecked_##name() {                                        \
+    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);   \
   }
   ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
 // Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                                  \
-    Map* name##_map() {                                                        \
-      return Map::cast(roots_[k##Name##MapRootIndex]);                         \
-    }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
   STRUCT_LIST(STRUCT_MAP_ACCESSOR)
 #undef STRUCT_MAP_ACCESSOR
 
-#define STRING_ACCESSOR(name, str) String* name() {                            \
-    return String::cast(roots_[k##name##RootIndex]);                           \
-  }
+#define STRING_ACCESSOR(name, str) \
+  String* name() { return String::cast(roots_[k##name##RootIndex]); }
   INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
 #undef STRING_ACCESSOR
 
@@ -819,9 +802,7 @@ class Heap {
   }
   Object* native_contexts_list() const { return native_contexts_list_; }
 
-  void set_array_buffers_list(Object* object) {
-    array_buffers_list_ = object;
-  }
+  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
   Object* array_buffers_list() const { return array_buffers_list_; }
 
   void set_allocation_sites_list(Object* object) {
@@ -856,8 +837,7 @@ class Heap {
 
   // Iterate pointers to from semispace of new space found in memory interval
   // from start to end.
-  void IterateAndMarkPointersToFromSpace(Address start,
-                                         Address end,
+  void IterateAndMarkPointersToFromSpace(Address start, Address end,
                                          ObjectSlotCallback callback);
 
   // Returns whether the object resides in new space.
@@ -987,9 +967,7 @@ class Heap {
   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
 
 #ifdef DEBUG
-  void set_allocation_timeout(int timeout) {
-    allocation_timeout_ = timeout;
-  }
+  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
 
   void TracePathToObjectFrom(Object* target, Object* root);
   void TracePathToObject(Object* target);
@@ -1003,10 +981,7 @@ class Heap {
   static inline void ScavengePointer(HeapObject** p);
   static inline void ScavengeObject(HeapObject** p, HeapObject* object);
 
-  enum ScratchpadSlotMode {
-    IGNORE_SCRATCHPAD_SLOT,
-    RECORD_SCRATCHPAD_SLOT
-  };
+  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
 
   // If an object has an AllocationMemento trailing it, return it, otherwise
   // return NULL;
@@ -1015,12 +990,12 @@ class Heap {
   // An object may have an AllocationSite associated with it through a trailing
   // AllocationMemento. Its feedback should be updated when objects are found
   // in the heap.
-  static inline void UpdateAllocationSiteFeedback(
-      HeapObject* object, ScratchpadSlotMode mode);
+  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
+                                                  ScratchpadSlotMode mode);
 
   // Support for partial snapshots.  After calling this we have a linear
   // space to write objects in each space.
-  void ReserveSpace(int *sizes, Address* addresses);
+  void ReserveSpace(int* sizes, Address* addresses);
 
   //
   // Support for the API.
@@ -1049,25 +1024,18 @@ class Heap {
   static const int kPointerMultiplier = i::kPointerSize / 4;
 
   // The new space size has to be a power of 2. Sizes are in MB.
-  static const int kMaxSemiSpaceSizeLowMemoryDevice =
-      1 * kPointerMultiplier;
-  static const int kMaxSemiSpaceSizeMediumMemoryDevice =
-      4 * kPointerMultiplier;
-  static const int kMaxSemiSpaceSizeHighMemoryDevice =
-      8 * kPointerMultiplier;
-  static const int kMaxSemiSpaceSizeHugeMemoryDevice =
-      8 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
 
   // The old space size has to be a multiple of Page::kPageSize.
   // Sizes are in MB.
-  static const int kMaxOldSpaceSizeLowMemoryDevice =
-      128 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
   static const int kMaxOldSpaceSizeMediumMemoryDevice =
       256 * kPointerMultiplier;
-  static const int kMaxOldSpaceSizeHighMemoryDevice =
-      512 * kPointerMultiplier;
-  static const int kMaxOldSpaceSizeHugeMemoryDevice =
-      700 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
 
   // The executable size has to be a multiple of Page::kPageSize.
   // Sizes are in MB.
@@ -1102,17 +1070,15 @@ class Heap {
     INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
 #undef STRING_DECLARATION
 
-    // Utility type maps
+// Utility type maps
 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
     STRUCT_LIST(DECLARE_STRUCT_MAP)
 #undef DECLARE_STRUCT_MAP
-
     kStringTableRootIndex,
 
 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
     SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
 #undef ROOT_INDEX_DECLARATION
-
     kRootListLength,
     kStrongRootListLength = kStringTableRootIndex,
     kSmiRootsStart = kStringTableRootIndex + 1
@@ -1132,12 +1098,10 @@ class Heap {
   bool RootCanBeTreatedAsConstant(RootListIndex root_index);
 
   Map* MapForFixedTypedArray(ExternalArrayType array_type);
-  RootListIndex RootIndexForFixedTypedArray(
-      ExternalArrayType array_type);
+  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
 
   Map* MapForExternalArrayType(ExternalArrayType array_type);
-  RootListIndex RootIndexForExternalArrayType(
-      ExternalArrayType array_type);
+  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);
 
   RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
   RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
@@ -1167,17 +1131,11 @@ class Heap {
     semi_space_copied_object_size_ += object_size;
   }
 
-  inline void IncrementNodesDiedInNewSpace() {
-    nodes_died_in_new_space_++;
-  }
+  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
 
-  inline void IncrementNodesCopiedInNewSpace() {
-    nodes_copied_in_new_space_++;
-  }
+  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
 
-  inline void IncrementNodesPromoted() {
-    nodes_promoted_++;
-  }
+  inline void IncrementNodesPromoted() { nodes_promoted_++; }
 
   inline void IncrementYoungSurvivorsCounter(int survived) {
     DCHECK(survived >= 0);
@@ -1250,26 +1208,18 @@ class Heap {
     return &mark_compact_collector_;
   }
 
-  StoreBuffer* store_buffer() {
-    return &store_buffer_;
-  }
+  StoreBuffer* store_buffer() { return &store_buffer_; }
 
-  Marking* marking() {
-    return &marking_;
-  }
+  Marking* marking() { return &marking_; }
 
-  IncrementalMarking* incremental_marking() {
-    return &incremental_marking_;
-  }
+  IncrementalMarking* incremental_marking() { return &incremental_marking_; }
 
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
 
   // Returns the current sweep generation.
-  int sweep_generation() {
-    return sweep_generation_;
-  }
+  int sweep_generation() { return sweep_generation_; }
 
   inline Isolate* isolate();
 
@@ -1325,9 +1275,7 @@ class Heap {
 
   // Global inline caching age: it is incremented on some GCs after context
   // disposal. We use it to flush inline caches.
-  int global_ic_age() {
-    return global_ic_age_;
-  }
+  int global_ic_age() { return global_ic_age_; }
 
   void AgeInlineCaches() {
     global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
@@ -1341,9 +1289,7 @@ class Heap {
 
   void DeoptMarkedAllocationSites();
 
-  bool MaximumSizeScavenge() {
-    return maximum_size_scavenges_ > 0;
-  }
+  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
 
   bool DeoptMaybeTenuredAllocationSites() {
     return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
@@ -1398,9 +1344,7 @@ class Heap {
     }
 
 
-    ~RelocationLock() {
-      heap_->relocation_mutex_.Unlock();
-    }
+    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
 
    private:
     Heap* heap_;
@@ -1427,59 +1371,53 @@ class Heap {
   inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
 
   // This event is triggered after object is moved to a new place.
-  inline void OnMoveEvent(HeapObject* target,
-                          HeapObject* source,
+  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                           int size_in_bytes);
 
  protected:
   // Methods made available to tests.
 
   // Allocates a JS Map in the heap.
-  MUST_USE_RESULT AllocationResult AllocateMap(
-      InstanceType instance_type,
-      int instance_size,
-      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+  MUST_USE_RESULT AllocationResult
+      AllocateMap(InstanceType instance_type, int instance_size,
+                  ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
 
   // Allocates and initializes a new JavaScript object based on a
   // constructor.
   // If allocation_site is non-null, then a memento is emitted after the object
   // that points to the site.
-  MUST_USE_RESULT AllocationResult AllocateJSObject(
-      JSFunction* constructor,
-      PretenureFlag pretenure = NOT_TENURED,
-      AllocationSite* allocation_site = NULL);
+  MUST_USE_RESULT AllocationResult
+      AllocateJSObject(JSFunction* constructor,
+                       PretenureFlag pretenure = NOT_TENURED,
+                       AllocationSite* allocation_site = NULL);
 
   // Allocates and initializes a new JavaScript object based on a map.
   // Passing an allocation site means that a memento will be created that
   // points to the site.
-  MUST_USE_RESULT AllocationResult AllocateJSObjectFromMap(
-      Map* map,
-      PretenureFlag pretenure = NOT_TENURED,
-      bool alloc_props = true,
-      AllocationSite* allocation_site = NULL);
+  MUST_USE_RESULT AllocationResult
+      AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
+                              bool alloc_props = true,
+                              AllocationSite* allocation_site = NULL);
 
   // Allocated a HeapNumber from value.
-  MUST_USE_RESULT AllocationResult AllocateHeapNumber(
-      double value,
-      MutableMode mode = IMMUTABLE,
-      PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT AllocationResult
+      AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
+                         PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a byte array of the specified length
-  MUST_USE_RESULT AllocationResult AllocateByteArray(
-      int length,
-      PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT AllocationResult
+      AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
 
   // Copy the code and scope info part of the code object, but insert
   // the provided data as the relocation information.
-  MUST_USE_RESULT AllocationResult CopyCode(Code* code,
-                                            Vector<byte> reloc_info);
+  MUST_USE_RESULT AllocationResult
+      CopyCode(Code* code, Vector<byte> reloc_info);
 
   MUST_USE_RESULT AllocationResult CopyCode(Code* code);
 
   // Allocates a fixed array initialized with undefined values
-  MUST_USE_RESULT AllocationResult AllocateFixedArray(
-      int length,
-      PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
 
  private:
   Heap();
@@ -1561,12 +1499,12 @@ class Heap {
   // Total length of the strings we failed to flatten since the last GC.
   int unflattened_strings_length_;
 
-#define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  inline void set_##name(type* value) {                                        \
-    /* The deserializer makes use of the fact that these common roots are */   \
-    /* never in new space and never on a page that is being compacted.    */   \
-    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value));  \
-    roots_[k##camel_name##RootIndex] = value;                                  \
+#define ROOT_ACCESSOR(type, name, camel_name)                                 \
+  inline void set_##name(type* value) {                                       \
+    /* The deserializer makes use of the fact that these common roots are */  \
+    /* never in new space and never on a page that is being compacted.    */  \
+    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
+    roots_[k##camel_name##RootIndex] = value;                                 \
   }
   ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
@@ -1639,10 +1577,8 @@ class Heap {
   // Allocations in the callback function are disallowed.
   struct GCPrologueCallbackPair {
     GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
-                           GCType gc_type,
-                           bool pass_isolate)
-        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
-    }
+                           GCType gc_type, bool pass_isolate)
+        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
     bool operator==(const GCPrologueCallbackPair& pair) const {
       return pair.callback == callback;
     }
@@ -1655,10 +1591,8 @@ class Heap {
 
   struct GCEpilogueCallbackPair {
     GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
-                           GCType gc_type,
-                           bool pass_isolate)
-        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
-    }
+                           GCType gc_type, bool pass_isolate)
+        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
     bool operator==(const GCEpilogueCallbackPair& pair) const {
       return pair.callback == callback;
     }
@@ -1707,8 +1641,7 @@ class Heap {
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
   bool CollectGarbage(
-      GarbageCollector collector,
-      const char* gc_reason,
+      GarbageCollector collector, const char* gc_reason,
       const char* collector_reason,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
@@ -1737,54 +1670,49 @@ class Heap {
   // performed by the runtime and should not be bypassed (to extend this to
   // inlined allocations, use the Heap::DisableInlineAllocation() support).
   MUST_USE_RESULT inline AllocationResult AllocateRaw(
-      int size_in_bytes,
-      AllocationSpace space,
-      AllocationSpace retry_space);
+      int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);
 
   // Allocates a heap object based on the map.
-  MUST_USE_RESULT AllocationResult Allocate(
-      Map* map,
-      AllocationSpace space,
-      AllocationSite* allocation_site = NULL);
+  MUST_USE_RESULT AllocationResult
+      Allocate(Map* map, AllocationSpace space,
+               AllocationSite* allocation_site = NULL);
 
   // Allocates a partial map for bootstrapping.
-  MUST_USE_RESULT AllocationResult AllocatePartialMap(
-      InstanceType instance_type,
-      int instance_size);
+  MUST_USE_RESULT AllocationResult
+      AllocatePartialMap(InstanceType instance_type, int instance_size);
 
   // Initializes a JSObject based on its map.
-  void InitializeJSObjectFromMap(JSObject* obj,
-                                 FixedArray* properties,
+  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                  Map* map);
   void InitializeAllocationMemento(AllocationMemento* memento,
                                    AllocationSite* allocation_site);
 
   // Allocate a block of memory in the given space (filled with a filler).
   // Used as a fall-back for generated code when the space is full.
-  MUST_USE_RESULT AllocationResult AllocateFillerObject(int size,
-                                                    bool double_align,
-                                                    AllocationSpace space);
+  MUST_USE_RESULT AllocationResult
+      AllocateFillerObject(int size, bool double_align, AllocationSpace space);
 
   // Allocate an uninitialized fixed array.
-  MUST_USE_RESULT AllocationResult AllocateRawFixedArray(
-      int length, PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedArray(int length, PretenureFlag pretenure);
 
   // Allocate an uninitialized fixed double array.
-  MUST_USE_RESULT AllocationResult AllocateRawFixedDoubleArray(
-      int length, PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
 
   // Allocate an initialized fixed array with the given filler value.
-  MUST_USE_RESULT AllocationResult AllocateFixedArrayWithFiller(
-      int length, PretenureFlag pretenure, Object* filler);
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
+                                   Object* filler);
 
   // Allocate and partially initializes a String.  There are two String
   // encodings: ASCII and two byte.  These functions allocate a string of the
   // given length and set its map and length fields.  The characters of the
   // string are uninitialized.
-  MUST_USE_RESULT AllocationResult AllocateRawOneByteString(
-      int length, PretenureFlag pretenure);
-  MUST_USE_RESULT AllocationResult AllocateRawTwoByteString(
-      int length, PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateRawOneByteString(int length, PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateRawTwoByteString(int length, PretenureFlag pretenure);
 
   bool CreateInitialMaps();
   void CreateInitialObjects();
@@ -1792,23 +1720,19 @@ class Heap {
   // Allocates an internalized string in old space based on the character
   // stream.
   MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
-      Vector<const char> str,
-      int chars,
-      uint32_t hash_field);
+      Vector<const char> str, int chars, uint32_t hash_field);
 
   MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
-        Vector<const uint8_t> str,
-        uint32_t hash_field);
+      Vector<const uint8_t> str, uint32_t hash_field);
 
   MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
-        Vector<const uc16> str,
-        uint32_t hash_field);
+      Vector<const uc16> str, uint32_t hash_field);
 
-  template<bool is_one_byte, typename T>
-  MUST_USE_RESULT AllocationResult AllocateInternalizedStringImpl(
-      T t, int chars, uint32_t hash_field);
+  template <bool is_one_byte, typename T>
+  MUST_USE_RESULT AllocationResult
+      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
 
-  template<typename T>
+  template <typename T>
   MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
       T t, int chars, uint32_t hash_field);
 
@@ -1821,8 +1745,8 @@ class Heap {
 
   // Make a copy of src, set the map, and return the copy. Returns
   // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
-                                                         Map* map);
+  MUST_USE_RESULT AllocationResult
+      CopyFixedArrayWithMap(FixedArray* src, Map* map);
 
   // Make a copy of src and return it. Returns
   // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
@@ -1837,15 +1761,15 @@ class Heap {
 
   // Computes a single character string where the character has code.
   // A cache is used for ASCII codes.
-  MUST_USE_RESULT AllocationResult LookupSingleCharacterStringFromCode(
-      uint16_t code);
+  MUST_USE_RESULT AllocationResult
+      LookupSingleCharacterStringFromCode(uint16_t code);
 
   // Allocate a symbol in old space.
   MUST_USE_RESULT AllocationResult AllocateSymbol();
 
   // Make a copy of src, set the map, and return the copy.
-  MUST_USE_RESULT AllocationResult CopyConstantPoolArrayWithMap(
-      ConstantPoolArray* src, Map* map);
+  MUST_USE_RESULT AllocationResult
+      CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);
 
   MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
       const ConstantPoolArray::NumberOfEntries& small);
@@ -1855,29 +1779,25 @@ class Heap {
       const ConstantPoolArray::NumberOfEntries& extended);
 
   // Allocates an external array of the specified length and type.
-  MUST_USE_RESULT AllocationResult AllocateExternalArray(
-      int length,
-      ExternalArrayType array_type,
-      void* external_pointer,
-      PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateExternalArray(int length, ExternalArrayType array_type,
+                            void* external_pointer, PretenureFlag pretenure);
 
   // Allocates a fixed typed array of the specified length and type.
-  MUST_USE_RESULT AllocationResult AllocateFixedTypedArray(
-      int length,
-      ExternalArrayType array_type,
-      PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedTypedArray(int length, ExternalArrayType array_type,
+                              PretenureFlag pretenure);
 
   // Make a copy of src and return it.
   MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
 
   // Make a copy of src, set the map, and return the copy.
-  MUST_USE_RESULT AllocationResult CopyFixedDoubleArrayWithMap(
-      FixedDoubleArray* src, Map* map);
+  MUST_USE_RESULT AllocationResult
+      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
 
   // Allocates a fixed double array with uninitialized values. Returns
   MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
-      int length,
-      PretenureFlag pretenure = NOT_TENURED);
+      int length, PretenureFlag pretenure = NOT_TENURED);
 
   // These five Create*EntryStub functions are here and forced to not be inlined
   // because of a gcc-4.4 bug that assigns wrong vtable entries.
@@ -1890,12 +1810,12 @@ class Heap {
   MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
 
   // Allocate empty external array of given type.
-  MUST_USE_RESULT AllocationResult AllocateEmptyExternalArray(
-      ExternalArrayType array_type);
+  MUST_USE_RESULT AllocationResult
+      AllocateEmptyExternalArray(ExternalArrayType array_type);
 
   // Allocate empty fixed typed array of given type.
-  MUST_USE_RESULT AllocationResult AllocateEmptyFixedTypedArray(
-      ExternalArrayType array_type);
+  MUST_USE_RESULT AllocationResult
+      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
 
   // Allocate empty constant pool array.
   MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
@@ -1910,11 +1830,11 @@ class Heap {
   MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
 
   // Allocates a new foreign object.
-  MUST_USE_RESULT AllocationResult AllocateForeign(
-      Address address, PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT AllocationResult
+      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
 
-  MUST_USE_RESULT AllocationResult AllocateCode(int object_size,
-                                                bool immovable);
+  MUST_USE_RESULT AllocationResult
+      AllocateCode(int object_size, bool immovable);
 
   MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
 
@@ -1933,12 +1853,10 @@ class Heap {
   void ZapFromSpace();
 
   static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
-      Heap* heap,
-      Object** pointer);
+      Heap* heap, Object** pointer);
 
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
-  static void ScavengeStoreBufferCallback(Heap* heap,
-                                          MemoryChunk* page,
+  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
                                           StoreBufferEvent event);
 
   // Performs a major collection in the whole heap.
@@ -2014,15 +1932,11 @@ class Heap {
 
   // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
   // Re-visit incremental marking heuristics.
-  bool IsHighSurvivalRate() {
-    return high_survival_rate_period_length_ > 0;
-  }
+  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
 
   void SelectScavengingVisitorsTable();
 
-  void StartIdleRound() {
-    mark_sweeps_since_idle_round_started_ = 0;
-  }
+  void StartIdleRound() { mark_sweeps_since_idle_round_started_ = 0; }
 
   void FinishIdleRound() {
     mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
@@ -2165,33 +2079,33 @@ class HeapStats {
   static const int kStartMarker = 0xDECADE00;
   static const int kEndMarker = 0xDECADE01;
 
-  int* start_marker;                    //  0
-  int* new_space_size;                  //  1
-  int* new_space_capacity;              //  2
-  intptr_t* old_pointer_space_size;          //  3
-  intptr_t* old_pointer_space_capacity;      //  4
-  intptr_t* old_data_space_size;             //  5
-  intptr_t* old_data_space_capacity;         //  6
-  intptr_t* code_space_size;                 //  7
-  intptr_t* code_space_capacity;             //  8
-  intptr_t* map_space_size;                  //  9
-  intptr_t* map_space_capacity;              // 10
-  intptr_t* cell_space_size;                 // 11
-  intptr_t* cell_space_capacity;             // 12
-  intptr_t* lo_space_size;                   // 13
-  int* global_handle_count;             // 14
-  int* weak_global_handle_count;        // 15
-  int* pending_global_handle_count;     // 16
-  int* near_death_global_handle_count;  // 17
-  int* free_global_handle_count;        // 18
-  intptr_t* memory_allocator_size;           // 19
-  intptr_t* memory_allocator_capacity;       // 20
-  int* objects_per_type;                // 21
-  int* size_per_type;                   // 22
-  int* os_error;                        // 23
-  int* end_marker;                      // 24
-  intptr_t* property_cell_space_size;   // 25
-  intptr_t* property_cell_space_capacity;    // 26
+  int* start_marker;                       //  0
+  int* new_space_size;                     //  1
+  int* new_space_capacity;                 //  2
+  intptr_t* old_pointer_space_size;        //  3
+  intptr_t* old_pointer_space_capacity;    //  4
+  intptr_t* old_data_space_size;           //  5
+  intptr_t* old_data_space_capacity;       //  6
+  intptr_t* code_space_size;               //  7
+  intptr_t* code_space_capacity;           //  8
+  intptr_t* map_space_size;                //  9
+  intptr_t* map_space_capacity;            // 10
+  intptr_t* cell_space_size;               // 11
+  intptr_t* cell_space_capacity;           // 12
+  intptr_t* lo_space_size;                 // 13
+  int* global_handle_count;                // 14
+  int* weak_global_handle_count;           // 15
+  int* pending_global_handle_count;        // 16
+  int* near_death_global_handle_count;     // 17
+  int* free_global_handle_count;           // 18
+  intptr_t* memory_allocator_size;         // 19
+  intptr_t* memory_allocator_capacity;     // 20
+  int* objects_per_type;                   // 21
+  int* size_per_type;                      // 22
+  int* os_error;                           // 23
+  int* end_marker;                         // 24
+  intptr_t* property_cell_space_size;      // 25
+  intptr_t* property_cell_space_capacity;  // 26
 };
 
 
@@ -2233,14 +2147,14 @@ class GCCallbacksScope {
 // point into the heap to a location that has a map pointer at its first word.
 // Caveat: Heap::Contains is an approximation because it can return true for
 // objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor: public ObjectVisitor {
+class VerifyPointersVisitor : public ObjectVisitor {
  public:
   inline void VisitPointers(Object** start, Object** end);
 };
 
 
 // Verify that all objects are Smis.
-class VerifySmisVisitor: public ObjectVisitor {
+class VerifySmisVisitor : public ObjectVisitor {
  public:
   inline void VisitPointers(Object** start, Object** end);
 };
@@ -2252,6 +2166,7 @@ class AllSpaces BASE_EMBEDDED {
  public:
   explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
   Space* next();
+
  private:
   Heap* heap_;
   int counter_;
@@ -2265,6 +2180,7 @@ class OldSpaces BASE_EMBEDDED {
  public:
   explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
   OldSpace* next();
+
  private:
   Heap* heap_;
   int counter_;
@@ -2278,6 +2194,7 @@ class PagedSpaces BASE_EMBEDDED {
  public:
   explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
   PagedSpace* next();
+
  private:
   Heap* heap_;
   int counter_;
@@ -2300,7 +2217,7 @@ class SpaceIterator : public Malloced {
   ObjectIterator* CreateIterator();
 
   Heap* heap_;
-  int current_space_;  // from enum AllocationSpace.
+  int current_space_;         // from enum AllocationSpace.
   ObjectIterator* iterator_;  // object iterator for the current space.
   HeapObjectCallback size_func_;
 };
@@ -2322,10 +2239,7 @@ class HeapObjectsFilter;
 
 class HeapIterator BASE_EMBEDDED {
  public:
-  enum HeapObjectsFiltering {
-    kNoFiltering,
-    kFilterUnreachable
-  };
+  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
 
   explicit HeapIterator(Heap* heap);
   HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
@@ -2397,9 +2311,7 @@ class KeyedLookupCache {
 
   // Get the address of the keys and field_offsets arrays.  Used in
   // generated code to perform cache lookups.
-  Address keys_address() {
-    return reinterpret_cast<Address>(&keys_);
-  }
+  Address keys_address() { return reinterpret_cast<Address>(&keys_); }
 
   Address field_offsets_address() {
     return reinterpret_cast<Address>(&field_offsets_);
@@ -2464,11 +2376,11 @@ class DescriptorLookupCache {
   static int Hash(Object* source, Name* name) {
     // Uses only lower 32 bits if pointers are larger.
     uint32_t source_hash =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source))
-            >> kPointerSizeLog2;
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+        kPointerSizeLog2;
     uint32_t name_hash =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
-            >> kPointerSizeLog2;
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
+        kPointerSizeLog2;
     return (source_hash ^ name_hash) % kLength;
   }
 
@@ -2492,16 +2404,12 @@ class RegExpResultsCache {
 
   // Attempt to retrieve a cached result.  On failure, 0 is returned as a Smi.
   // On success, the returned result is guaranteed to be a COW-array.
-  static Object* Lookup(Heap* heap,
-                        String* key_string,
-                        Object* key_pattern,
+  static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
                         ResultsCacheType type);
   // Attempt to add value_array to the cache specified by type.  On success,
   // value_array is turned into a COW-array.
-  static void Enter(Isolate* isolate,
-                    Handle<String> key_string,
-                    Handle<Object> key_pattern,
-                    Handle<FixedArray> value_array,
+  static void Enter(Isolate* isolate, Handle<String> key_string,
+                    Handle<Object> key_pattern, Handle<FixedArray> value_array,
                     ResultsCacheType type);
   static void Clear(FixedArray* cache);
   static const int kRegExpResultsCacheSize = 0x100;
@@ -2583,8 +2491,7 @@ class PathTracer : public ObjectVisitor {
   // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
   // after the first match.  If FIND_ALL is specified, then tracing will be
   // done for all matches.
-  PathTracer(Object* search_target,
-             WhatToFind what_to_find,
+  PathTracer(Object* search_target, WhatToFind what_to_find,
              VisitMode visit_mode)
       : search_target_(search_target),
         found_target_(false),
@@ -2624,7 +2531,7 @@ class PathTracer : public ObjectVisitor {
   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 };
 #endif  // DEBUG
+}
+}  // namespace v8::internal
 
-} }  // namespace v8::internal
-
-#endif  // V8_HEAP_H_
+#endif  // V8_HEAP_HEAP_H_
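The ROOT_ACCESSOR, STRUCT_MAP_ACCESSOR and STRING_ACCESSOR hunks above only reflow macro bodies, but the underlying X-macro pattern is easy to lose in diff form: a single list macro (ROOT_LIST, STRUCT_LIST, INTERNALIZED_STRING_LIST) is expanded several times to generate the RootListIndex enum entries and the matching accessors. Below is a minimal standalone sketch of the same pattern; TOY_ROOT_LIST and ToyHeap are invented for illustration and are not V8 code.

// Self-contained illustration of the X-macro pattern behind ROOT_LIST /
// ROOT_ACCESSOR: one list, expanded once for indices and once for accessors.
#include <cassert>

// A toy "root list": each entry is (type, accessor_name, CamelName).
#define TOY_ROOT_LIST(V) \
  V(int, answer, Answer) \
  V(double, ratio, Ratio)

class ToyHeap {
 public:
  // Index enum generated from the list, mirroring k##camel_name##RootIndex.
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
  enum RootListIndex { TOY_ROOT_LIST(ROOT_INDEX_DECLARATION) kRootListLength };
#undef ROOT_INDEX_DECLARATION

  // Accessors generated from the same list, mirroring ROOT_ACCESSOR above.
#define ROOT_ACCESSOR(type, name, camel_name) \
  type name() const { return name##_; }       \
  void set_##name(type value) { name##_ = value; }
  TOY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

 private:
  // One backing field per list entry.
#define ROOT_FIELD(type, name, camel_name) type name##_ = 0;
  TOY_ROOT_LIST(ROOT_FIELD)
#undef ROOT_FIELD
};

int main() {
  ToyHeap heap;
  heap.set_answer(42);
  assert(heap.answer() == 42);
  assert(ToyHeap::kRootListLength == 2);
  return 0;
}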
similarity index 83%
rename from src/incremental-marking-inl.h
rename to src/heap/incremental-marking-inl.h
index 692e958..5258c5c 100644 (file)
@@ -2,17 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_INCREMENTAL_MARKING_INL_H_
-#define V8_INCREMENTAL_MARKING_INL_H_
+#ifndef V8_HEAP_INCREMENTAL_MARKING_INL_H_
+#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
 
-#include "src/incremental-marking.h"
+#include "src/heap/incremental-marking.h"
 
 namespace v8 {
 namespace internal {
 
 
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
-                                         Object** slot,
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
                                          Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
   MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
@@ -42,8 +41,7 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
 }
 
 
-void IncrementalMarking::RecordWrite(HeapObject* obj,
-                                     Object** slot,
+void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
                                      Object* value) {
   if (IsMarking() && value->IsHeapObject()) {
     RecordWriteSlow(obj, slot, value);
@@ -51,15 +49,13 @@ void IncrementalMarking::RecordWrite(HeapObject* obj,
 }
 
 
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
-                                                Object** slot,
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                                 Code* value) {
   if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
 }
 
 
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
-                                             RelocInfo* rinfo,
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
                                              Object* value) {
   if (IsMarking() && value->IsHeapObject()) {
     RecordWriteIntoCodeSlow(obj, rinfo, value);
@@ -85,7 +81,7 @@ void IncrementalMarking::RecordWrites(HeapObject* obj) {
 void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
                                                MarkBit mark_bit) {
   DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
-  DCHECK(obj->Size() >= 2*kPointerSize);
+  DCHECK(obj->Size() >= 2 * kPointerSize);
   DCHECK(IsMarking());
   Marking::BlackToGrey(mark_bit);
   int obj_size = obj->Size();
@@ -115,8 +111,7 @@ void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
   Marking::WhiteToGrey(mark_bit);
   marking_deque_.PushGrey(obj);
 }
+}
+}  // namespace v8::internal
 
-
-} }  // namespace v8::internal
-
-#endif  // V8_INCREMENTAL_MARKING_INL_H_
+#endif  // V8_HEAP_INCREMENTAL_MARKING_INL_H_
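The RecordWrite family above shows the usual write-barrier layout: the inlined header keeps only a cheap filter (IsMarking() plus an IsHeapObject() check on the value) and defers all real bookkeeping to an out-of-line RecordWriteSlow in the .cc file. A minimal sketch of that fast-path/slow-path split, with invented stand-in types rather than V8's Heap and Object classes:

// Sketch of the inline-fast-path / out-of-line-slow-path split used by the
// incremental-marking write barrier (all names invented for illustration).
#include <cstdio>

struct Object {
  bool is_heap_object;  // stand-in for value->IsHeapObject()
};

class Barrier {
 public:
  void set_marking(bool on) { marking_ = on; }

  // Inline fast path: only a couple of flag checks; most writes bail out here.
  void RecordWrite(Object** slot, Object* value) {
    if (marking_ && value != nullptr && value->is_heap_object) {
      RecordWriteSlow(slot, value);
    }
  }

 private:
  // Out-of-line slow path: where the real marking bookkeeping would live.
  void RecordWriteSlow(Object** slot, Object* value) {
    std::printf("slow path taken for slot %p\n", static_cast<void*>(slot));
  }

  bool marking_ = false;
};

int main() {
  Barrier barrier;
  Object heap_obj{true};
  Object* field = nullptr;

  barrier.RecordWrite(&field, &heap_obj);  // marking off: fast path, no output
  barrier.set_marking(true);
  barrier.RecordWrite(&field, &heap_obj);  // marking on: slow path runs
  return 0;
}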
similarity index 91%
rename from src/incremental-marking.cc
rename to src/heap/incremental-marking.cc
index 431e1ce..d2cd0a0 100644 (file)
@@ -4,7 +4,7 @@
 
 #include "src/v8.h"
 
-#include "src/incremental-marking.h"
+#include "src/heap/incremental-marking.h"
 
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
@@ -31,27 +31,23 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       unscanned_bytes_of_large_object_(0) {}
 
 
-void IncrementalMarking::TearDown() {
-  delete marking_deque_memory_;
-}
+void IncrementalMarking::TearDown() { delete marking_deque_memory_; }
 
 
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
-                                         Object** slot,
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                          Object* value) {
   if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
     MarkBit obj_bit = Marking::MarkBitFrom(obj);
     if (Marking::IsBlack(obj_bit)) {
       // Object is not going to be rescanned we need to record the slot.
-      heap_->mark_compact_collector()->RecordSlot(
-          HeapObject::RawField(obj, 0), slot, value);
+      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
+                                                  slot, value);
     }
   }
 }
 
 
-void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
-                                             Object** slot,
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                              Isolate* isolate) {
   DCHECK(obj->IsHeapObject());
   IncrementalMarking* marking = isolate->heap()->incremental_marking();
@@ -61,7 +57,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
   if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
     marking->write_barriers_invoked_since_last_step_ +=
         MemoryChunk::kWriteBarrierCounterGranularity -
-            chunk->write_barrier_counter();
+        chunk->write_barrier_counter();
     chunk->set_write_barrier_counter(
         MemoryChunk::kWriteBarrierCounterGranularity);
   }
@@ -70,8 +66,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
 }
 
 
-void IncrementalMarking::RecordCodeTargetPatch(Code* host,
-                                               Address pc,
+void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                                HeapObject* value) {
   if (IsMarking()) {
     RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
@@ -82,8 +77,9 @@ void IncrementalMarking::RecordCodeTargetPatch(Code* host,
 
 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
   if (IsMarking()) {
-    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
-        GcSafeFindCodeForInnerPointer(pc);
+    Code* host = heap_->isolate()
+                     ->inner_pointer_to_code_cache()
+                     ->GcSafeFindCodeForInnerPointer(pc);
     RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
     RecordWriteIntoCode(host, &rinfo, value);
   }
@@ -95,8 +91,8 @@ void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                     Code* value) {
   if (BaseRecordWrite(host, slot, value)) {
     DCHECK(slot != NULL);
-    heap_->mark_compact_collector()->
-        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
+    heap_->mark_compact_collector()->RecordCodeEntrySlot(
+        reinterpret_cast<Address>(slot), value);
   }
 }
 
@@ -140,8 +136,7 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
 
 
 static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
-                                       MarkBit mark_bit,
-                                       int size) {
+                                       MarkBit mark_bit, int size) {
   DCHECK(!Marking::IsImpossible(mark_bit));
   if (mark_bit.Get()) return;
   mark_bit.Set();
@@ -151,8 +146,7 @@ static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
 
 
 static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
-                                        MarkBit mark_bit,
-                                        int size) {
+                                        MarkBit mark_bit, int size) {
   DCHECK(!Marking::IsImpossible(mark_bit));
   if (Marking::IsBlack(mark_bit)) return;
   Marking::MarkBlack(mark_bit);
@@ -188,15 +182,14 @@ class IncrementalMarkingMarkingVisitor
       // fully scanned. Fall back to scanning it through to the end in case this
       // fails because of a full deque.
       int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-      int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
-                             chunk->progress_bar());
-      int end_offset = Min(object_size,
-                           start_offset + kProgressBarScanningChunk);
+      int start_offset =
+          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+      int end_offset =
+          Min(object_size, start_offset + kProgressBarScanningChunk);
       int already_scanned_offset = start_offset;
       bool scan_until_end = false;
       do {
-        VisitPointersWithAnchor(heap,
-                                HeapObject::RawField(object, 0),
+        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                 HeapObject::RawField(object, start_offset),
                                 HeapObject::RawField(object, end_offset));
         start_offset = end_offset;
@@ -245,10 +238,8 @@ class IncrementalMarkingMarkingVisitor
     }
   }
 
-  INLINE(static void VisitPointersWithAnchor(Heap* heap,
-                                             Object** anchor,
-                                             Object** start,
-                                             Object** end)) {
+  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
+                                             Object** start, Object** end)) {
     for (Object** p = start; p < end; p++) {
       Object* obj = *p;
       if (obj->IsHeapObject()) {
@@ -289,12 +280,9 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
  public:
   explicit IncrementalMarkingRootMarkingVisitor(
       IncrementalMarking* incremental_marking)
-      : incremental_marking_(incremental_marking) {
-  }
+      : incremental_marking_(incremental_marking) {}
 
-  void VisitPointer(Object** p) {
-    MarkObjectByPointer(p);
-  }
+  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
 
   void VisitPointers(Object** start, Object** end) {
     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
@@ -334,8 +322,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
 
     // It's difficult to filter out slots recorded for large objects.
     if (chunk->owner()->identity() == LO_SPACE &&
-        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
-        is_compacting) {
+        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
       chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
     }
   } else if (chunk->owner()->identity() == CELL_SPACE ||
@@ -445,18 +432,16 @@ bool IncrementalMarking::WorthActivating() {
   // Only start incremental marking in a safe state: 1) when incremental
   // marking is turned on, 2) when we are currently not in a GC, and
   // 3) when we are currently not serializing or deserializing the heap.
-  return FLAG_incremental_marking &&
-      FLAG_incremental_marking_steps &&
-      heap_->gc_state() == Heap::NOT_IN_GC &&
-      !heap_->isolate()->serializer_enabled() &&
-      heap_->isolate()->IsInitialized() &&
-      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
+  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
+         heap_->gc_state() == Heap::NOT_IN_GC &&
+         !heap_->isolate()->serializer_enabled() &&
+         heap_->isolate()->IsInitialized() &&
+         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
 }
 
 
 void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
-  DCHECK(RecordWriteStub::GetMode(stub) ==
-         RecordWriteStub::STORE_BUFFER_ONLY);
+  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);
 
   if (!IsMarking()) {
     // Initially stub is generated in STORE_BUFFER_ONLY mode thus
@@ -480,8 +465,7 @@ static void PatchIncrementalMarkingRecordWriteStubs(
     if (stubs->IsKey(k)) {
       uint32_t key = NumberToUint32(k);
 
-      if (CodeStub::MajorKeyFromKey(key) ==
-          CodeStub::RecordWrite) {
+      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
         Object* e = stubs->ValueAt(i);
         if (e->IsCode()) {
           RecordWriteStub::Patch(Code::cast(e), mode);
@@ -550,13 +534,14 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
   }
 
   is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
-      heap_->mark_compact_collector()->StartCompaction(
-          MarkCompactCollector::INCREMENTAL_COMPACTION);
+                   heap_->mark_compact_collector()->StartCompaction(
+                       MarkCompactCollector::INCREMENTAL_COMPACTION);
 
   state_ = MARKING;
 
-  RecordWriteStub::Mode mode = is_compacting_ ?
-      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+  RecordWriteStub::Mode mode = is_compacting_
+                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
+                                   : RecordWriteStub::INCREMENTAL;
 
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
 
@@ -570,7 +555,7 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
 
   ActivateIncrementalWriteBarrier();
 
-  // Marking bits are cleared by the sweeper.
+// Marking bits are cleared by the sweeper.
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
@@ -644,12 +629,12 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
       new_top = ((new_top + 1) & mask);
       DCHECK(new_top != marking_deque_.bottom());
 #ifdef DEBUG
-        MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-        DCHECK(Marking::IsGrey(mark_bit) ||
-               (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
-               (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
-                Marking::IsBlack(mark_bit)));
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      DCHECK(Marking::IsGrey(mark_bit) ||
+             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+              Marking::IsBlack(mark_bit)));
 #endif
     }
   }
@@ -838,8 +823,7 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
 
 void IncrementalMarking::Step(intptr_t allocated_bytes,
                               CompletionAction action) {
-  if (heap_->gc_state() != Heap::NOT_IN_GC ||
-      !FLAG_incremental_marking ||
+  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
       !FLAG_incremental_marking_steps ||
       (state_ != SWEEPING && state_ != MARKING)) {
     return;
@@ -983,5 +967,5 @@ void IncrementalMarking::ResetStepCounters() {
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
   return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }
-
-}  // namespace v8::internal
+}
+}  // namespace v8::internal
similarity index 84%
rename from src/incremental-marking.h
rename to src/heap/incremental-marking.h
index e5d0f5b..d9259e8 100644 (file)
@@ -2,12 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_INCREMENTAL_MARKING_H_
-#define V8_INCREMENTAL_MARKING_H_
+#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
+#define V8_HEAP_INCREMENTAL_MARKING_H_
 
 
 #include "src/execution.h"
-#include "src/mark-compact.h"
+#include "src/heap/mark-compact.h"
 #include "src/objects.h"
 
 namespace v8 {
@@ -16,17 +16,9 @@ namespace internal {
 
 class IncrementalMarking {
  public:
-  enum State {
-    STOPPED,
-    SWEEPING,
-    MARKING,
-    COMPLETE
-  };
-
-  enum CompletionAction {
-    GC_VIA_STACK_GUARD,
-    NO_GC_VIA_STACK_GUARD
-  };
+  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
+
+  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
 
   explicit IncrementalMarking(Heap* heap);
 
@@ -102,8 +94,7 @@ class IncrementalMarking {
     }
   }
 
-  static void RecordWriteFromCode(HeapObject* obj,
-                                  Object** slot,
+  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                   Isolate* isolate);
 
   // Record a slot for compaction.  Returns false for objects that are
@@ -114,17 +105,14 @@ class IncrementalMarking {
   // the incremental cycle (stays white).
   INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
   INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
-  INLINE(void RecordWriteIntoCode(HeapObject* obj,
-                                  RelocInfo* rinfo,
+  INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
                                   Object* value));
-  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
-                                     Object** slot,
+  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                      Code* value));
 
 
   void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
-  void RecordWriteIntoCodeSlow(HeapObject* obj,
-                               RelocInfo* rinfo,
+  void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
                                Object* value);
   void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
   void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
@@ -154,22 +142,19 @@ class IncrementalMarking {
     if (IsMarking()) {
       if (marking_speed_ < kFastMarking) {
         if (FLAG_trace_gc) {
-          PrintPID("Increasing marking speed to %d "
-                   "due to high promotion rate\n",
-                   static_cast<int>(kFastMarking));
+          PrintPID(
+              "Increasing marking speed to %d "
+              "due to high promotion rate\n",
+              static_cast<int>(kFastMarking));
         }
         marking_speed_ = kFastMarking;
       }
     }
   }
 
-  void EnterNoMarkingScope() {
-    no_marking_scope_depth_++;
-  }
+  void EnterNoMarkingScope() { no_marking_scope_depth_++; }
 
-  void LeaveNoMarkingScope() {
-    no_marking_scope_depth_--;
-  }
+  void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
 
   void UncommitMarkingDeque();
 
@@ -192,8 +177,7 @@ class IncrementalMarking {
   static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
   void DeactivateIncrementalWriteBarrier();
 
-  static void SetOldSpacePageFlags(MemoryChunk* chunk,
-                                   bool is_marking,
+  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                    bool is_compacting);
 
   static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
@@ -231,7 +215,7 @@ class IncrementalMarking {
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
+}
+}  // namespace v8::internal
 
-} }  // namespace v8::internal
-
-#endif  // V8_INCREMENTAL_MARKING_H_
+#endif  // V8_HEAP_INCREMENTAL_MARKING_H_
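incremental-marking.h above declares the State enum (STOPPED, SWEEPING, MARKING, COMPLETE), and incremental-marking.cc drives it from Step(allocated_bytes, action), so marking work is doled out in slices proportional to allocation. The toy sketch below mirrors only that shape; the thresholds, transitions and work accounting are invented and are not V8's heuristics.

// Toy allocation-driven incremental collector: a bounded slice of work per
// Step() call, loosely mirroring the state machine above. Not V8 code.
#include <cstdio>

enum class State { kStopped, kSweeping, kMarking, kComplete };

class ToyIncrementalMarking {
 public:
  // Called on every allocation; does at most one slice of work per threshold
  // worth of allocated bytes.
  void Step(long allocated_bytes) {
    allocated_since_step_ += allocated_bytes;
    if (allocated_since_step_ < kStepThreshold) return;
    allocated_since_step_ = 0;

    switch (state_) {
      case State::kStopped:
      case State::kSweeping:
        state_ = State::kMarking;  // invented transition, not V8's
        break;
      case State::kMarking:
        work_done_ += 1;  // stand-in for draining part of the marking deque
        if (work_done_ >= kWorkToFinish) state_ = State::kComplete;
        break;
      case State::kComplete:
        break;  // a full GC would finish the cycle and reset to kStopped
    }
  }

  State state() const { return state_; }

 private:
  static const long kStepThreshold = 1024;  // invented
  static const int kWorkToFinish = 3;       // invented
  State state_ = State::kStopped;
  long allocated_since_step_ = 0;
  int work_done_ = 0;
};

int main() {
  ToyIncrementalMarking marking;
  for (int i = 0; i < 8; i++) marking.Step(512);
  std::printf("complete: %d\n", marking.state() == State::kComplete);
  return 0;
}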
similarity index 85%
rename from src/mark-compact-inl.h
rename to src/heap/mark-compact-inl.h
index 6f853fd..934fce8 100644 (file)
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_MARK_COMPACT_INL_H_
-#define V8_MARK_COMPACT_INL_H_
+#ifndef V8_HEAP_MARK_COMPACT_INL_H_
+#define V8_HEAP_MARK_COMPACT_INL_H_
 
 #include <memory.h>
 
+#include "src/heap/mark-compact.h"
 #include "src/isolate.h"
-#include "src/mark-compact.h"
 
 
 namespace v8 {
@@ -57,23 +57,19 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
 }
 
 
-void MarkCompactCollector::RecordSlot(Object** anchor_slot,
-                                      Object** slot,
+void MarkCompactCollector::RecordSlot(Object** anchor_slot, Object** slot,
                                       Object* object,
                                       SlotsBuffer::AdditionMode mode) {
   Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
   if (object_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            object_page->slots_buffer_address(),
-                            slot,
-                            mode)) {
+                            object_page->slots_buffer_address(), slot, mode)) {
       EvictEvacuationCandidate(object_page);
     }
   }
 }
+}
+}  // namespace v8::internal
 
-
-} }  // namespace v8::internal
-
-#endif  // V8_MARK_COMPACT_INL_H_
+#endif  // V8_HEAP_MARK_COMPACT_INL_H_
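RecordSlot above records a slot only when it points into an evacuation candidate page and recording is not being skipped for the anchor slot; if the page's slots buffer cannot accept another entry, the page is evicted from the candidate set so no slot update can be missed. A standalone sketch of that record-or-evict decision, with invented types in place of Page and SlotsBuffer:

// Sketch of the decision in RecordSlot: either remember the slot in the
// target page's buffer, or stop treating that page as an evacuation
// candidate. All types here are invented stand-ins, not V8 code.
#include <vector>

struct Page {
  bool is_evacuation_candidate = false;
  std::vector<void**> slots_buffer;              // recorded slots into this page
  static const size_t kSlotsBufferCapacity = 4;  // invented, tiny for the demo
};

// Returns true if the slot was recorded; false if the page was evicted or
// was never a candidate in the first place.
bool RecordSlot(Page* target_page, void** slot) {
  if (!target_page->is_evacuation_candidate) return false;  // nothing to do
  if (target_page->slots_buffer.size() >= Page::kSlotsBufferCapacity) {
    // Buffer full: give up on compacting this page rather than lose a slot.
    target_page->is_evacuation_candidate = false;
    target_page->slots_buffer.clear();
    return false;
  }
  target_page->slots_buffer.push_back(slot);
  return true;
}

int main() {
  Page page;
  page.is_evacuation_candidate = true;
  void* dummy = nullptr;
  for (int i = 0; i < 6; i++) RecordSlot(&page, &dummy);
  // After overflowing the tiny buffer, the page is no longer a candidate.
  return page.is_evacuation_candidate ? 1 : 0;
}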
similarity index 87%
rename from src/mark-compact.cc
rename to src/heap/mark-compact.cc
index 9ea1cf9..87def09 100644 (file)
 #include "src/execution.h"
 #include "src/gdb-jit.h"
 #include "src/global-handles.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/spaces-inl.h"
+#include "src/heap/sweeper-thread.h"
 #include "src/heap-profiler.h"
 #include "src/ic-inl.h"
-#include "src/incremental-marking.h"
-#include "src/mark-compact.h"
 #include "src/objects-visiting.h"
 #include "src/objects-visiting-inl.h"
-#include "src/spaces-inl.h"
 #include "src/stub-cache.h"
-#include "src/sweeper-thread.h"
 
 namespace v8 {
 namespace internal {
@@ -35,7 +35,8 @@ const char* Marking::kImpossibleBitPattern = "01";
 // -------------------------------------------------------------------------
 // MarkCompactCollector
 
-MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
+MarkCompactCollector::MarkCompactCollector(Heap* heap)
+    :  // NOLINT
 #ifdef DEBUG
       state_(IDLE),
 #endif
@@ -51,10 +52,11 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
       migration_slots_buffer_(NULL),
       heap_(heap),
       code_flusher_(NULL),
-      have_code_to_deoptimize_(false) { }
+      have_code_to_deoptimize_(false) {
+}
 
 #ifdef VERIFY_HEAP
-class VerifyMarkingVisitor: public ObjectVisitor {
+class VerifyMarkingVisitor : public ObjectVisitor {
  public:
   explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
 
@@ -93,9 +95,7 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
   HeapObject* object;
   Address next_object_must_be_here_or_later = bottom;
 
-  for (Address current = bottom;
-       current < top;
-       current += kPointerSize) {
+  for (Address current = bottom; current < top; current += kPointerSize) {
     object = HeapObject::FromAddress(current);
     if (MarkCompactCollector::IsMarked(object)) {
       CHECK(current >= next_object_must_be_here_or_later);
@@ -154,7 +154,7 @@ static void VerifyMarking(Heap* heap) {
 }
 
 
-class VerifyEvacuationVisitor: public ObjectVisitor {
+class VerifyEvacuationVisitor : public ObjectVisitor {
  public:
   void VisitPointers(Object** start, Object** end) {
     for (Object** current = start; current < end; current++) {
@@ -171,7 +171,7 @@ static void VerifyEvacuation(Page* page) {
   VerifyEvacuationVisitor visitor;
   HeapObjectIterator iterator(page, NULL);
   for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
-      heap_object = iterator.Next()) {
+       heap_object = iterator.Next()) {
     // We skip free space objects.
     if (!heap_object->IsFiller()) {
       heap_object->Iterate(&visitor);
@@ -230,7 +230,7 @@ static void VerifyEvacuation(Heap* heap) {
 
 
 #ifdef DEBUG
-class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
+class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
  public:
   VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
 
@@ -269,8 +269,8 @@ class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
               // Set array length to zero to prevent cycles while iterating
               // over array bodies, this is easier than intrusive marking.
               array->set_length(0);
-              array->IterateBody(
-                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
+              array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
+                                 this);
               array->set_length(length);
             }
             break;
@@ -332,9 +332,7 @@ void MarkCompactCollector::SetUp() {
 }
 
 
-void MarkCompactCollector::TearDown() {
-  AbortCompaction();
-}
+void MarkCompactCollector::TearDown() { AbortCompaction(); }
 
 
 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
@@ -348,10 +346,8 @@ static void TraceFragmentation(PagedSpace* space) {
   intptr_t reserved = (number_of_pages * space->AreaSize());
   intptr_t free = reserved - space->SizeOfObjects();
   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
-         AllocationSpaceName(space->identity()),
-         number_of_pages,
-         static_cast<int>(free),
-         static_cast<double>(free) * 100 / reserved);
+         AllocationSpaceName(space->identity()), number_of_pages,
+         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
 }
 
 
@@ -367,9 +363,8 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
     CollectEvacuationCandidates(heap()->old_pointer_space());
     CollectEvacuationCandidates(heap()->old_data_space());
 
-    if (FLAG_compact_code_space &&
-        (mode == NON_INCREMENTAL_COMPACTION ||
-         FLAG_incremental_code_compaction)) {
+    if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
+                                    FLAG_incremental_code_compaction)) {
       CollectEvacuationCandidates(heap()->code_space());
     } else if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->code_space());
@@ -481,8 +476,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
 
 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
   HeapObjectIterator code_iterator(heap()->code_space());
-  for (HeapObject* obj = code_iterator.Next();
-       obj != NULL;
+  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
        obj = code_iterator.Next()) {
     Code* code = Code::cast(obj);
     if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
@@ -494,9 +488,7 @@ void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
 
 void MarkCompactCollector::VerifyOmittedMapChecks() {
   HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* obj = iterator.Next();
-       obj != NULL;
-       obj = iterator.Next()) {
+  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
     Map* map = Map::cast(obj);
     map->VerifyOmittedMapChecks();
   }
@@ -544,8 +536,7 @@ void MarkCompactCollector::ClearMarkbits() {
 
 class MarkCompactCollector::SweeperTask : public v8::Task {
  public:
-  SweeperTask(Heap* heap, PagedSpace* space)
-    : heap_(heap), space_(space) {}
+  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
 
   virtual ~SweeperTask() {}
 
@@ -700,15 +691,22 @@ void Marking::TransferMark(Address old_start, Address new_start) {
 
 const char* AllocationSpaceName(AllocationSpace space) {
   switch (space) {
-    case NEW_SPACE: return "NEW_SPACE";
-    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
-    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
-    case CODE_SPACE: return "CODE_SPACE";
-    case MAP_SPACE: return "MAP_SPACE";
-    case CELL_SPACE: return "CELL_SPACE";
+    case NEW_SPACE:
+      return "NEW_SPACE";
+    case OLD_POINTER_SPACE:
+      return "OLD_POINTER_SPACE";
+    case OLD_DATA_SPACE:
+      return "OLD_DATA_SPACE";
+    case CODE_SPACE:
+      return "CODE_SPACE";
+    case MAP_SPACE:
+      return "MAP_SPACE";
+    case CELL_SPACE:
+      return "CELL_SPACE";
     case PROPERTY_CELL_SPACE:
       return "PROPERTY_CELL_SPACE";
-    case LO_SPACE: return "LO_SPACE";
+    case LO_SPACE:
+      return "LO_SPACE";
     default:
       UNREACHABLE();
   }
@@ -724,10 +722,8 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
   // If page was not swept then there are no free list items on it.
   if (!p->WasSwept()) {
     if (FLAG_trace_fragmentation) {
-      PrintF("%p [%s]: %d bytes live (unswept)\n",
-             reinterpret_cast<void*>(p),
-             AllocationSpaceName(space->identity()),
-             p->LiveBytes());
+      PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
+             AllocationSpaceName(space->identity()), p->LiveBytes());
     }
     return 0;
   }
@@ -739,31 +735,24 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
   intptr_t ratio_threshold;
   intptr_t area_size = space->AreaSize();
   if (space->identity() == CODE_SPACE) {
-    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-        area_size;
+    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
     ratio_threshold = 10;
   } else {
-    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-        area_size;
+    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
     ratio_threshold = 15;
   }
 
   if (FLAG_trace_fragmentation) {
     PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
-           reinterpret_cast<void*>(p),
-           AllocationSpaceName(space->identity()),
+           reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
            static_cast<int>(sizes.small_size_),
-           static_cast<double>(sizes.small_size_ * 100) /
-           area_size,
+           static_cast<double>(sizes.small_size_ * 100) / area_size,
            static_cast<int>(sizes.medium_size_),
-           static_cast<double>(sizes.medium_size_ * 100) /
-           area_size,
+           static_cast<double>(sizes.medium_size_ * 100) / area_size,
            static_cast<int>(sizes.large_size_),
-           static_cast<double>(sizes.large_size_ * 100) /
-           area_size,
+           static_cast<double>(sizes.large_size_ * 100) / area_size,
            static_cast<int>(sizes.huge_size_),
-           static_cast<double>(sizes.huge_size_ * 100) /
-           area_size,
+           static_cast<double>(sizes.huge_size_ * 100) / area_size,
            (ratio > ratio_threshold) ? "[fragmented]" : "");
   }
 
@@ -793,8 +782,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
 
   class Candidate {
    public:
-    Candidate() : fragmentation_(0), page_(NULL) { }
-    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
+    Candidate() : fragmentation_(0), page_(NULL) {}
+    Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
 
     int fragmentation() { return fragmentation_; }
     Page* page() { return page_; }
@@ -804,10 +793,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     Page* page_;
   };
 
-  enum CompactionMode {
-    COMPACT_FREE_LISTS,
-    REDUCE_MEMORY_FOOTPRINT
-  };
+  enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
 
   CompactionMode mode = COMPACT_FREE_LISTS;
 
@@ -833,12 +819,12 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   }
 
   if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
-    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
-           "evacuation candidate limit: %d\n",
-           static_cast<double>(over_reserved) / MB,
-           static_cast<double>(reserved) / MB,
-           static_cast<int>(kFreenessThreshold),
-           max_evacuation_candidates);
+    PrintF(
+        "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
+        "evacuation candidate limit: %d\n",
+        static_cast<double>(over_reserved) / MB,
+        static_cast<double>(reserved) / MB,
+        static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
   }
 
   intptr_t estimated_release = 0;
@@ -889,8 +875,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       }
 
       if (FLAG_trace_fragmentation) {
-        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
-               reinterpret_cast<void*>(p),
+        PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
                AllocationSpaceName(space->identity()),
                static_cast<int>(free_bytes),
                static_cast<double>(free_bytes * 100) / p->area_size(),
@@ -925,8 +910,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   }
 
   if (count > 0 && FLAG_trace_fragmentation) {
-    PrintF("Collected %d evacuation candidates for space %s\n",
-           count,
+    PrintF("Collected %d evacuation candidates for space %s\n", count,
            AllocationSpaceName(space->identity()));
   }
 }
@@ -980,8 +964,7 @@ void MarkCompactCollector::Prepare() {
   }
 
   PagedSpaces spaces(heap());
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
+  for (PagedSpace* space = spaces.next(); space != NULL;
        space = spaces.next()) {
     space->PrepareForMarkCompact();
   }
@@ -1072,13 +1055,13 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
     // setter did not record the slot update and we have to do that manually.
     Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
     Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
-    isolate_->heap()->mark_compact_collector()->
-        RecordCodeEntrySlot(slot, target);
+    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
+                                                                    target);
 
     Object** shared_code_slot =
         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
-    isolate_->heap()->mark_compact_collector()->
-        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
+    isolate_->heap()->mark_compact_collector()->RecordSlot(
+        shared_code_slot, shared_code_slot, *shared_code_slot);
 
     candidate = next_candidate;
   }
@@ -1110,8 +1093,8 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
 
     Object** code_slot =
         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
-    isolate_->heap()->mark_compact_collector()->
-        RecordSlot(code_slot, code_slot, *code_slot);
+    isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
+                                                           *code_slot);
 
     candidate = next_candidate;
   }
@@ -1133,8 +1116,7 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
     FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
     int new_length = SharedFunctionInfo::kEntriesStart;
     int old_length = code_map->length();
-    for (int i = SharedFunctionInfo::kEntriesStart;
-         i < old_length;
+    for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
          i += SharedFunctionInfo::kEntryLength) {
       Code* code =
           Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
@@ -1149,10 +1131,10 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
         if (j == SharedFunctionInfo::kOsrAstIdOffset) {
           DCHECK(object->IsSmi());
         } else {
-          DCHECK(Marking::IsBlack(
-              Marking::MarkBitFrom(HeapObject::cast(*slot))));
-          isolate_->heap()->mark_compact_collector()->
-              RecordSlot(slot, slot, *slot);
+          DCHECK(
+              Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
+          isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
+                                                                 *slot);
         }
       }
     }
@@ -1240,8 +1222,9 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
 
 
 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
-  DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())->
-         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
+  DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
+              ->get(SharedFunctionInfo::kNextMapIndex)
+              ->IsUndefined());
 
   // Make sure previous flushing decisions are revisited.
   isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
@@ -1373,15 +1356,14 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
 class MarkCompactMarkingVisitor
     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
  public:
-  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
-                                   Map* map, HeapObject* obj);
+  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
+                                   HeapObject* obj);
 
   static void ObjectStatsCountFixedArray(
-      FixedArrayBase* fixed_array,
-      FixedArraySubInstanceType fast_type,
+      FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
       FixedArraySubInstanceType dictionary_type);
 
-  template<MarkCompactMarkingVisitor::VisitorId id>
+  template <MarkCompactMarkingVisitor::VisitorId id>
   class ObjectStatsTracker {
    public:
     static inline void Visit(Map* map, HeapObject* obj);
@@ -1425,8 +1407,7 @@ class MarkCompactMarkingVisitor
 
   // Mark object pointed to by p.
   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
-                                         Object** anchor_slot,
-                                         Object** p)) {
+                                         Object** anchor_slot, Object** p)) {
     if (!(*p)->IsHeapObject()) return;
     HeapObject* object = ShortCircuitConsString(p);
     collector->RecordSlot(anchor_slot, p, object);
@@ -1454,8 +1435,7 @@ class MarkCompactMarkingVisitor
 
   // Visit all unmarked objects pointed to by [start, end).
   // Returns false if the operation fails (lack of stack space).
-  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
-                                          Object** start,
+  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
                                           Object** end)) {
     // Return false if we are close to the stack limit.
     StackLimitCheck check(heap->isolate());
@@ -1476,20 +1456,20 @@ class MarkCompactMarkingVisitor
   }
 
  private:
-  template<int id>
+  template <int id>
   static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
 
   // Code flushing support.
 
   static const int kRegExpCodeThreshold = 5;
 
-  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
-                                          JSRegExp* re,
+  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                           bool is_ascii) {
     // Make sure that the fixed array is in fact initialized on the RegExp.
     // We could potentially trigger a GC when initializing the RegExp.
     if (HeapObject::cast(re->data())->map()->instance_type() !=
-            FIXED_ARRAY_TYPE) return;
+        FIXED_ARRAY_TYPE)
+      return;
 
     // Make sure this is a RegExp that actually contains code.
     if (re->TypeTag() != JSRegExp::IRREGEXP) return;
@@ -1506,8 +1486,7 @@ class MarkCompactMarkingVisitor
       // object.
       FixedArray* data = FixedArray::cast(re->data());
       Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
-      heap->mark_compact_collector()->
-          RecordSlot(slot, slot, code);
+      heap->mark_compact_collector()->RecordSlot(slot, slot, code);
 
       // Set a number in the 0-255 range to guarantee no smi overflow.
       re->SetDataAt(JSRegExp::code_index(is_ascii),
@@ -1556,19 +1535,16 @@ class MarkCompactMarkingVisitor
 
 
 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
-    FixedArrayBase* fixed_array,
-    FixedArraySubInstanceType fast_type,
+    FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
     FixedArraySubInstanceType dictionary_type) {
   Heap* heap = fixed_array->map()->GetHeap();
   if (fixed_array->map() != heap->fixed_cow_array_map() &&
       fixed_array->map() != heap->fixed_double_array_map() &&
       fixed_array != heap->empty_fixed_array()) {
     if (fixed_array->IsDictionary()) {
-      heap->RecordFixedArraySubTypeStats(dictionary_type,
-                                         fixed_array->Size());
+      heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
     } else {
-      heap->RecordFixedArraySubTypeStats(fast_type,
-                                         fixed_array->Size());
+      heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
     }
   }
 }
@@ -1582,8 +1558,7 @@ void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
   non_count_table_.GetVisitorById(id)(map, obj);
   if (obj->IsJSObject()) {
     JSObject* object = JSObject::cast(obj);
-    ObjectStatsCountFixedArray(object->elements(),
-                               DICTIONARY_ELEMENTS_SUB_TYPE,
+    ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
                                FAST_ELEMENTS_SUB_TYPE);
     ObjectStatsCountFixedArray(object->properties(),
                                DICTIONARY_PROPERTIES_SUB_TYPE,
@@ -1592,14 +1567,14 @@ void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
 }
 
 
-template<MarkCompactMarkingVisitor::VisitorId id>
-void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
-    Map* map, HeapObject* obj) {
+template <MarkCompactMarkingVisitor::VisitorId id>
+void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
+                                                              HeapObject* obj) {
   ObjectStatsVisitBase(id, map, obj);
 }
 
 
-template<>
+template <>
 class MarkCompactMarkingVisitor::ObjectStatsTracker<
     MarkCompactMarkingVisitor::kVisitMap> {
  public:
@@ -1634,7 +1609,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
 };
 
 
-template<>
+template <>
 class MarkCompactMarkingVisitor::ObjectStatsTracker<
     MarkCompactMarkingVisitor::kVisitCode> {
  public:
@@ -1650,7 +1625,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
 };
 
 
-template<>
+template <>
 class MarkCompactMarkingVisitor::ObjectStatsTracker<
     MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
  public:
@@ -1659,15 +1634,14 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
     if (sfi->scope_info() != heap->empty_fixed_array()) {
       heap->RecordFixedArraySubTypeStats(
-          SCOPE_INFO_SUB_TYPE,
-          FixedArray::cast(sfi->scope_info())->Size());
+          SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
     }
     ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
   }
 };
 
 
-template<>
+template <>
 class MarkCompactMarkingVisitor::ObjectStatsTracker<
     MarkCompactMarkingVisitor::kVisitFixedArray> {
  public:
@@ -1675,9 +1649,8 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
     Heap* heap = map->GetHeap();
     FixedArray* fixed_array = FixedArray::cast(obj);
     if (fixed_array == heap->string_table()) {
-      heap->RecordFixedArraySubTypeStats(
-          STRING_TABLE_SUB_TYPE,
-          fixed_array->Size());
+      heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
+                                         fixed_array->Size());
     }
     ObjectStatsVisitBase(kVisitFixedArray, map, obj);
   }
@@ -1687,14 +1660,13 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
 void MarkCompactMarkingVisitor::Initialize() {
   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
 
-  table_.Register(kVisitJSRegExp,
-                  &VisitRegExpAndFlushCode);
+  table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
 
   if (FLAG_track_gc_object_stats) {
     // Copy the visitor table to make call-through possible.
     non_count_table_.CopyFrom(&table_);
-#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
-    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
+#define VISITOR_ID_COUNT_FUNCTION(id) \
+  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
     VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
 #undef VISITOR_ID_COUNT_FUNCTION
   }
@@ -1801,11 +1773,9 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
 class RootMarkingVisitor : public ObjectVisitor {
  public:
   explicit RootMarkingVisitor(Heap* heap)
-    : collector_(heap->mark_compact_collector()) { }
+      : collector_(heap->mark_compact_collector()) {}
 
-  void VisitPointer(Object** p) {
-    MarkObjectByPointer(p);
-  }
+  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
 
   void VisitPointers(Object** start, Object** end) {
     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
@@ -1813,7 +1783,7 @@ class RootMarkingVisitor : public ObjectVisitor {
 
   // Skip the weak next code link in a code object, which is visited in
   // ProcessTopOptimizedFrame.
-  void VisitNextCodeLink(Object** p) { }
+  void VisitNextCodeLink(Object** p) {}
 
  private:
   void MarkObjectByPointer(Object** p) {
@@ -1843,11 +1813,10 @@ class RootMarkingVisitor : public ObjectVisitor {
 
 
 // Helper class for pruning the string table.
-template<bool finalize_external_strings>
+template <bool finalize_external_strings>
 class StringTableCleaner : public ObjectVisitor {
  public:
-  explicit StringTableCleaner(Heap* heap)
-    : heap_(heap), pointers_removed_(0) { }
+  explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
 
   virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
@@ -1907,7 +1876,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 // Fill the marking stack with overflowed objects returned by the given
 // iterator.  Stop when the marking stack is filled or the end of the space
 // is reached, whichever comes first.
-template<class T>
+template <class T>
 static void DiscoverGreyObjectsWithIterator(Heap* heap,
                                             MarkingDeque* marking_deque,
                                             T* it) {
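// Illustrative sketch only (not part of this change, not V8's implementation):
// a minimal version of the contract described in the comment above -- refill a
// bounded marking deque from an iterator and stop as soon as the deque is full
// or the iterator is exhausted. All names below are hypothetical.
#include <cstddef>
#include <vector>

struct ToyObject {
  bool grey = false;  // overflowed objects are re-discovered via their colour
};

class ToyMarkingDeque {
 public:
  explicit ToyMarkingDeque(size_t capacity) : capacity_(capacity) {}
  bool IsFull() const { return objects_.size() >= capacity_; }
  void PushBlack(ToyObject* object) { objects_.push_back(object); }

 private:
  size_t capacity_;
  std::vector<ToyObject*> objects_;
};

// Returns true if the whole range was consumed, false if we stopped early
// because the deque filled up; the caller would keep its overflow flag set
// and retry later.
template <class Iterator>
bool RefillFromIterator(ToyMarkingDeque* deque, Iterator it, Iterator end) {
  for (; it != end; ++it) {
    if (deque->IsFull()) return false;
    if ((*it)->grey) {
      (*it)->grey = false;  // grey -> black before pushing
      deque->PushBlack(*it);
    }
  }
  return true;
}

int main() {
  ToyObject a, b;
  a.grey = true;
  ToyObject* objects[] = {&a, &b};
  ToyMarkingDeque deque(8);
  return RefillFromIterator(&deque, objects, objects + 2) ? 0 : 1;
}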
@@ -1916,9 +1885,7 @@ static void DiscoverGreyObjectsWithIterator(Heap* heap,
   DCHECK(!marking_deque->IsFull());
 
   Map* filler_map = heap->one_pointer_filler_map();
-  for (HeapObject* object = it->Next();
-       object != NULL;
-       object = it->Next()) {
+  for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
     MarkBit markbit = Marking::MarkBitFrom(object);
     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
       Marking::GreyToBlack(markbit);
@@ -1950,9 +1917,9 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
 
     MarkBit::CellType grey_objects;
     if (it.HasNext()) {
-      const MarkBit::CellType next_cell = *(cell+1);
-      grey_objects = current_cell &
-          ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+      const MarkBit::CellType next_cell = *(cell + 1);
+      grey_objects = current_cell & ((current_cell >> 1) |
+                                     (next_cell << (Bitmap::kBitsPerCell - 1)));
     } else {
       grey_objects = current_cell & (current_cell >> 1);
     }
@@ -1980,8 +1947,7 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
 
 
 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space,
-    NewSpacePage* p) {
+    NewSpace* new_space, NewSpacePage* p) {
   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
   DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
   DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
@@ -2032,10 +1998,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
       }
       Object* target = allocation.ToObjectChecked();
 
-      MigrateObject(HeapObject::cast(target),
-                    object,
-                    size,
-                    NEW_SPACE);
+      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
       heap()->IncrementSemiSpaceCopiedObjectSize(size);
     }
     *cells = 0;
@@ -2044,8 +2007,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
 }
 
 
-static void DiscoverGreyObjectsInSpace(Heap* heap,
-                                       MarkingDeque* marking_deque,
+static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
                                        PagedSpace* space) {
   if (space->swept_precisely()) {
     HeapObjectIterator it(space);
@@ -2203,40 +2165,28 @@ void MarkCompactCollector::RefillMarkingDeque() {
   DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
                              heap()->old_pointer_space());
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->old_data_space());
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->code_space());
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->map_space());
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->cell_space());
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
                              heap()->property_cell_space());
   if (marking_deque_.IsFull()) return;
 
   LargeObjectIterator lo_it(heap()->lo_space());
-  DiscoverGreyObjectsWithIterator(heap(),
-                                  &marking_deque_,
-                                  &lo_it);
+  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
   if (marking_deque_.IsFull()) return;
 
   marking_deque_.ClearOverflowed();
@@ -2330,8 +2280,7 @@ void MarkCompactCollector::MarkLiveObjects() {
   if (FLAG_force_marking_deque_overflows) {
     marking_deque_end = marking_deque_start + 64 * kPointerSize;
   }
-  marking_deque_.Initialize(marking_deque_start,
-                            marking_deque_end);
+  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
   DCHECK(!marking_deque_.overflowed());
 
   if (incremental_marking_overflowed) {
@@ -2352,8 +2301,7 @@ void MarkCompactCollector::MarkLiveObjects() {
         if (IsMarked(cell)) {
           int offset = Cell::kValueOffset;
           MarkCompactMarkingVisitor::VisitPointer(
-              heap(),
-              reinterpret_cast<Object**>(cell->address() + offset));
+              heap(), reinterpret_cast<Object**>(cell->address() + offset));
         }
       }
     }
@@ -2465,12 +2413,12 @@ void MarkCompactCollector::ProcessMapCaches() {
         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
         int existing_elements = map_cache->NumberOfElements();
         int used_elements = 0;
-        for (int i = MapCache::kElementsStartIndex;
-             i < map_cache->length();
+        for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
              i += MapCache::kEntrySize) {
           Object* raw_key = map_cache->get(i);
           if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->the_hole_value()) continue;
+              raw_key == heap()->the_hole_value())
+            continue;
           STATIC_ASSERT(MapCache::kEntrySize == 2);
           Object* raw_map = map_cache->get(i + 1);
           if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
@@ -2506,8 +2454,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   // a marked map to an unmarked map to null transitions.  This action
   // is carried out only on maps of JSObjects and related subtypes.
   HeapObjectIterator map_iterator(heap()->map_space());
-  for (HeapObject* obj = map_iterator.Next();
-       obj != NULL;
+  for (HeapObject* obj = map_iterator.Next(); obj != NULL;
        obj = map_iterator.Next()) {
     Map* map = Map::cast(obj);
 
@@ -2528,8 +2475,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   // Iterate over property cell space, removing dependent code that is not
   // otherwise kept alive by strong references.
   HeapObjectIterator cell_iterator(heap_->property_cell_space());
-  for (HeapObject* cell = cell_iterator.Next();
-       cell != NULL;
+  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
        cell = cell_iterator.Next()) {
     if (IsMarked(cell)) {
       ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
@@ -2539,8 +2485,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   // Iterate over allocation sites, removing dependent code that is not
   // otherwise kept alive by strong references.
   Object* undefined = heap()->undefined_value();
-  for (Object* site = heap()->allocation_sites_list();
-       site != undefined;
+  for (Object* site = heap()->allocation_sites_list(); site != undefined;
        site = AllocationSite::cast(site)->weak_next()) {
     if (IsMarked(site)) {
       ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
@@ -2602,14 +2547,9 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
       int proto_index = proto_offset + new_number_of_transitions * step;
       int map_index = map_offset + new_number_of_transitions * step;
       if (new_number_of_transitions != i) {
-        prototype_transitions->set(
-            proto_index,
-            prototype,
-            UPDATE_WRITE_BARRIER);
-        prototype_transitions->set(
-            map_index,
-            cached_map,
-            SKIP_WRITE_BARRIER);
+        prototype_transitions->set(proto_index, prototype,
+                                   UPDATE_WRITE_BARRIER);
+        prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
       }
       Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
       RecordSlot(slot, slot, prototype);
@@ -2623,8 +2563,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
 
   // Fill slots that became free with undefined value.
   for (int i = new_number_of_transitions * step;
-       i < number_of_transitions * step;
-       i++) {
+       i < number_of_transitions * step; i++) {
     prototype_transitions->set_undefined(header + i);
   }
 }
@@ -2661,8 +2600,7 @@ void MarkCompactCollector::ClearDependentICList(Object* head) {
 }
 
 
-void MarkCompactCollector::ClearDependentCode(
-    DependentCode* entries) {
+void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
   DisallowHeapAllocation no_allocation;
   DependentCode::GroupStartIndexes starts(entries);
   int number_of_entries = starts.number_of_entries();
@@ -2767,8 +2705,8 @@ void MarkCompactCollector::ProcessWeakCollections() {
           RecordSlot(anchor, key_slot, *key_slot);
           Object** value_slot =
               table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
-          MarkCompactMarkingVisitor::MarkObjectByPointer(
-              this, anchor, value_slot);
+          MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
+                                                         value_slot);
         }
       }
     }
@@ -2819,15 +2757,13 @@ void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
   if (heap_->InNewSpace(value)) {
     heap_->store_buffer()->Mark(slot);
   } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
-    SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                       &migration_slots_buffer_,
+    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
                        reinterpret_cast<Object**>(slot),
                        SlotsBuffer::IGNORE_OVERFLOW);
   }
 }
 
 
-
 // We scavenge new space simultaneously with sweeping. This is done in two
 // passes.
 //
@@ -2842,10 +2778,8 @@ void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
 // pointer iteration.  This is an issue if the store buffer overflows and we
 // have to scan the entire old space, including dead objects, looking for
 // pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst,
-                                         HeapObject* src,
-                                         int size,
-                                         AllocationSpace dest) {
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+                                         int size, AllocationSpace dest) {
   Address dst_addr = dst->address();
   Address src_addr = src->address();
   DCHECK(heap()->AllowedToBeMigrated(src, dest));
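// Illustrative sketch only (not part of this change, not V8's implementation):
// the idea behind RecordMigratedSlot/MigrateObject above -- after copying an
// object, every field that still points into new space is remembered so the
// later pointer-update pass does not have to rescan all of old space.
// Hypothetical names and data structures throughout.
#include <cstring>
#include <vector>

namespace toy_gc {

struct ToyObject {
  ToyObject* fields[2];  // a fixed shape keeps the sketch short
};

// Stand-in for the store buffer: addresses of slots that point into new space.
std::vector<ToyObject**> g_remembered_slots;

inline bool InNewSpace(const ToyObject* value, const ToyObject* lo,
                       const ToyObject* hi) {
  return value >= lo && value < hi;
}

// Copy |src| to |dst| and remember any old-to-new pointers in the copy.
void MigrateAndRecord(ToyObject* dst, ToyObject* src, const ToyObject* new_lo,
                      const ToyObject* new_hi) {
  std::memcpy(dst, src, sizeof(ToyObject));
  for (ToyObject*& field : dst->fields) {
    if (field != nullptr && InNewSpace(field, new_lo, new_hi)) {
      g_remembered_slots.push_back(&field);
    }
  }
}

}  // namespace toy_gc

int main() {
  toy_gc::ToyObject new_space[4] = {};
  toy_gc::ToyObject old_from = {{&new_space[1], nullptr}};
  toy_gc::ToyObject old_to = {};
  toy_gc::MigrateAndRecord(&old_to, &old_from, new_space, new_space + 4);
  return toy_gc::g_remembered_slots.size() == 1 ? 0 : 1;
}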
@@ -2876,10 +2810,8 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst,
       Address code_entry = Memory::Address_at(code_entry_slot);
 
       if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-        SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                           &migration_slots_buffer_,
-                           SlotsBuffer::CODE_ENTRY_SLOT,
-                           code_entry_slot,
+        SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                           SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
                            SlotsBuffer::IGNORE_OVERFLOW);
       }
     } else if (dst->IsConstantPoolArray()) {
@@ -2891,10 +2823,8 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst,
         Address code_entry = Memory::Address_at(code_entry_slot);
 
         if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-          SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                             &migration_slots_buffer_,
-                             SlotsBuffer::CODE_ENTRY_SLOT,
-                             code_entry_slot,
+          SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                             SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
                              SlotsBuffer::IGNORE_OVERFLOW);
         }
       }
@@ -2909,10 +2839,8 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst,
   } else if (dest == CODE_SPACE) {
     PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
     heap()->MoveBlock(dst_addr, src_addr, size);
-    SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                       &migration_slots_buffer_,
-                       SlotsBuffer::RELOCATED_CODE_OBJECT,
-                       dst_addr,
+    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                       SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
                        SlotsBuffer::IGNORE_OVERFLOW);
     Code::cast(dst)->Relocate(dst_addr - src_addr);
   } else {
@@ -2926,13 +2854,11 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst,
 
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor: public ObjectVisitor {
+class PointersUpdatingVisitor : public ObjectVisitor {
  public:
-  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
+  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
 
-  void VisitPointer(Object** p) {
-    UpdatePointer(p);
-  }
+  void VisitPointer(Object** p) { UpdatePointer(p); }
 
   void VisitPointers(Object** start, Object** end) {
     for (Object** p = start; p < end; p++) UpdatePointer(p);
@@ -2999,9 +2925,7 @@ class PointersUpdatingVisitor: public ObjectVisitor {
   }
 
  private:
-  inline void UpdatePointer(Object** p) {
-    UpdateSlot(heap_, p);
-  }
+  inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
 
   Heap* heap_;
 };
@@ -3058,10 +2982,7 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
   HeapObject* target;
   AllocationResult allocation = target_space->AllocateRaw(object_size);
   if (allocation.To(&target)) {
-    MigrateObject(target,
-                  object,
-                  object_size,
-                  target_space->identity());
+    MigrateObject(target, object, object_size, target_space->identity());
     heap()->IncrementPromotedObjectsSize(object_size);
     return true;
   }
@@ -3211,10 +3132,8 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 };
 
 
-static inline void UpdateSlot(Isolate* isolate,
-                              ObjectVisitor* v,
-                              SlotsBuffer::SlotType slot_type,
-                              Address addr) {
+static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
+                              SlotsBuffer::SlotType slot_type, Address addr) {
   switch (slot_type) {
     case SlotsBuffer::CODE_TARGET_SLOT: {
       RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
@@ -3252,28 +3171,17 @@ static inline void UpdateSlot(Isolate* isolate,
 }
 
 
-enum SweepingMode {
-  SWEEP_ONLY,
-  SWEEP_AND_VISIT_LIVE_OBJECTS
-};
+enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
 
 
-enum SkipListRebuildingMode {
-  REBUILD_SKIP_LIST,
-  IGNORE_SKIP_LIST
-};
+enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
 
 
-enum FreeSpaceTreatmentMode {
-  IGNORE_FREE_SPACE,
-  ZAP_FREE_SPACE
-};
+enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
 
 
-template<MarkCompactCollector::SweepingParallelism mode>
-static intptr_t Free(PagedSpace* space,
-                     FreeList* free_list,
-                     Address start,
+template <MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
                      int size) {
   if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
     DCHECK(free_list == NULL);
@@ -3292,14 +3200,12 @@ static intptr_t Free(PagedSpace* space,
 // Slots in live objects pointing into evacuation candidates are updated
 // if requested.
 // Returns the size of the biggest continuous freed memory chunk in bytes.
-template<SweepingMode sweeping_mode,
-         MarkCompactCollector::SweepingParallelism parallelism,
-         SkipListRebuildingMode skip_list_mode,
-         FreeSpaceTreatmentMode free_space_mode>
-static int SweepPrecisely(PagedSpace* space,
-                           FreeList* free_list,
-                           Page* p,
-                           ObjectVisitor* v) {
+template <SweepingMode sweeping_mode,
+          MarkCompactCollector::SweepingParallelism parallelism,
+          SkipListRebuildingMode skip_list_mode,
+          FreeSpaceTreatmentMode free_space_mode>
+static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
+                          ObjectVisitor* v) {
   DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
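// Illustrative sketch only (not part of this change, not V8's implementation):
// the core loop that the SweepPrecisely comment above describes -- walk one
// cell of mark bits, treat each set bit as a live object start, and hand the
// gaps between live objects back to a free list. Object sizes are faked as the
// two-word minimum here; the real sweeper reads them from the object's map.
// Hypothetical names.
#include <cstdint>
#include <cstdio>

constexpr int kPointerSize = 8;                   // bytes per word
constexpr int kMinObjectSize = 2 * kPointerSize;  // objects are >= 2 words

// Report free byte ranges for a 32-word region whose liveness is encoded in
// |mark_word| (bit i set == live object starting at word i).
void SweepCell(uint32_t mark_word, int region_start) {
  int free_start = region_start;
  for (int bit = 0; bit < 32; ++bit) {
    if ((mark_word >> bit) & 1u) {
      int live_start = region_start + bit * kPointerSize;
      if (live_start > free_start) {
        std::printf("free [%d, %d)\n", free_start, live_start);
      }
      free_start = live_start + kMinObjectSize;
    }
  }
  int region_end = region_start + 32 * kPointerSize;
  if (free_start < region_end) {
    std::printf("free [%d, %d)\n", free_start, region_end);
  }
}

int main() {
  // Live objects at words 0, 4 and 9: expect free gaps at [16,32), [48,72)
  // and the tail [88,256).
  SweepCell((1u << 0) | (1u << 4) | (1u << 9), 0);
  return 0;
}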
@@ -3325,7 +3231,7 @@ static int SweepPrecisely(PagedSpace* space,
     MarkBit::CellType* cell = it.CurrentCell();
     int live_objects = MarkWordToObjectStarts(*cell, offsets);
     int live_index = 0;
-    for ( ; live_objects != 0; live_objects--) {
+    for (; live_objects != 0; live_objects--) {
       Address free_end = cell_base + offsets[live_index++] * kPointerSize;
       if (free_end != free_start) {
         int size = static_cast<int>(free_end - free_start);
@@ -3348,12 +3254,10 @@ static int SweepPrecisely(PagedSpace* space,
         live_object->IterateBody(map->instance_type(), size, v);
       }
       if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
-        int new_region_start =
-            SkipList::RegionNumber(free_end);
+        int new_region_start = SkipList::RegionNumber(free_end);
         int new_region_end =
             SkipList::RegionNumber(free_end + size - kPointerSize);
-        if (new_region_start != curr_region ||
-            new_region_end != curr_region) {
+        if (new_region_start != curr_region || new_region_end != curr_region) {
           skip_list->AddObject(free_end, size);
           curr_region = new_region_end;
         }
@@ -3392,8 +3296,7 @@ static int SweepPrecisely(PagedSpace* space,
 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
   Page* p = Page::FromAddress(code->address());
 
-  if (p->IsEvacuationCandidate() ||
-      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+  if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
     return false;
   }
 
@@ -3426,7 +3329,7 @@ static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
       *end_cell |= end_mask;
     }
   } else {
-    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
+    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
       *cell = 0;
     }
   }
@@ -3515,13 +3418,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   Heap::RelocationLock relocation_lock(heap());
 
   bool code_slots_filtering_required;
-  { GCTracer::Scope gc_scope(heap()->tracer(),
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_SWEEP_NEWSPACE);
     code_slots_filtering_required = MarkInvalidatedCode();
     EvacuateNewSpace();
   }
 
-  { GCTracer::Scope gc_scope(heap()->tracer(),
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuatePages();
   }
@@ -3529,40 +3434,40 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   // Second pass: find pointers to new space and update them.
   PointersUpdatingVisitor updating_visitor(heap());
 
-  { GCTracer::Scope gc_scope(heap()->tracer(),
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
     // Update pointers in to space.
     SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                             heap()->new_space()->top());
-    for (HeapObject* object = to_it.Next();
-         object != NULL;
+    for (HeapObject* object = to_it.Next(); object != NULL;
          object = to_it.Next()) {
       Map* map = object->map();
-      object->IterateBody(map->instance_type(),
-                          object->SizeFromMap(map),
+      object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                           &updating_visitor);
     }
   }
 
-  { GCTracer::Scope gc_scope(heap()->tracer(),
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
     // Update roots.
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
   }
 
-  { GCTracer::Scope gc_scope(heap()->tracer(),
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
-    StoreBufferRebuildScope scope(heap_,
-                                  heap_->store_buffer(),
+    StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
                                   &Heap::ScavengeStoreBufferCallback);
     heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
         &UpdatePointer);
   }
 
-  { GCTracer::Scope gc_scope(heap()->tracer(),
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
-    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                       migration_slots_buffer_,
+    SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
                                        code_slots_filtering_required);
     if (FLAG_trace_fragmentation) {
       PrintF("  migration slots buffer: %d\n",
@@ -3587,20 +3492,20 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   }
 
   int npages = evacuation_candidates_.length();
-  { GCTracer::Scope gc_scope(
-      heap()->tracer(), GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+  {
+    GCTracer::Scope gc_scope(
+        heap()->tracer(),
+        GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
     for (int i = 0; i < npages; i++) {
       Page* p = evacuation_candidates_[i];
       DCHECK(p->IsEvacuationCandidate() ||
              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
 
       if (p->IsEvacuationCandidate()) {
-        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                           p->slots_buffer(),
+        SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
                                            code_slots_filtering_required);
         if (FLAG_trace_fragmentation) {
-          PrintF("  page %p slots buffer: %d\n",
-                 reinterpret_cast<void*>(p),
+          PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
         }
 
@@ -3622,24 +3527,18 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
             SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
             break;
           case OLD_POINTER_SPACE:
-            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
-                           SWEEP_ON_MAIN_THREAD,
-                           IGNORE_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(
+            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                           IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
                 space, NULL, p, &updating_visitor);
             break;
           case CODE_SPACE:
             if (FLAG_zap_code_space) {
-              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
-                             SWEEP_ON_MAIN_THREAD,
-                             REBUILD_SKIP_LIST,
-                             ZAP_FREE_SPACE>(
+              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                             REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
                   space, NULL, p, &updating_visitor);
             } else {
-              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
-                             SWEEP_ON_MAIN_THREAD,
-                             REBUILD_SKIP_LIST,
-                             IGNORE_FREE_SPACE>(
+              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                             REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
                   space, NULL, p, &updating_visitor);
             }
             break;
@@ -3656,8 +3555,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 
   // Update pointers from cells.
   HeapObjectIterator cell_iterator(heap_->cell_space());
-  for (HeapObject* cell = cell_iterator.Next();
-       cell != NULL;
+  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
        cell = cell_iterator.Next()) {
     if (cell->IsCell()) {
       Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
@@ -3666,8 +3564,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 
   HeapObjectIterator js_global_property_cell_iterator(
       heap_->property_cell_space());
-  for (HeapObject* cell = js_global_property_cell_iterator.Next();
-       cell != NULL;
+  for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
        cell = js_global_property_cell_iterator.Next()) {
     if (cell->IsPropertyCell()) {
       PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
@@ -3748,177 +3645,348 @@ static const int kStartTableUnusedEntry = 126;
 // Since objects are at least 2 words large we don't have entries for two
 // consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
-  0, _, _, _, _,  // 0
-  1, 0, _, _, _,  // 1
-  1, 1, _, _, _,  // 2
-  X, _, _, _, _,  // 3
-  1, 2, _, _, _,  // 4
-  2, 0, 2, _, _,  // 5
-  X, _, _, _, _,  // 6
-  X, _, _, _, _,  // 7
-  1, 3, _, _, _,  // 8
-  2, 0, 3, _, _,  // 9
-  2, 1, 3, _, _,  // 10
-  X, _, _, _, _,  // 11
-  X, _, _, _, _,  // 12
-  X, _, _, _, _,  // 13
-  X, _, _, _, _,  // 14
-  X, _, _, _, _,  // 15
-  1, 4, _, _, _,  // 16
-  2, 0, 4, _, _,  // 17
-  2, 1, 4, _, _,  // 18
-  X, _, _, _, _,  // 19
-  2, 2, 4, _, _,  // 20
-  3, 0, 2, 4, _,  // 21
-  X, _, _, _, _,  // 22
-  X, _, _, _, _,  // 23
-  X, _, _, _, _,  // 24
-  X, _, _, _, _,  // 25
-  X, _, _, _, _,  // 26
-  X, _, _, _, _,  // 27
-  X, _, _, _, _,  // 28
-  X, _, _, _, _,  // 29
-  X, _, _, _, _,  // 30
-  X, _, _, _, _,  // 31
-  1, 5, _, _, _,  // 32
-  2, 0, 5, _, _,  // 33
-  2, 1, 5, _, _,  // 34
-  X, _, _, _, _,  // 35
-  2, 2, 5, _, _,  // 36
-  3, 0, 2, 5, _,  // 37
-  X, _, _, _, _,  // 38
-  X, _, _, _, _,  // 39
-  2, 3, 5, _, _,  // 40
-  3, 0, 3, 5, _,  // 41
-  3, 1, 3, 5, _,  // 42
-  X, _, _, _, _,  // 43
-  X, _, _, _, _,  // 44
-  X, _, _, _, _,  // 45
-  X, _, _, _, _,  // 46
-  X, _, _, _, _,  // 47
-  X, _, _, _, _,  // 48
-  X, _, _, _, _,  // 49
-  X, _, _, _, _,  // 50
-  X, _, _, _, _,  // 51
-  X, _, _, _, _,  // 52
-  X, _, _, _, _,  // 53
-  X, _, _, _, _,  // 54
-  X, _, _, _, _,  // 55
-  X, _, _, _, _,  // 56
-  X, _, _, _, _,  // 57
-  X, _, _, _, _,  // 58
-  X, _, _, _, _,  // 59
-  X, _, _, _, _,  // 60
-  X, _, _, _, _,  // 61
-  X, _, _, _, _,  // 62
-  X, _, _, _, _,  // 63
-  1, 6, _, _, _,  // 64
-  2, 0, 6, _, _,  // 65
-  2, 1, 6, _, _,  // 66
-  X, _, _, _, _,  // 67
-  2, 2, 6, _, _,  // 68
-  3, 0, 2, 6, _,  // 69
-  X, _, _, _, _,  // 70
-  X, _, _, _, _,  // 71
-  2, 3, 6, _, _,  // 72
-  3, 0, 3, 6, _,  // 73
-  3, 1, 3, 6, _,  // 74
-  X, _, _, _, _,  // 75
-  X, _, _, _, _,  // 76
-  X, _, _, _, _,  // 77
-  X, _, _, _, _,  // 78
-  X, _, _, _, _,  // 79
-  2, 4, 6, _, _,  // 80
-  3, 0, 4, 6, _,  // 81
-  3, 1, 4, 6, _,  // 82
-  X, _, _, _, _,  // 83
-  3, 2, 4, 6, _,  // 84
-  4, 0, 2, 4, 6,  // 85
-  X, _, _, _, _,  // 86
-  X, _, _, _, _,  // 87
-  X, _, _, _, _,  // 88
-  X, _, _, _, _,  // 89
-  X, _, _, _, _,  // 90
-  X, _, _, _, _,  // 91
-  X, _, _, _, _,  // 92
-  X, _, _, _, _,  // 93
-  X, _, _, _, _,  // 94
-  X, _, _, _, _,  // 95
-  X, _, _, _, _,  // 96
-  X, _, _, _, _,  // 97
-  X, _, _, _, _,  // 98
-  X, _, _, _, _,  // 99
-  X, _, _, _, _,  // 100
-  X, _, _, _, _,  // 101
-  X, _, _, _, _,  // 102
-  X, _, _, _, _,  // 103
-  X, _, _, _, _,  // 104
-  X, _, _, _, _,  // 105
-  X, _, _, _, _,  // 106
-  X, _, _, _, _,  // 107
-  X, _, _, _, _,  // 108
-  X, _, _, _, _,  // 109
-  X, _, _, _, _,  // 110
-  X, _, _, _, _,  // 111
-  X, _, _, _, _,  // 112
-  X, _, _, _, _,  // 113
-  X, _, _, _, _,  // 114
-  X, _, _, _, _,  // 115
-  X, _, _, _, _,  // 116
-  X, _, _, _, _,  // 117
-  X, _, _, _, _,  // 118
-  X, _, _, _, _,  // 119
-  X, _, _, _, _,  // 120
-  X, _, _, _, _,  // 121
-  X, _, _, _, _,  // 122
-  X, _, _, _, _,  // 123
-  X, _, _, _, _,  // 124
-  X, _, _, _, _,  // 125
-  X, _, _, _, _,  // 126
-  X, _, _, _, _,  // 127
-  1, 7, _, _, _,  // 128
-  2, 0, 7, _, _,  // 129
-  2, 1, 7, _, _,  // 130
-  X, _, _, _, _,  // 131
-  2, 2, 7, _, _,  // 132
-  3, 0, 2, 7, _,  // 133
-  X, _, _, _, _,  // 134
-  X, _, _, _, _,  // 135
-  2, 3, 7, _, _,  // 136
-  3, 0, 3, 7, _,  // 137
-  3, 1, 3, 7, _,  // 138
-  X, _, _, _, _,  // 139
-  X, _, _, _, _,  // 140
-  X, _, _, _, _,  // 141
-  X, _, _, _, _,  // 142
-  X, _, _, _, _,  // 143
-  2, 4, 7, _, _,  // 144
-  3, 0, 4, 7, _,  // 145
-  3, 1, 4, 7, _,  // 146
-  X, _, _, _, _,  // 147
-  3, 2, 4, 7, _,  // 148
-  4, 0, 2, 4, 7,  // 149
-  X, _, _, _, _,  // 150
-  X, _, _, _, _,  // 151
-  X, _, _, _, _,  // 152
-  X, _, _, _, _,  // 153
-  X, _, _, _, _,  // 154
-  X, _, _, _, _,  // 155
-  X, _, _, _, _,  // 156
-  X, _, _, _, _,  // 157
-  X, _, _, _, _,  // 158
-  X, _, _, _, _,  // 159
-  2, 5, 7, _, _,  // 160
-  3, 0, 5, 7, _,  // 161
-  3, 1, 5, 7, _,  // 162
-  X, _, _, _, _,  // 163
-  3, 2, 5, 7, _,  // 164
-  4, 0, 2, 5, 7,  // 165
-  X, _, _, _, _,  // 166
-  X, _, _, _, _,  // 167
-  3, 3, 5, 7, _,  // 168
-  4, 0, 3, 5, 7,  // 169
-  4, 1, 3, 5, 7   // 170
+    0, _, _, _, _,  // 0
+    1, 0, _, _, _,  // 1
+    1, 1, _, _, _,  // 2
+    X, _, _, _, _,  // 3
+    1, 2, _, _, _,  // 4
+    2, 0, 2, _, _,  // 5
+    X, _, _, _, _,  // 6
+    X, _, _, _, _,  // 7
+    1, 3, _, _, _,  // 8
+    2, 0, 3, _, _,  // 9
+    2, 1, 3, _, _,  // 10
+    X, _, _, _, _,  // 11
+    X, _, _, _, _,  // 12
+    X, _, _, _, _,  // 13
+    X, _, _, _, _,  // 14
+    X, _, _, _, _,  // 15
+    1, 4, _, _, _,  // 16
+    2, 0, 4, _, _,  // 17
+    2, 1, 4, _, _,  // 18
+    X, _, _, _, _,  // 19
+    2, 2, 4, _, _,  // 20
+    3, 0, 2, 4, _,  // 21
+    X, _, _, _, _,  // 22
+    X, _, _, _, _,  // 23
+    X, _, _, _, _,  // 24
+    X, _, _, _, _,  // 25
+    X, _, _, _, _,  // 26
+    X, _, _, _, _,  // 27
+    X, _, _, _, _,  // 28
+    X, _, _, _, _,  // 29
+    X, _, _, _, _,  // 30
+    X, _, _, _, _,  // 31
+    1, 5, _, _, _,  // 32
+    2, 0, 5, _, _,  // 33
+    2, 1, 5, _, _,  // 34
+    X, _, _, _, _,  // 35
+    2, 2, 5, _, _,  // 36
+    3, 0, 2, 5, _,  // 37
+    X, _, _, _, _,  // 38
+    X, _, _, _, _,  // 39
+    2, 3, 5, _, _,  // 40
+    3, 0, 3, 5, _,  // 41
+    3, 1, 3, 5, _,  // 42
+    X, _, _, _, _,  // 43
+    X, _, _, _, _,  // 44
+    X, _, _, _, _,  // 45
+    X, _, _, _, _,  // 46
+    X, _, _, _, _,  // 47
+    X, _, _, _, _,  // 48
+    X, _, _, _, _,  // 49
+    X, _, _, _, _,  // 50
+    X, _, _, _, _,  // 51
+    X, _, _, _, _,  // 52
+    X, _, _, _, _,  // 53
+    X, _, _, _, _,  // 54
+    X, _, _, _, _,  // 55
+    X, _, _, _, _,  // 56
+    X, _, _, _, _,  // 57
+    X, _, _, _, _,  // 58
+    X, _, _, _, _,  // 59
+    X, _, _, _, _,  // 60
+    X, _, _, _, _,  // 61
+    X, _, _, _, _,  // 62
+    X, _, _, _, _,  // 63
+    1, 6, _, _, _,  // 64
+    2, 0, 6, _, _,  // 65
+    2, 1, 6, _, _,  // 66
+    X, _, _, _, _,  // 67
+    2, 2, 6, _, _,  // 68
+    3, 0, 2, 6, _,  // 69
+    X, _, _, _, _,  // 70
+    X, _, _, _, _,  // 71
+    2, 3, 6, _, _,  // 72
+    3, 0, 3, 6, _,  // 73
+    3, 1, 3, 6, _,  // 74
+    X, _, _, _, _,  // 75
+    X, _, _, _, _,  // 76
+    X, _, _, _, _,  // 77
+    X, _, _, _, _,  // 78
+    X, _, _, _, _,  // 79
+    2, 4, 6, _, _,  // 80
+    3, 0, 4, 6, _,  // 81
+    3, 1, 4, 6, _,  // 82
+    X, _, _, _, _,  // 83
+    3, 2, 4, 6, _,  // 84
+    4, 0, 2, 4, 6,  // 85
+    X, _, _, _, _,  // 86
+    X, _, _, _, _,  // 87
+    X, _, _, _, _,  // 88
+    X, _, _, _, _,  // 89
+    X, _, _, _, _,  // 90
+    X, _, _, _, _,  // 91
+    X, _, _, _, _,  // 92
+    X, _, _, _, _,  // 93
+    X, _, _, _, _,  // 94
+    X, _, _, _, _,  // 95
+    X, _, _, _, _,  // 96
+    X, _, _, _, _,  // 97
+    X, _, _, _, _,  // 98
+    X, _, _, _, _,  // 99
+    X, _, _, _, _,  // 100
+    X, _, _, _, _,  // 101
+    X, _, _, _, _,  // 102
+    X, _, _, _, _,  // 103
+    X, _, _, _, _,  // 104
+    X, _, _, _, _,  // 105
+    X, _, _, _, _,  // 106
+    X, _, _, _, _,  // 107
+    X, _, _, _, _,  // 108
+    X, _, _, _, _,  // 109
+    X, _, _, _, _,  // 110
+    X, _, _, _, _,  // 111
+    X, _, _, _, _,  // 112
+    X, _, _, _, _,  // 113
+    X, _, _, _, _,  // 114
+    X, _, _, _, _,  // 115
+    X, _, _, _, _,  // 116
+    X, _, _, _, _,  // 117
+    X, _, _, _, _,  // 118
+    X, _, _, _, _,  // 119
+    X, _, _, _, _,  // 120
+    X, _, _, _, _,  // 121
+    X, _, _, _, _,  // 122
+    X, _, _, _, _,  // 123
+    X, _, _, _, _,  // 124
+    X, _, _, _, _,  // 125
+    X, _, _, _, _,  // 126
+    X, _, _, _, _,  // 127
+    1, 7, _, _, _,  // 128
+    2, 0, 7, _, _,  // 129
+    2, 1, 7, _, _,  // 130
+    X, _, _, _, _,  // 131
+    2, 2, 7, _, _,  // 132
+    3, 0, 2, 7, _,  // 133
+    X, _, _, _, _,  // 134
+    X, _, _, _, _,  // 135
+    2, 3, 7, _, _,  // 136
+    3, 0, 3, 7, _,  // 137
+    3, 1, 3, 7, _,  // 138
+    X, _, _, _, _,  // 139
+    X, _, _, _, _,  // 140
+    X, _, _, _, _,  // 141
+    X, _, _, _, _,  // 142
+    X, _, _, _, _,  // 143
+    2, 4, 7, _, _,  // 144
+    3, 0, 4, 7, _,  // 145
+    3, 1, 4, 7, _,  // 146
+    X, _, _, _, _,  // 147
+    3, 2, 4, 7, _,  // 148
+    4, 0, 2, 4, 7,  // 149
+    X, _, _, _, _,  // 150
+    X, _, _, _, _,  // 151
+    X, _, _, _, _,  // 152
+    X, _, _, _, _,  // 153
+    X, _, _, _, _,  // 154
+    X, _, _, _, _,  // 155
+    X, _, _, _, _,  // 156
+    X, _, _, _, _,  // 157
+    X, _, _, _, _,  // 158
+    X, _, _, _, _,  // 159
+    2, 5, 7, _, _,  // 160
+    3, 0, 5, 7, _,  // 161
+    3, 1, 5, 7, _,  // 162
+    X, _, _, _, _,  // 163
+    3, 2, 5, 7, _,  // 164
+    4, 0, 2, 5, 7,  // 165
+    X, _, _, _, _,  // 166
+    X, _, _, _, _,  // 167
+    3, 3, 5, 7, _,  // 168
+    4, 0, 3, 5, 7,  // 169
+    4, 1, 3, 5, 7  // 170
 };
 #undef _
 #undef X
@@ -4009,16 +4077,14 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
 
 // Force instantiation of templatized SweepConservatively method for
 // SWEEP_ON_MAIN_THREAD mode.
-template int MarkCompactCollector::
-    SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
-        PagedSpace*, FreeList*, Page*);
+template int MarkCompactCollector::SweepConservatively<
+    MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
 
 
 // Force instantiation of templatized SweepConservatively method for
 // SWEEP_IN_PARALLEL mode.
-template int MarkCompactCollector::
-    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
-        PagedSpace*, FreeList*, Page*);
+template int MarkCompactCollector::SweepConservatively<
+    MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
 
 
 // Sweeps a space conservatively.  After this has been done the larger free
@@ -4028,15 +4094,14 @@ template int MarkCompactCollector::
 // because it means that any FreeSpace maps left actually describe a region of
 // memory that can be ignored when scanning.  Dead objects other than free
 // spaces will not contain the free space map.
-template<MarkCompactCollector::SweepingParallelism mode>
+template <MarkCompactCollector::SweepingParallelism mode>
 int MarkCompactCollector::SweepConservatively(PagedSpace* space,
-                                              FreeList* free_list,
-                                              Page* p) {
+                                              FreeList* free_list, Page* p) {
   DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
-  DCHECK((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
-         free_list != NULL) ||
-         (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
-         free_list == NULL));
+  DCHECK(
+      (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
+      (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
+       free_list == NULL));
 
   intptr_t freed_bytes = 0;
   intptr_t max_freed_bytes = 0;
@@ -4054,8 +4119,8 @@ int MarkCompactCollector::SweepConservatively(PagedSpace* space,
 
   if (it.Done()) {
     size = p->area_end() - p->area_start();
-    freed_bytes = Free<mode>(space, free_list, p->area_start(),
-                             static_cast<int>(size));
+    freed_bytes =
+        Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     DCHECK_EQ(0, p->LiveBytes());
     if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
@@ -4073,8 +4138,8 @@ int MarkCompactCollector::SweepConservatively(PagedSpace* space,
   Address free_end = StartOfLiveObject(cell_base, *cell);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes = Free<mode>(space, free_list, p->area_start(),
-                           static_cast<int>(size));
+  freed_bytes =
+      Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
   max_freed_bytes = Max(freed_bytes, max_freed_bytes);
 
   // The start of the current free area is represented in undigested form by
@@ -4249,9 +4314,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
             PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                    reinterpret_cast<intptr_t>(p));
           }
-          SweepPrecisely<SWEEP_ONLY,
-                         SWEEP_ON_MAIN_THREAD,
-                         IGNORE_SKIP_LIST,
+          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
                          IGNORE_FREE_SPACE>(space, NULL, p, NULL);
           pages_swept++;
           parallel_sweeping_active = true;
@@ -4271,34 +4334,25 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
                  reinterpret_cast<intptr_t>(p));
         }
         if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
-          SweepPrecisely<SWEEP_ONLY,
-                         SWEEP_ON_MAIN_THREAD,
-                         REBUILD_SKIP_LIST,
+          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                          ZAP_FREE_SPACE>(space, NULL, p, NULL);
         } else if (space->identity() == CODE_SPACE) {
-          SweepPrecisely<SWEEP_ONLY,
-                         SWEEP_ON_MAIN_THREAD,
-                         REBUILD_SKIP_LIST,
+          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                          IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         } else {
-          SweepPrecisely<SWEEP_ONLY,
-                         SWEEP_ON_MAIN_THREAD,
-                         IGNORE_SKIP_LIST,
+          SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
                          IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         }
         pages_swept++;
         break;
       }
-      default: {
-        UNREACHABLE();
-      }
+      default: { UNREACHABLE(); }
     }
   }
 
   if (FLAG_gc_verbose) {
     PrintF("SweepSpace: %s (%d pages swept)\n",
-           AllocationSpaceName(space->identity()),
-           pages_swept);
+           AllocationSpaceName(space->identity()), pages_swept);
   }
 
   // Give pages that are queued to be freed back to the OS.
@@ -4349,9 +4403,11 @@ void MarkCompactCollector::SweepSpaces() {
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  { GCTracer::Scope sweep_scope(heap()->tracer(),
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_OLDSPACE);
-    { SequentialSweepingScope scope(this);
+    {
+      SequentialSweepingScope scope(this);
       SweepSpace(heap()->old_pointer_space(), how_to_sweep);
       SweepSpace(heap()->old_data_space(), how_to_sweep);
     }
@@ -4366,12 +4422,14 @@ void MarkCompactCollector::SweepSpaces() {
   }
   RemoveDeadInvalidatedCode();
 
-  { GCTracer::Scope sweep_scope(heap()->tracer(),
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_CODE);
     SweepSpace(heap()->code_space(), PRECISE);
   }
 
-  { GCTracer::Scope sweep_scope(heap()->tracer(),
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_CELL);
     SweepSpace(heap()->cell_space(), PRECISE);
     SweepSpace(heap()->property_cell_space(), PRECISE);
@@ -4382,7 +4440,8 @@ void MarkCompactCollector::SweepSpaces() {
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
-  { GCTracer::Scope sweep_scope(heap()->tracer(),
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_MAP);
     SweepSpace(heap()->map_space(), PRECISE);
   }
@@ -4456,9 +4515,7 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
 }
 
 
-Isolate* MarkCompactCollector::isolate() const {
-  return heap_->isolate();
-}
+Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
 
 
 void MarkCompactCollector::Initialize() {
@@ -4473,10 +4530,8 @@ bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
 
 
 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
-                        SlotsBuffer** buffer_address,
-                        SlotType type,
-                        Address addr,
-                        AdditionMode mode) {
+                        SlotsBuffer** buffer_address, SlotType type,
+                        Address addr, AdditionMode mode) {
   SlotsBuffer* buffer = *buffer_address;
   if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
     if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
@@ -4519,22 +4574,18 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
       // This doesn't need to be typed since it is just a normal heap pointer.
       Object** target_pointer =
           reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
-      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                                   target_page->slots_buffer_address(),
-                                   target_pointer,
-                                   SlotsBuffer::FAIL_ON_OVERFLOW);
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
     } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
-      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                                   target_page->slots_buffer_address(),
-                                   SlotsBuffer::CODE_ENTRY_SLOT,
-                                   rinfo->constant_pool_entry_address(),
-                                   SlotsBuffer::FAIL_ON_OVERFLOW);
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
+          SlotsBuffer::FAIL_ON_OVERFLOW);
     } else {
-      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                                  target_page->slots_buffer_address(),
-                                  SlotTypeForRMode(rmode),
-                                  rinfo->pc(),
-                                  SlotsBuffer::FAIL_ON_OVERFLOW);
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
     }
     if (!success) {
       EvictEvacuationCandidate(target_page);
@@ -4549,8 +4600,7 @@ void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
       !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                             target_page->slots_buffer_address(),
-                            SlotsBuffer::CODE_ENTRY_SLOT,
-                            slot,
+                            SlotsBuffer::CODE_ENTRY_SLOT, slot,
                             SlotsBuffer::FAIL_ON_OVERFLOW)) {
       EvictEvacuationCandidate(target_page);
     }
@@ -4561,8 +4611,9 @@ void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
   DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
   if (is_compacting()) {
-    Code* host = isolate()->inner_pointer_to_code_cache()->
-        GcSafeFindCodeForInnerPointer(pc);
+    Code* host =
+        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
+            pc);
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
@@ -4588,9 +4639,7 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
     } else {
       ++slot_idx;
       DCHECK(slot_idx < idx_);
-      UpdateSlot(heap->isolate(),
-                 &v,
-                 DecodeSlotType(slot),
+      UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
                  reinterpret_cast<Address>(slots_[slot_idx]));
     }
   }
@@ -4611,9 +4660,7 @@ void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
       DCHECK(slot_idx < idx_);
       Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
       if (!IsOnInvalidatedCodeObject(pc)) {
-        UpdateSlot(heap->isolate(),
-                   &v,
-                   DecodeSlotType(slot),
+        UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
                    reinterpret_cast<Address>(slots_[slot_idx]));
       }
     }
@@ -4640,6 +4687,5 @@ void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
   }
   *buffer_address = NULL;
 }
-
-
-} }  // namespace v8::internal
+}
+}  // namespace v8::internal
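
Editor's note: the SweepConservatively hunks above only re-wrap the explicit template instantiations; the pattern itself is unchanged. A minimal standalone sketch of that pattern follows (the names are illustrative, not the V8 declarations):

// sweep_sketch.cc -- hypothetical example of forcing instantiation of a
// templatized sweeper for two compile-time modes, the way mark-compact.cc
// does for SWEEP_ON_MAIN_THREAD and SWEEP_IN_PARALLEL.
enum SweepMode { kOnMainThread, kInParallel };

template <SweepMode mode>
int SweepPage(int live_bytes) {
  // A real sweeper would walk mark bits; here we only branch on the mode.
  return mode == kOnMainThread ? live_bytes : live_bytes / 2;
}

// Force instantiation so the definitions can stay in the .cc file while
// callers in other translation units link against both modes.
template int SweepPage<kOnMainThread>(int);
template int SweepPage<kInParallel>(int);
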
similarity index 90%
rename from src/mark-compact.h
rename to src/heap/mark-compact.h
index 94f5386..1866c2a 100644 (file)
@@ -2,11 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_MARK_COMPACT_H_
-#define V8_MARK_COMPACT_H_
+#ifndef V8_HEAP_MARK_COMPACT_H_
+#define V8_HEAP_MARK_COMPACT_H_
 
 #include "src/compiler-intrinsics.h"
-#include "src/spaces.h"
+#include "src/heap/spaces.h"
 
 namespace v8 {
 namespace internal {
@@ -25,9 +25,7 @@ class RootMarkingVisitor;
 
 class Marking {
  public:
-  explicit Marking(Heap* heap)
-      : heap_(heap) {
-  }
+  explicit Marking(Heap* heap) : heap_(heap) {}
 
   INLINE(static MarkBit MarkBitFrom(Address addr));
 
@@ -49,9 +47,7 @@ class Marking {
 
   // White markbits: 00 - this is required by the mark bit clearer.
   static const char* kWhiteBitPattern;
-  INLINE(static bool IsWhite(MarkBit mark_bit)) {
-    return !mark_bit.Get();
-  }
+  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
 
   // Grey markbits: 11
   static const char* kGreyBitPattern;
@@ -64,18 +60,14 @@ class Marking {
     mark_bit.Next().Clear();
   }
 
-  INLINE(static void BlackToGrey(MarkBit markbit)) {
-    markbit.Next().Set();
-  }
+  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }
 
   INLINE(static void WhiteToGrey(MarkBit markbit)) {
     markbit.Set();
     markbit.Next().Set();
   }
 
-  INLINE(static void GreyToBlack(MarkBit markbit)) {
-    markbit.Next().Clear();
-  }
+  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
 
   INLINE(static void BlackToGrey(HeapObject* obj)) {
     BlackToGrey(MarkBitFrom(obj));
@@ -98,10 +90,14 @@ class Marking {
 
   static const char* ColorName(ObjectColor color) {
     switch (color) {
-      case BLACK_OBJECT: return "black";
-      case WHITE_OBJECT: return "white";
-      case GREY_OBJECT: return "grey";
-      case IMPOSSIBLE_COLOR: return "impossible";
+      case BLACK_OBJECT:
+        return "black";
+      case WHITE_OBJECT:
+        return "white";
+      case GREY_OBJECT:
+        return "grey";
+      case IMPOSSIBLE_COLOR:
+        return "impossible";
     }
     return "error";
   }
@@ -120,8 +116,7 @@ class Marking {
 #endif
 
   // Returns true if the transferred color is black.
-  INLINE(static bool TransferColor(HeapObject* from,
-                                   HeapObject* to)) {
+  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
     MarkBit from_mark_bit = MarkBitFrom(from);
     MarkBit to_mark_bit = MarkBitFrom(to);
     bool is_black = false;
@@ -145,7 +140,7 @@ class Marking {
 class MarkingDeque {
  public:
   MarkingDeque()
-      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
+      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
 
   void Initialize(Address low, Address high) {
     HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
@@ -261,8 +256,7 @@ class SlotsBuffer {
     }
   }
 
-  ~SlotsBuffer() {
-  }
+  ~SlotsBuffer() {}
 
   void Add(ObjectSlot slot) {
     DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
@@ -311,16 +305,11 @@ class SlotsBuffer {
                             (buffer->chain_length_ - 1) * kNumberOfElements);
   }
 
-  inline bool IsFull() {
-    return idx_ == kNumberOfElements;
-  }
+  inline bool IsFull() { return idx_ == kNumberOfElements; }
 
-  inline bool HasSpaceForTypedSlot() {
-    return idx_ < kNumberOfElements - 1;
-  }
+  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
 
-  static void UpdateSlotsRecordedIn(Heap* heap,
-                                    SlotsBuffer* buffer,
+  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                     bool code_slots_filtering_required) {
     while (buffer != NULL) {
       if (code_slots_filtering_required) {
@@ -332,18 +321,14 @@ class SlotsBuffer {
     }
   }
 
-  enum AdditionMode {
-    FAIL_ON_OVERFLOW,
-    IGNORE_OVERFLOW
-  };
+  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
 
   static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
     return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
   }
 
   INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
-                           SlotsBuffer** buffer_address,
-                           ObjectSlot slot,
+                           SlotsBuffer** buffer_address, ObjectSlot slot,
                            AdditionMode mode)) {
     SlotsBuffer* buffer = *buffer_address;
     if (buffer == NULL || buffer->IsFull()) {
@@ -361,9 +346,7 @@ class SlotsBuffer {
   static bool IsTypedSlot(ObjectSlot slot);
 
   static bool AddTo(SlotsBufferAllocator* allocator,
-                    SlotsBuffer** buffer_address,
-                    SlotType type,
-                    Address addr,
+                    SlotsBuffer** buffer_address, SlotType type, Address addr,
                     AdditionMode mode);
 
   static const int kNumberOfElements = 1021;
@@ -532,10 +515,7 @@ class MarkCompactCollector {
   // Performs a global garbage collection.
   void CollectGarbage();
 
-  enum CompactionMode {
-    INCREMENTAL_COMPACTION,
-    NON_INCREMENTAL_COMPACTION
-  };
+  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
 
   bool StartCompaction(CompactionMode mode);
 
@@ -572,10 +552,7 @@ class MarkCompactCollector {
     PRECISE
   };
 
-  enum SweepingParallelism {
-    SWEEP_ON_MAIN_THREAD,
-    SWEEP_IN_PARALLEL
-  };
+  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
 
 #ifdef VERIFY_HEAP
   void VerifyMarkbitsAreClean();
@@ -587,24 +564,23 @@ class MarkCompactCollector {
 
   // Sweep a single page from the given space conservatively.
   // Returns the size of the biggest continuous freed memory chunk in bytes.
-  template<SweepingParallelism type>
-  static int SweepConservatively(PagedSpace* space,
-                                      FreeList* free_list,
-                                      Page* p);
+  template <SweepingParallelism type>
+  static int SweepConservatively(PagedSpace* space, FreeList* free_list,
+                                 Page* p);
 
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
-    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
-        ShouldSkipEvacuationSlotRecording();
+    return Page::FromAddress(reinterpret_cast<Address>(anchor))
+        ->ShouldSkipEvacuationSlotRecording();
   }
 
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
-    return Page::FromAddress(reinterpret_cast<Address>(host))->
-        ShouldSkipEvacuationSlotRecording();
+    return Page::FromAddress(reinterpret_cast<Address>(host))
+        ->ShouldSkipEvacuationSlotRecording();
   }
 
   INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
-    return Page::FromAddress(reinterpret_cast<Address>(obj))->
-        IsEvacuationCandidate();
+    return Page::FromAddress(reinterpret_cast<Address>(obj))
+        ->IsEvacuationCandidate();
   }
 
   INLINE(void EvictEvacuationCandidate(Page* page)) {
@@ -632,15 +608,11 @@ class MarkCompactCollector {
   void RecordCodeEntrySlot(Address slot, Code* target);
   void RecordCodeTargetPatch(Address pc, Code* target);
 
-  INLINE(void RecordSlot(Object** anchor_slot,
-                         Object** slot,
-                         Object* object,
-                         SlotsBuffer::AdditionMode mode =
-                             SlotsBuffer::FAIL_ON_OVERFLOW));
+  INLINE(void RecordSlot(
+      Object** anchor_slot, Object** slot, Object* object,
+      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
 
-  void MigrateObject(HeapObject* dst,
-                     HeapObject* src,
-                     int size,
+  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                      AllocationSpace to_old_space);
 
   bool TryPromoteObject(HeapObject* object, int object_size);
@@ -684,9 +656,7 @@ class MarkCompactCollector {
     sequential_sweeping_ = sequential_sweeping;
   }
 
-  bool sequential_sweeping() const {
-    return sequential_sweeping_;
-  }
+  bool sequential_sweeping() const { return sequential_sweeping_; }
 
   // Mark the global table which maps weak objects to dependent code without
   // marking its contents.
@@ -879,7 +849,7 @@ class MarkCompactCollector {
   void SweepSpaces();
 
   int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
-                                           NewSpacePage* p);
+                                            NewSpacePage* p);
 
   void EvacuateNewSpace();
 
@@ -931,15 +901,12 @@ class MarkCompactCollector {
 
 class MarkBitCellIterator BASE_EMBEDDED {
  public:
-  explicit MarkBitCellIterator(MemoryChunk* chunk)
-      : chunk_(chunk) {
-    last_cell_index_ = Bitmap::IndexToCell(
-        Bitmap::CellAlignIndex(
-            chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
+    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
     cell_base_ = chunk_->area_start();
     cell_index_ = Bitmap::IndexToCell(
-        Bitmap::CellAlignIndex(
-            chunk_->AddressToMarkbitIndex(cell_base_)));
+        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
     cells_ = chunk_->markbits()->cells();
   }
 
@@ -949,13 +916,13 @@ class MarkBitCellIterator BASE_EMBEDDED {
 
   inline MarkBit::CellType* CurrentCell() {
     DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
-        chunk_->AddressToMarkbitIndex(cell_base_))));
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
     return &cells_[cell_index_];
   }
 
   inline Address CurrentCellBase() {
     DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
-        chunk_->AddressToMarkbitIndex(cell_base_))));
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
     return cell_base_;
   }
 
@@ -975,14 +942,12 @@ class MarkBitCellIterator BASE_EMBEDDED {
 
 class SequentialSweepingScope BASE_EMBEDDED {
  public:
-  explicit SequentialSweepingScope(MarkCompactCollector *collector) :
-    collector_(collector) {
+  explicit SequentialSweepingScope(MarkCompactCollector* collector)
+      : collector_(collector) {
     collector_->set_sequential_sweeping(true);
   }
 
-  ~SequentialSweepingScope() {
-    collector_->set_sequential_sweeping(false);
-  }
+  ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }
 
  private:
   MarkCompactCollector* collector_;
@@ -990,7 +955,7 @@ class SequentialSweepingScope BASE_EMBEDDED {
 
 
 const char* AllocationSpaceName(AllocationSpace space);
+}
+}  // namespace v8::internal
 
-} }  // namespace v8::internal
-
-#endif  // V8_MARK_COMPACT_H_
+#endif  // V8_HEAP_MARK_COMPACT_H_
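
Editor's note: the hunk above renames the include guard to match the new src/heap/ path and switches to the single-brace-per-line namespace closing style. A minimal sketch of the same two conventions applied to a hypothetical header (src/heap/example.h does not exist in the tree):

// src/heap/example.h -- hypothetical file, shown only to illustrate the
// guard-follows-path and namespace-closing conventions used above.
#ifndef V8_HEAP_EXAMPLE_H_
#define V8_HEAP_EXAMPLE_H_

namespace v8 {
namespace internal {

// Declarations for the heap/ component would go here.

}
}  // namespace v8::internal

#endif  // V8_HEAP_EXAMPLE_H_
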
similarity index 90%
rename from src/spaces-inl.h
rename to src/heap/spaces-inl.h
index d5b7b2d..56c2bad 100644 (file)
@@ -2,12 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_SPACES_INL_H_
-#define V8_SPACES_INL_H_
+#ifndef V8_HEAP_SPACES_INL_H_
+#define V8_HEAP_SPACES_INL_H_
 
+#include "src/heap/spaces.h"
 #include "src/heap-profiler.h"
 #include "src/isolate.h"
-#include "src/spaces.h"
 #include "src/v8memory.h"
 
 namespace v8 {
@@ -31,12 +31,10 @@ void Bitmap::Clear(MemoryChunk* chunk) {
 PageIterator::PageIterator(PagedSpace* space)
     : space_(space),
       prev_page_(&space->anchor_),
-      next_page_(prev_page_->next_page()) { }
+      next_page_(prev_page_->next_page()) {}
 
 
-bool PageIterator::has_next() {
-  return next_page_ != &space_->anchor_;
-}
+bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
 
 
 Page* PageIterator::next() {
@@ -54,12 +52,12 @@ Page* PageIterator::next() {
 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
     : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
       next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
-      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
+      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
 
 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
     : prev_page_(space->anchor()),
       next_page_(prev_page_->next_page()),
-      last_page_(prev_page_->prev_page()) { }
+      last_page_(prev_page_->prev_page()) {}
 
 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
     : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
@@ -69,9 +67,7 @@ NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
 }
 
 
-bool NewSpacePageIterator::has_next() {
-  return prev_page_ != last_page_;
-}
+bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
 
 
 NewSpacePage* NewSpacePageIterator::next() {
@@ -113,8 +109,7 @@ void MemoryAllocator::Protect(Address start, size_t size) {
 }
 
 
-void MemoryAllocator::Unprotect(Address start,
-                                size_t size,
+void MemoryAllocator::Unprotect(Address start, size_t size,
                                 Executability executable) {
   base::OS::Unprotect(start, size, executable);
 }
@@ -137,9 +132,7 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
 
 // --------------------------------------------------------------------------
 // PagedSpace
-Page* Page::Initialize(Heap* heap,
-                       MemoryChunk* chunk,
-                       Executability executable,
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
   DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
@@ -209,7 +202,7 @@ PointerChunkIterator::PointerChunkIterator(Heap* heap)
     : state_(kOldPointerState),
       old_pointer_iterator_(heap->old_pointer_space()),
       map_iterator_(heap->map_space()),
-      lo_iterator_(heap->lo_space()) { }
+      lo_iterator_(heap->lo_space()) {}
 
 
 Page* Page::next_page() {
@@ -305,11 +298,11 @@ intptr_t LargeObjectSpace::Available() {
 bool FreeListNode::IsFreeListNode(HeapObject* object) {
   Map* map = object->map();
   Heap* heap = object->GetHeap();
-  return map == heap->raw_unchecked_free_space_map()
-      || map == heap->raw_unchecked_one_pointer_filler_map()
-      || map == heap->raw_unchecked_two_pointer_filler_map();
+  return map == heap->raw_unchecked_free_space_map() ||
+         map == heap->raw_unchecked_one_pointer_filler_map() ||
+         map == heap->raw_unchecked_two_pointer_filler_map();
 }
+}
+}  // namespace v8::internal
 
-} }  // namespace v8::internal
-
-#endif  // V8_SPACES_INL_H_
+#endif  // V8_HEAP_SPACES_INL_H_
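
Editor's note: the spaces-inl.h hunks above collapse trivial constructors and accessors onto single lines. A hypothetical standalone class mirroring that initializer-list and one-line-accessor formatting (PageCursor is not a V8 type):

// page_cursor_sketch.h -- illustrative only.
class PageCursor {
 public:
  explicit PageCursor(int first_index) : index_(first_index) {}

  bool has_next() const { return index_ >= 0; }
  int next() { return index_--; }

 private:
  int index_;
};
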
similarity index 89%
rename from src/spaces.cc
rename to src/heap/spaces.cc
index ed7437b..cfc283f 100644 (file)
@@ -6,8 +6,8 @@
 
 #include "src/base/platform/platform.h"
 #include "src/full-codegen.h"
+#include "src/heap/mark-compact.h"
 #include "src/macro-assembler.h"
-#include "src/mark-compact.h"
 #include "src/msan.h"
 
 namespace v8 {
@@ -22,11 +22,7 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
   // just an anchor for the double linked page list.  Initialize as if we have
   // reached the end of the anchor page, then the first iteration will move on
   // to the first page.
-  Initialize(space,
-             NULL,
-             NULL,
-             kAllPagesInSpace,
-             NULL);
+  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
 }
 
 
@@ -36,11 +32,7 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
   // just an anchor for the double linked page list.  Initialize the current
   // address and end as NULL, then the first iteration will move on
   // to the first page.
-  Initialize(space,
-             NULL,
-             NULL,
-             kAllPagesInSpace,
-             size_func);
+  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
 }
 
 
@@ -53,17 +45,13 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
          owner == page->heap()->cell_space() ||
          owner == page->heap()->property_cell_space() ||
          owner == page->heap()->code_space());
-  Initialize(reinterpret_cast<PagedSpace*>(owner),
-             page->area_start(),
-             page->area_end(),
-             kOnePageOnly,
-             size_func);
+  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
+             page->area_end(), kOnePageOnly, size_func);
   DCHECK(page->WasSweptPrecisely() || page->SweepingCompleted());
 }
 
 
-void HeapObjectIterator::Initialize(PagedSpace* space,
-                                    Address cur, Address end,
+void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
   // Check that we actually can iterate this space.
@@ -107,8 +95,7 @@ CodeRange::CodeRange(Isolate* isolate)
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {
-}
+      current_allocation_block_index_(0) {}
 
 
 bool CodeRange::SetUp(size_t requested) {
@@ -136,8 +123,7 @@ bool CodeRange::SetUp(size_t requested) {
 
   // We are sure that we have mapped a block of requested addresses.
   DCHECK(code_range_->size() == requested);
-  LOG(isolate_,
-      NewEvent("CodeRange", code_range_->address(), requested));
+  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
   Address base = reinterpret_cast<Address>(code_range_->address());
   Address aligned_base =
       RoundUp(reinterpret_cast<Address>(code_range_->address()),
@@ -219,10 +205,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
   }
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
-                                                            current.start,
-                                                            commit_size,
-                                                            *allocated)) {
+  if (!isolate_->memory_allocator()->CommitExecutableMemory(
+          code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -254,10 +238,10 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
 
 
 void CodeRange::TearDown() {
-    delete code_range_;  // Frees all memory in the virtual memory range.
-    code_range_ = NULL;
-    free_list_.Free();
-    allocation_list_.Free();
+  delete code_range_;  // Frees all memory in the virtual memory range.
+  code_range_ = NULL;
+  free_list_.Free();
+  allocation_list_.Free();
 }
 
 
@@ -272,8 +256,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
-      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
-}
+      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
 
 
 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
@@ -298,8 +281,7 @@ void MemoryAllocator::TearDown() {
 }
 
 
-bool MemoryAllocator::CommitMemory(Address base,
-                                   size_t size,
+bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
                                          executable == EXECUTABLE)) {
@@ -328,15 +310,13 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
   DCHECK(isolate_->code_range() == NULL ||
          !isolate_->code_range()->contains(
              static_cast<Address>(reservation->address())));
-  DCHECK(executable == NOT_EXECUTABLE ||
-         isolate_->code_range() == NULL ||
+  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
          !isolate_->code_range()->valid());
   reservation->Release();
 }
 
 
-void MemoryAllocator::FreeMemory(Address base,
-                                 size_t size,
+void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   DCHECK(size_ >= size);
@@ -353,8 +333,7 @@ void MemoryAllocator::FreeMemory(Address base,
     DCHECK(executable == EXECUTABLE);
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
-    DCHECK(executable == NOT_EXECUTABLE ||
-           isolate_->code_range() == NULL ||
+    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
            !isolate_->code_range()->valid());
     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
@@ -363,15 +342,14 @@ void MemoryAllocator::FreeMemory(Address base,
 }
 
 
-Address MemoryAllocator::ReserveAlignedMemory(size_t size,
-                                              size_t alignment,
+Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                               base::VirtualMemory* controller) {
   base::VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
-  Address base = RoundUp(static_cast<Address>(reservation.address()),
-                         alignment);
+  Address base =
+      RoundUp(static_cast<Address>(reservation.address()), alignment);
   controller->TakeControl(&reservation);
   return base;
 }
@@ -386,9 +364,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
   if (base == NULL) return NULL;
 
   if (executable == EXECUTABLE) {
-    if (!CommitExecutableMemory(&reservation,
-                                base,
-                                commit_size,
+    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                 reserve_size)) {
       base = NULL;
     }
@@ -419,19 +395,14 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
 }
 
 
-NewSpacePage* NewSpacePage::Initialize(Heap* heap,
-                                       Address start,
+NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
                                        SemiSpace* semi_space) {
   Address area_start = start + NewSpacePage::kObjectStartOffset;
   Address area_end = start + Page::kPageSize;
 
-  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
-                                               start,
-                                               Page::kPageSize,
-                                               area_start,
-                                               area_end,
-                                               NOT_EXECUTABLE,
-                                               semi_space);
+  MemoryChunk* chunk =
+      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
+                              area_end, NOT_EXECUTABLE, semi_space);
   chunk->set_next_chunk(NULL);
   chunk->set_prev_chunk(NULL);
   chunk->initialize_scan_on_scavenge(true);
@@ -456,13 +427,9 @@ void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
 }
 
 
-MemoryChunk* MemoryChunk::Initialize(Heap* heap,
-                                     Address base,
-                                     size_t size,
-                                     Address area_start,
-                                     Address area_end,
-                                     Executability executable,
-                                     Space* owner) {
+MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
+                                     Address area_start, Address area_end,
+                                     Executability executable, Space* owner) {
   MemoryChunk* chunk = FromAddress(base);
 
   DCHECK(base == chunk->address());
@@ -507,8 +474,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
 
 // Commit MemoryChunk area to the requested size.
 bool MemoryChunk::CommitArea(size_t requested) {
-  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
-                      MemoryAllocator::CodePageGuardSize() : 0;
+  size_t guard_size =
+      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
   size_t commit_size =
       RoundUp(header_size + requested, base::OS::CommitPageSize());
@@ -522,10 +489,10 @@ bool MemoryChunk::CommitArea(size_t requested) {
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
-      Executability executable = IsFlagSet(IS_EXECUTABLE)
-          ? EXECUTABLE : NOT_EXECUTABLE;
-      if (!heap()->isolate()->memory_allocator()->CommitMemory(
-              start, length, executable)) {
+      Executability executable =
+          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
+                                                               executable)) {
         return false;
       }
     } else {
@@ -623,13 +590,13 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
 
   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         base::OS::CommitPageSize()) + CodePageGuardSize();
+                         base::OS::CommitPageSize()) +
+                 CodePageGuardSize();
 
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
-      LOG(isolate_,
-          StringEvent("MemoryAllocator::AllocateRawMemory",
-                      "V8 Executable Allocation capacity exceeded"));
+      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
+                                "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
 
@@ -639,20 +606,17 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
-                                                       commit_size,
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                        &chunk_size);
-      DCHECK(IsAligned(reinterpret_cast<intptr_t>(base),
-                       MemoryChunk::kAlignment));
+      DCHECK(
+          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
       size_ += chunk_size;
       // Update executable memory size.
       size_executable_ += chunk_size;
     } else {
-      base = AllocateAlignedMemory(chunk_size,
-                                   commit_size,
-                                   MemoryChunk::kAlignment,
-                                   executable,
+      base = AllocateAlignedMemory(chunk_size, commit_size,
+                                   MemoryChunk::kAlignment, executable,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
@@ -669,13 +633,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                          base::OS::CommitPageSize());
-    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
-                                 commit_area_size, base::OS::CommitPageSize());
-    base = AllocateAlignedMemory(chunk_size,
-                                 commit_size,
-                                 MemoryChunk::kAlignment,
-                                 executable,
-                                 &reservation);
+    size_t commit_size =
+        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
+                base::OS::CommitPageSize());
+    base =
+        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+                              executable, &reservation);
 
     if (base == NULL) return NULL;
 
@@ -689,8 +652,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
 
   // Use chunk_size for statistics and callbacks because we assume that they
   // treat reserved but not-yet committed memory regions of chunks as allocated.
-  isolate_->counters()->memory_allocated()->
-      Increment(static_cast<int>(chunk_size));
+  isolate_->counters()->memory_allocated()->Increment(
+      static_cast<int>(chunk_size));
 
   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
@@ -698,13 +661,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
 
-  MemoryChunk* result = MemoryChunk::Initialize(heap,
-                                                base,
-                                                chunk_size,
-                                                area_start,
-                                                area_end,
-                                                executable,
-                                                owner);
+  MemoryChunk* result = MemoryChunk::Initialize(
+      heap, base, chunk_size, area_start, area_end, executable, owner);
   result->set_reserved_memory(&reservation);
   MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
   return result;
@@ -720,8 +678,7 @@ void Page::ResetFreeListStatistics() {
 }
 
 
-Page* MemoryAllocator::AllocatePage(intptr_t size,
-                                    PagedSpace* owner,
+Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                     Executability executable) {
   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
 
@@ -734,10 +691,8 @@ Page* MemoryAllocator::AllocatePage(intptr_t size,
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size,
-                                     object_size,
-                                     executable,
-                                     owner);
+  MemoryChunk* chunk =
+      AllocateChunk(object_size, object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }
@@ -751,8 +706,8 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
   }
 
-  isolate_->heap()->RememberUnmappedPage(
-      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
+  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
+                                         chunk->IsEvacuationCandidate());
 
   delete chunk->slots_buffer();
   delete chunk->skip_list();
@@ -761,15 +716,12 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   if (reservation->IsReserved()) {
     FreeMemory(reservation, chunk->executable());
   } else {
-    FreeMemory(chunk->address(),
-               chunk->size(),
-               chunk->executable());
+    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
   }
 }
 
 
-bool MemoryAllocator::CommitBlock(Address start,
-                                  size_t size,
+bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;
 
@@ -801,7 +753,7 @@ void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                 size_t size) {
   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
     MemoryAllocationCallbackRegistration registration =
-      memory_allocation_callbacks_[i];
+        memory_allocation_callbacks_[i];
     if ((registration.space & space) == space &&
         (registration.action & action) == action)
       registration.callback(space, action, static_cast<int>(size));
@@ -819,8 +771,7 @@ bool MemoryAllocator::MemoryAllocationCallbackRegistered(
 
 
 void MemoryAllocator::AddMemoryAllocationCallback(
-    MemoryAllocationCallback callback,
-    ObjectSpace space,
+    MemoryAllocationCallback callback, ObjectSpace space,
     AllocationAction action) {
   DCHECK(callback != NULL);
   MemoryAllocationCallbackRegistration registration(callback, space, action);
@@ -830,7 +781,7 @@ void MemoryAllocator::AddMemoryAllocationCallback(
 
 
 void MemoryAllocator::RemoveMemoryAllocationCallback(
-     MemoryAllocationCallback callback) {
+    MemoryAllocationCallback callback) {
   DCHECK(callback != NULL);
   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
     if (memory_allocation_callbacks_[i].callback == callback) {
@@ -845,10 +796,12 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
   float pct = static_cast<float>(capacity_ - size_) / capacity_;
-  PrintF("  capacity: %" V8_PTR_PREFIX "d"
-             ", used: %" V8_PTR_PREFIX "d"
-             ", available: %%%d\n\n",
-         capacity_, size_, static_cast<int>(pct*100));
+  PrintF("  capacity: %" V8_PTR_PREFIX
+         "d"
+         ", used: %" V8_PTR_PREFIX
+         "d"
+         ", available: %%%d\n\n",
+         capacity_, size_, static_cast<int>(pct * 100));
 }
 #endif
 
@@ -880,13 +833,10 @@ int MemoryAllocator::CodePageAreaEndOffset() {
 
 
 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
-                                             Address start,
-                                             size_t commit_size,
+                                             Address start, size_t commit_size,
                                              size_t reserved_size) {
   // Commit page header (not executable).
-  if (!vm->Commit(start,
-                  CodePageGuardStartOffset(),
-                  false)) {
+  if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
     return false;
   }
 
@@ -897,8 +847,7 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
 
   // Commit page body (executable).
   if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  commit_size - CodePageGuardStartOffset(),
-                  true)) {
+                  commit_size - CodePageGuardStartOffset(), true)) {
     return false;
   }
 
@@ -907,9 +856,9 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
     return false;
   }
 
-  UpdateAllocatedSpaceLimits(start,
-                             start + CodePageAreaStartOffset() +
-                             commit_size - CodePageGuardStartOffset());
+  UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
+                                        commit_size -
+                                        CodePageGuardStartOffset());
   return true;
 }
 
@@ -938,13 +887,12 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
       end_of_unswept_pages_(NULL),
       emergency_memory_(NULL) {
   if (id == CODE_SPACE) {
-    area_size_ = heap->isolate()->memory_allocator()->
-        CodePageAreaSize();
+    area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize();
   } else {
     area_size_ = Page::kPageSize - Page::kObjectStartOffset;
   }
-  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-      * AreaSize();
+  max_capacity_ =
+      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
   accounting_stats_.Clear();
 
   allocation_info_.set_top(NULL);
@@ -954,14 +902,10 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
 }
 
 
-bool PagedSpace::SetUp() {
-  return true;
-}
+bool PagedSpace::SetUp() { return true; }
 
 
-bool PagedSpace::HasBeenSetUp() {
-  return true;
-}
+bool PagedSpace::HasBeenSetUp() { return true; }
 
 
 void PagedSpace::TearDown() {
@@ -1029,8 +973,8 @@ bool PagedSpace::Expand() {
     size = SizeOfFirstPage();
   }
 
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
-      size, this, executable());
+  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
+                                                                executable());
   if (p == NULL) return false;
 
   DCHECK(Capacity() <= max_capacity_);
@@ -1067,9 +1011,9 @@ intptr_t PagedSpace::SizeOfFirstPage() {
         // upgraded to handle small pages.
         size = AreaSize();
       } else {
-        size = RoundUp(
-            480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
-            kPointerSize);
+        size =
+            RoundUp(480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+                    kPointerSize);
       }
       break;
     }
@@ -1173,7 +1117,7 @@ void PagedSpace::UseEmergencyMemory() {
 
 
 #ifdef DEBUG
-void PagedSpace::Print() { }
+void PagedSpace::Print() {}
 #endif
 
 #ifdef VERIFY_HEAP
@@ -1239,9 +1183,8 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
 
   size_t size = 2 * reserved_semispace_capacity;
-  Address base =
-      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
-          size, size, &reservation_);
+  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+      size, size, &reservation_);
   if (base == NULL) return false;
 
   chunk_base_ = base;
@@ -1255,8 +1198,9 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
 
-#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
-                       promoted_histogram_[name].set_name(#name);
+#define SET_NAME(name)                        \
+  allocated_histogram_[name].set_name(#name); \
+  promoted_histogram_[name].set_name(#name);
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
 
@@ -1265,12 +1209,10 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
          2 * heap()->ReservedSemiSpaceSize());
   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
 
-  to_space_.SetUp(chunk_base_,
-                  initial_semispace_capacity,
+  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
                   maximum_semispace_capacity);
   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
-                    initial_semispace_capacity,
-                    maximum_semispace_capacity);
+                    initial_semispace_capacity, maximum_semispace_capacity);
   if (!to_space_.Commit()) {
     return false;
   }
@@ -1314,9 +1256,7 @@ void NewSpace::TearDown() {
 }
 
 
-void NewSpace::Flip() {
-  SemiSpace::Swap(&from_space_, &to_space_);
-}
+void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
 
 
 void NewSpace::Grow() {
@@ -1343,7 +1283,7 @@ void NewSpace::Shrink() {
   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
   if (rounded_new_capacity < Capacity() &&
-      to_space_.ShrinkTo(rounded_new_capacity))  {
+      to_space_.ShrinkTo(rounded_new_capacity)) {
     // Only shrink from-space if we managed to shrink to-space.
     from_space_.Reset();
     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
@@ -1442,16 +1382,16 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
     // the new limit accordingly.
     Address new_top = old_top + size_in_bytes;
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(
-        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
     UpdateInlineAllocationLimit(size_in_bytes);
     top_on_previous_step_ = new_top;
     return AllocateRaw(size_in_bytes);
   } else if (AddFreshPage()) {
     // Switched to new page. Try allocating again.
     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(
-        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = to_space_.page_low();
     return AllocateRaw(size_in_bytes);
   } else {
@@ -1519,8 +1459,7 @@ void NewSpace::Verify() {
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-void SemiSpace::SetUp(Address start,
-                      int initial_capacity,
+void SemiSpace::SetUp(Address start, int initial_capacity,
                       int maximum_capacity) {
   // Creates a space in the young generation. The constructor does not
   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
@@ -1551,8 +1490,7 @@ void SemiSpace::TearDown() {
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
   int pages = capacity_ / Page::kPageSize;
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
-                                                          capacity_,
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_,
                                                           executable())) {
     return false;
   }
@@ -1560,7 +1498,7 @@ bool SemiSpace::Commit() {
   NewSpacePage* current = anchor();
   for (int i = 0; i < pages; i++) {
     NewSpacePage* new_page =
-      NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
     new_page->InsertAfter(current);
     current = new_page;
   }
@@ -1611,7 +1549,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
 
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start_ + capacity_, delta, executable())) {
+          start_ + capacity_, delta, executable())) {
     return false;
   }
   SetCapacity(new_capacity);
@@ -1619,9 +1557,8 @@ bool SemiSpace::GrowTo(int new_capacity) {
   DCHECK(last_page != anchor());
   for (int i = pages_before; i < pages_after; i++) {
     Address page_address = start_ + i * Page::kPageSize;
-    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
-                                                      page_address,
-                                                      this);
+    NewSpacePage* new_page =
+        NewSpacePage::Initialize(heap(), page_address, this);
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
@@ -1737,7 +1674,7 @@ void SemiSpace::set_age_mark(Address mark) {
 
 
 #ifdef DEBUG
-void SemiSpace::Print() { }
+void SemiSpace::Print() {}
 #endif
 
 #ifdef VERIFY_HEAP
@@ -1759,8 +1696,8 @@ void SemiSpace::Verify() {
       if (page->heap()->incremental_marking()->IsMarking()) {
         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
       } else {
-        CHECK(!page->IsFlagSet(
-            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+        CHECK(
+            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
       }
       // TODO(gc): Check that the live_bytes_count_ field matches the
       // black marking on the page (if we make it match in new-space).
@@ -1817,8 +1754,7 @@ SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
 }
 
 
-void SemiSpaceIterator::Initialize(Address start,
-                                   Address end,
+void SemiSpaceIterator::Initialize(Address start, Address end,
                                    HeapObjectCallback size_func) {
   SemiSpace::AssertValidRange(start, end);
   current_ = start;
@@ -1830,7 +1766,7 @@ void SemiSpaceIterator::Initialize(Address start,
 #ifdef DEBUG
 // heap_histograms is shared, always clear it before using it.
 static void ClearHistograms(Isolate* isolate) {
-  // We reset the name each time, though it hasn't changed.
+// We reset the name each time, though it hasn't changed.
 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
 #undef DEF_TYPE_NAME
@@ -1872,8 +1808,8 @@ static int CollectHistogramInfo(HeapObject* obj) {
   isolate->heap_histograms()[type].increment_bytes(obj->Size());
 
   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
-    JSObject::cast(obj)->IncrementSpillStatistics(
-        isolate->js_spill_information());
+    JSObject::cast(obj)
+        ->IncrementSpillStatistics(isolate->js_spill_information());
   }
 
   return obj->Size();
@@ -1895,9 +1831,9 @@ static void ReportHistogram(Isolate* isolate, bool print_spill) {
   // Summarize string types.
   int string_number = 0;
   int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name)      \
-    string_number += isolate->heap_histograms()[type].number(); \
-    string_bytes += isolate->heap_histograms()[type].bytes();
+#define INCREMENT(type, size, name, camel_name)               \
+  string_number += isolate->heap_histograms()[type].number(); \
+  string_bytes += isolate->heap_histograms()[type].bytes();
   STRING_TYPE_LIST(INCREMENT)
 #undef INCREMENT
   if (string_number > 0) {
@@ -1932,15 +1868,15 @@ void NewSpace::CollectStatistics() {
 }
 
 
-static void DoReportStatistics(Isolate* isolate,
-                               HistogramInfo* info, const char* description) {
+static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
+                               const char* description) {
   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
   // Lump all the string types together.
   int string_number = 0;
   int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name)       \
-    string_number += info[type].number();             \
-    string_bytes += info[type].bytes();
+#define INCREMENT(type, size, name, camel_name) \
+  string_number += info[type].number();         \
+  string_bytes += info[type].bytes();
   STRING_TYPE_LIST(INCREMENT)
 #undef INCREMENT
   if (string_number > 0) {
@@ -1951,9 +1887,8 @@ static void DoReportStatistics(Isolate* isolate,
   // Then do the other types.
   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
     if (info[i].number() > 0) {
-      LOG(isolate,
-          HeapSampleItemEvent(info[i].name(), info[i].number(),
-                              info[i].bytes()));
+      LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
+                                       info[i].bytes()));
     }
   }
   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
@@ -1964,14 +1899,14 @@ void NewSpace::ReportStatistics() {
 #ifdef DEBUG
   if (FLAG_heap_stats) {
     float pct = static_cast<float>(Available()) / Capacity();
-    PrintF("  capacity: %" V8_PTR_PREFIX "d"
-               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-           Capacity(), Available(), static_cast<int>(pct*100));
+    PrintF("  capacity: %" V8_PTR_PREFIX
+           "d"
+           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+           Capacity(), Available(), static_cast<int>(pct * 100));
     PrintF("\n  Object Histogram:\n");
     for (int i = 0; i <= LAST_TYPE; i++) {
       if (allocated_histogram_[i].number() > 0) {
-        PrintF("    %-34s%10d (%10d bytes)\n",
-               allocated_histogram_[i].name(),
+        PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
                allocated_histogram_[i].number(),
                allocated_histogram_[i].bytes());
       }
@@ -2154,7 +2089,7 @@ bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
 }
 
 
-FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
+FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
   FreeListNode* node = top();
 
   if (node == NULL) return NULL;
@@ -2182,7 +2117,7 @@ FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
 
 
 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
-                                                 int *node_size) {
+                                                 int* node_size) {
   FreeListNode* node = PickNodeFromList(node_size);
   if (node != NULL && *node_size < size_in_bytes) {
     Free(node, *node_size);
@@ -2217,8 +2152,7 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
 }
 
 
-FreeList::FreeList(PagedSpace* owner)
-    : owner_(owner), heap_(owner->heap()) {
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
   Reset();
 }
 
@@ -2314,8 +2248,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
 
   int huge_list_available = huge_list_.available();
   FreeListNode* top_node = huge_list_.top();
-  for (FreeListNode** cur = &top_node;
-       *cur != NULL;
+  for (FreeListNode** cur = &top_node; *cur != NULL;
        cur = (*cur)->next_address()) {
     FreeListNode* cur_node = *cur;
     while (cur_node != NULL &&
@@ -2404,8 +2337,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
 
-  owner_->heap()->incremental_marking()->OldSpaceStep(
-      size_in_bytes - old_linear_size);
+  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
+                                                      old_linear_size);
 
   int new_node_size = 0;
   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -2472,8 +2405,8 @@ intptr_t FreeList::EvictFreeListItems(Page* p) {
 
   if (sum < p->area_size()) {
     sum += small_list_.EvictFreeListItemsInList(p) +
-        medium_list_.EvictFreeListItemsInList(p) +
-        large_list_.EvictFreeListItemsInList(p);
+           medium_list_.EvictFreeListItemsInList(p) +
+           large_list_.EvictFreeListItemsInList(p);
     p->set_available_in_small_free_list(0);
     p->set_available_in_medium_free_list(0);
     p->set_available_in_large_free_list(0);
@@ -2529,10 +2462,10 @@ int FreeListCategory::FreeListLength() {
 
 
 bool FreeList::IsVeryLong() {
-  if (small_list_.FreeListLength() == kVeryLongFreeList) return  true;
-  if (medium_list_.FreeListLength() == kVeryLongFreeList) return  true;
-  if (large_list_.FreeListLength() == kVeryLongFreeList) return  true;
-  if (huge_list_.FreeListLength() == kVeryLongFreeList) return  true;
+  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
   return false;
 }
 
@@ -2569,7 +2502,7 @@ void PagedSpace::PrepareForMarkCompact() {
 
 intptr_t PagedSpace::SizeOfObjects() {
   DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
-      (unswept_free_bytes_ == 0));
+         (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }
 
@@ -2578,16 +2511,14 @@ intptr_t PagedSpace::SizeOfObjects() {
 // on the heap.  If there was already a free list then the elements on it
 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
 // fix them.
-void PagedSpace::RepairFreeListsAfterBoot() {
-  free_list_.RepairLists(heap());
-}
+void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
 
 
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
 
-  if (Page::FromAllocationTop(allocation_info_.top())->
-      IsEvacuationCandidate()) {
+  if (Page::FromAllocationTop(allocation_info_.top())
+          ->IsEvacuationCandidate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
@@ -2629,8 +2560,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
-    int free_chunk =
-        collector->SweepInParallel(this, size_in_bytes);
+    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
     collector->RefillFreeList(this);
     if (free_chunk >= size_in_bytes) {
       HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2644,8 +2574,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
-  if (!heap()->always_allocate()
-      && heap()->OldGenerationAllocationLimitReached()) {
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements from their free-lists.
     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
@@ -2670,13 +2600,14 @@ void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
   CommentStatistic* comments_statistics =
       isolate->paged_space_comments_statistics();
   ReportCodeKindStatistics(isolate->code_kind_statistics());
-  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
-         "count  (average)\"):\n");
+  PrintF(
+      "Code comment statistics (\"   [ comment-txt   :    size/   "
+      "count  (average)\"):\n");
   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
     const CommentStatistic& cs = comments_statistics[i];
     if (cs.size > 0) {
       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
-             cs.size/cs.count);
+             cs.size / cs.count);
     }
   }
   PrintF("\n");
@@ -2792,9 +2723,11 @@ void PagedSpace::CollectCodeStatistics() {
 
 void PagedSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX "d"
-             ", waste: %" V8_PTR_PREFIX "d"
-             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+  PrintF("  capacity: %" V8_PTR_PREFIX
+         "d"
+         ", waste: %" V8_PTR_PREFIX
+         "d"
+         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
   if (!swept_precisely_) return;
@@ -2813,9 +2746,7 @@ void PagedSpace::ReportStatistics() {
 // there is at least one non-inlined virtual function. I would prefer to hide
 // the VerifyObject definition behind VERIFY_HEAP.
 
-void MapSpace::VerifyObject(HeapObject* object) {
-  CHECK(object->IsMap());
-}
+void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 
 
 // -----------------------------------------------------------------------------
@@ -2824,9 +2755,7 @@ void MapSpace::VerifyObject(HeapObject* object) {
 // there is at least one non-inlined virtual function. I would prefer to hide
 // the VerifyObject definition behind VERIFY_HEAP.
 
-void CellSpace::VerifyObject(HeapObject* object) {
-  CHECK(object->IsCell());
-}
+void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
 
 
 void PropertyCellSpace::VerifyObject(HeapObject* object) {
@@ -2861,13 +2790,10 @@ HeapObject* LargeObjectIterator::Next() {
 
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) {
-    return key1 == key2;
-}
+static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
 
 
-LargeObjectSpace::LargeObjectSpace(Heap* heap,
-                                   intptr_t max_capacity,
+LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
                                    AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
       max_capacity_(max_capacity),
@@ -2917,8 +2843,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page = heap()->isolate()->memory_allocator()->
-      AllocateLargePage(object_size, this, executable);
+  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+      object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
 
@@ -2938,8 +2864,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
   for (uintptr_t key = base; key <= limit; key++) {
     HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                              static_cast<uint32_t>(key),
-                                              true);
+                                              static_cast<uint32_t>(key), true);
     DCHECK(entry != NULL);
     entry->value = page;
   }
@@ -2984,8 +2909,7 @@ Object* LargeObjectSpace::FindObject(Address a) {
 LargePage* LargeObjectSpace::FindPage(Address a) {
   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                        static_cast<uint32_t>(key),
-                                        false);
+                                        static_cast<uint32_t>(key), false);
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
@@ -3024,8 +2948,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       }
 
       // Free the chunk.
-      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
-          object, heap()->isolate());
+      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
+                                                             heap()->isolate());
       size_ -= static_cast<int>(page->size());
       objects_size_ -= object->Size();
       page_count_--;
@@ -3034,8 +2958,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       // Use variable alignment to help pass length check (<= 80 characters)
       // of single line in tools/presubmit.py.
       const intptr_t alignment = MemoryChunk::kAlignment;
-      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
-      uintptr_t limit = base + (page->size()-1)/alignment;
+      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
+      uintptr_t limit = base + (page->size() - 1) / alignment;
       for (uintptr_t key = base; key <= limit; key++) {
         chunk_map_.Remove(reinterpret_cast<void*>(key),
                           static_cast<uint32_t>(key));
@@ -3068,8 +2992,7 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
-  for (LargePage* chunk = first_page_;
-       chunk != NULL;
+  for (LargePage* chunk = first_page_; chunk != NULL;
        chunk = chunk->next_page()) {
     // Each chunk contains an object that starts at the large object page's
     // object area start.
@@ -3098,9 +3021,7 @@ void LargeObjectSpace::Verify() {
     // Byte arrays and strings don't have interior pointers.
     if (object->IsCode()) {
       VerifyPointersVisitor code_visitor;
-      object->IterateBody(map->instance_type(),
-                          object->Size(),
-                          &code_visitor);
+      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
     } else if (object->IsFixedArray()) {
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
@@ -3137,8 +3058,10 @@ void LargeObjectSpace::ReportStatistics() {
     CollectHistogramInfo(obj);
   }
 
-  PrintF("  number of objects %d, "
-         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
+  PrintF(
+      "  number of objects %d, "
+      "size of objects %" V8_PTR_PREFIX "d\n",
+      num_objects, objects_size_);
   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
 }
 
@@ -3157,14 +3080,12 @@ void LargeObjectSpace::CollectCodeStatistics() {
 
 void Page::Print() {
   // Make a best-effort to print the objects in the page.
-  PrintF("Page@%p in %s\n",
-         this->address(),
+  PrintF("Page@%p in %s\n", this->address(),
          AllocationSpaceName(this->owner()->identity()));
   printf(" --------------------------------------\n");
   HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
   unsigned mark_size = 0;
-  for (HeapObject* object = objects.Next();
-       object != NULL;
+  for (HeapObject* object = objects.Next(); object != NULL;
        object = objects.Next()) {
     bool is_marked = Marking::MarkBitFrom(object).Get();
     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
@@ -3179,5 +3100,5 @@ void Page::Print() {
 }
 
 #endif  // DEBUG
-
-}  // namespace v8::internal
+}
+}  // namespace v8::internal
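
The spaces.cc hunks above are formatting-only churn from the move; the one signature worth pausing on is FreeListCategory::PickNodeFromList, which comes in two overloads and reports the size of the popped block through an int* out-parameter so the caller can hand any unused remainder back to the list. Below is a minimal, self-contained sketch of that pattern; the names and block sizes are hypothetical and it is not code from this patch.

#include <cstdio>

// A free block carries its usable size and a raw pointer to the next block.
struct FreeNode {
  int size;
  FreeNode* next;
};

// Size-segregated category in the spirit of FreeListCategory: a singly linked
// list of blocks plus a running total of available bytes.
class FreeListCategorySketch {
 public:
  FreeListCategorySketch() : top_(nullptr), available_(0) {}

  // Push a block onto the category list.
  void Free(FreeNode* node, int size_in_bytes) {
    node->size = size_in_bytes;
    node->next = top_;
    top_ = node;
    available_ += size_in_bytes;
  }

  // Pop the head block unconditionally; report its size through *node_size.
  FreeNode* PickNodeFromList(int* node_size) {
    FreeNode* node = top_;
    if (node == nullptr) return nullptr;
    top_ = node->next;
    available_ -= node->size;
    *node_size = node->size;
    return node;
  }

  // Pop the head block only if it is at least size_in_bytes; otherwise put it
  // back on the list and report failure.
  FreeNode* PickNodeFromList(int size_in_bytes, int* node_size) {
    FreeNode* node = PickNodeFromList(node_size);
    if (node != nullptr && *node_size < size_in_bytes) {
      Free(node, *node_size);
      *node_size = 0;
      return nullptr;
    }
    return node;
  }

  int available() const { return available_; }

 private:
  FreeNode* top_;
  int available_;
};

int main() {
  static FreeNode blocks[2];
  FreeListCategorySketch list;
  list.Free(&blocks[0], 32);
  list.Free(&blocks[1], 128);  // becomes the head of the list
  int node_size = 0;
  FreeNode* node = list.PickNodeFromList(64, &node_size);  // head covers 64
  std::printf("picked %d bytes, %d bytes still available\n",
              node != nullptr ? node_size : 0, list.available());
  return 0;
}

Returning the size through the out-parameter lets the caller learn, from a single pop, both whether the head block covers the request and how much slack has to be given back, which keeps the allocation fast path to one list operation.
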
similarity index 88%
rename from src/spaces.h
rename to src/heap/spaces.h
index 2472bd3..a22e7b1 100644 (file)
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_SPACES_H_
-#define V8_SPACES_H_
+#ifndef V8_HEAP_SPACES_H_
+#define V8_HEAP_SPACES_H_
 
 #include "src/allocation.h"
 #include "src/base/atomicops.h"
@@ -74,20 +74,19 @@ class Isolate;
 
 // Some assertion macros used in the debugging mode.
 
-#define DCHECK_PAGE_ALIGNED(address)                                           \
+#define DCHECK_PAGE_ALIGNED(address) \
   DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
 
-#define DCHECK_OBJECT_ALIGNED(address)                                         \
+#define DCHECK_OBJECT_ALIGNED(address) \
   DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
 
-#define DCHECK_OBJECT_SIZE(size)                                               \
+#define DCHECK_OBJECT_SIZE(size) \
   DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
 
-#define DCHECK_PAGE_OFFSET(offset)                                             \
-  DCHECK((Page::kObjectStartOffset <= offset)                                  \
-      && (offset <= Page::kPageSize))
+#define DCHECK_PAGE_OFFSET(offset) \
+  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
 
-#define DCHECK_MAP_PAGE_INDEX(index)                                           \
+#define DCHECK_MAP_PAGE_INDEX(index) \
   DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
 
 
@@ -103,7 +102,7 @@ class MarkBit {
   typedef uint32_t CellType;
 
   inline MarkBit(CellType* cell, CellType mask, bool data_only)
-      : cell_(cell), mask_(mask), data_only_(data_only) { }
+      : cell_(cell), mask_(mask), data_only_(data_only) {}
 
   inline CellType* cell() { return cell_; }
   inline CellType mask() { return mask_; }
@@ -149,20 +148,17 @@ class Bitmap {
   static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
   static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
 
-  static const size_t kLength =
-    (1 << kPageSizeBits) >> (kPointerSizeLog2);
+  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
 
   static const size_t kSize =
-    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
 
 
   static int CellsForLength(int length) {
     return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
   }
 
-  int CellsCount() {
-    return CellsForLength(kLength);
-  }
+  int CellsCount() { return CellsForLength(kLength); }
 
   static int SizeFor(int cells_count) {
     return sizeof(MarkBit::CellType) * cells_count;
@@ -184,9 +180,7 @@ class Bitmap {
     return reinterpret_cast<MarkBit::CellType*>(this);
   }
 
-  INLINE(Address address()) {
-    return reinterpret_cast<Address>(this);
-  }
+  INLINE(Address address()) { return reinterpret_cast<Address>(this); }
 
   INLINE(static Bitmap* FromAddress(Address addr)) {
     return reinterpret_cast<Bitmap*>(addr);
@@ -210,7 +204,7 @@ class Bitmap {
 
   class CellPrinter {
    public:
-    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
+    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
 
     void Print(uint32_t pos, uint32_t cell) {
       if (cell == seq_type) {
@@ -234,9 +228,7 @@ class Bitmap {
 
     void Flush() {
       if (seq_length > 0) {
-        PrintF("%d: %dx%d\n",
-               seq_start,
-               seq_type == 0 ? 0 : 1,
+        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
                seq_length * kBitsPerCell);
         seq_length = 0;
       }
@@ -284,8 +276,8 @@ class MemoryChunk {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
   }
   static const MemoryChunk* FromAddress(const byte* a) {
-    return reinterpret_cast<const MemoryChunk*>(
-        OffsetFrom(a) & ~kAlignmentMask);
+    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
+                                                ~kAlignmentMask);
   }
 
   // Only works for addresses in pointer spaces, not data or code spaces.
@@ -328,13 +320,9 @@ class MemoryChunk {
            kPageHeaderTag);
   }
 
-  base::VirtualMemory* reserved_memory() {
-    return &reservation_;
-  }
+  base::VirtualMemory* reserved_memory() { return &reservation_; }
 
-  void InitializeReservedMemory() {
-    reservation_.Reset();
-  }
+  void InitializeReservedMemory() { reservation_.Reset(); }
 
   void set_reserved_memory(base::VirtualMemory* reservation) {
     DCHECK_NOT_NULL(reservation);
@@ -409,23 +397,16 @@ class MemoryChunk {
   static const int kPointersFromHereAreInterestingMask =
       1 << POINTERS_FROM_HERE_ARE_INTERESTING;
 
-  static const int kEvacuationCandidateMask =
-      1 << EVACUATION_CANDIDATE;
+  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
 
   static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) |
-      (1 << RESCAN_ON_EVACUATION) |
-      (1 << IN_FROM_SPACE) |
-      (1 << IN_TO_SPACE);
+      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
+      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
 
 
-  void SetFlag(int flag) {
-    flags_ |= static_cast<uintptr_t>(1) << flag;
-  }
+  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
 
-  void ClearFlag(int flag) {
-    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
-  }
+  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
 
   void SetFlagTo(int flag, bool value) {
     if (value) {
@@ -473,9 +454,9 @@ class MemoryChunk {
   }
 
   bool TryParallelSweeping() {
-    return base::Acquire_CompareAndSwap(
-        &parallel_sweeping_, SWEEPING_PENDING, SWEEPING_IN_PROGRESS) ==
-            SWEEPING_PENDING;
+    return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
+                                        SWEEPING_IN_PROGRESS) ==
+           SWEEPING_PENDING;
   }
 
   bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
@@ -484,16 +465,15 @@ class MemoryChunk {
   // because they are marked black).
   void ResetLiveBytes() {
     if (FLAG_gc_verbose) {
-      PrintF("ResetLiveBytes:%p:%x->0\n",
-             static_cast<void*>(this), live_byte_count_);
+      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
+             live_byte_count_);
     }
     live_byte_count_ = 0;
   }
   void IncrementLiveBytes(int by) {
     if (FLAG_gc_verbose) {
-      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
-             static_cast<void*>(this), live_byte_count_,
-             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
+             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
              live_byte_count_ + by);
     }
     live_byte_count_ += by;
@@ -550,19 +530,17 @@ class MemoryChunk {
   static const intptr_t kSizeOffset = 0;
 
   static const intptr_t kLiveBytesOffset =
-     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
-     kPointerSize + kPointerSize +
-     kPointerSize + kPointerSize + kPointerSize + kIntSize;
+      kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
+      kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
 
   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
 
   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
 
-  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
-                                    kIntSize + kIntSize + kPointerSize +
-                                    5 * kPointerSize +
-                                    kPointerSize + kPointerSize;
+  static const size_t kHeaderSize =
+      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
+      kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
 
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -571,14 +549,13 @@ class MemoryChunk {
   // code alignment to be suitable for both.  Also aligned to 32 words because
   // the marking bitmap is arranged in 32 bit chunks.
   static const int kObjectStartAlignment = 32 * kPointerSize;
-  static const int kObjectStartOffset = kBodyOffset - 1 +
+  static const int kObjectStartOffset =
+      kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
 
   size_t size() const { return size_; }
 
-  void set_size(size_t size) {
-    size_ = size;
-  }
+  void set_size(size_t size) { size_ = size; }
 
   void SetArea(Address area_start, Address area_end) {
     area_start_ = area_start;
@@ -589,21 +566,15 @@ class MemoryChunk {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
   }
 
-  bool ContainsOnlyData() {
-    return IsFlagSet(CONTAINS_ONLY_DATA);
-  }
+  bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
 
   bool InNewSpace() {
     return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
   }
 
-  bool InToSpace() {
-    return IsFlagSet(IN_TO_SPACE);
-  }
+  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
 
-  bool InFromSpace() {
-    return IsFlagSet(IN_FROM_SPACE);
-  }
+  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
 
   // ---------------------------------------------------------------------
   // Markbits support
@@ -619,8 +590,7 @@ class MemoryChunk {
   }
 
   inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
-    const intptr_t offset =
-        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
 
     return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
   }
@@ -642,21 +612,13 @@ class MemoryChunk {
     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
   }
 
-  inline SkipList* skip_list() {
-    return skip_list_;
-  }
+  inline SkipList* skip_list() { return skip_list_; }
 
-  inline void set_skip_list(SkipList* skip_list) {
-    skip_list_ = skip_list;
-  }
+  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
 
-  inline SlotsBuffer* slots_buffer() {
-    return slots_buffer_;
-  }
+  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
 
-  inline SlotsBuffer** slots_buffer_address() {
-    return &slots_buffer_;
-  }
+  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
 
   void MarkEvacuationCandidate() {
     DCHECK(slots_buffer_ == NULL);
@@ -670,15 +632,11 @@ class MemoryChunk {
 
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
-  int area_size() {
-    return static_cast<int>(area_end() - area_start());
-  }
+  int area_size() { return static_cast<int>(area_end() - area_start()); }
   bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() {
-    return high_water_mark_;
-  }
+  size_t CommittedPhysicalMemory() { return high_water_mark_; }
 
   static inline void UpdateHighWaterMark(Address mark);
 
@@ -721,13 +679,9 @@ class MemoryChunk {
   intptr_t available_in_huge_free_list_;
   intptr_t non_available_small_blocks_;
 
-  static MemoryChunk* Initialize(Heap* heap,
-                                 Address base,
-                                 size_t size,
-                                 Address area_start,
-                                 Address area_end,
-                                 Executability executable,
-                                 Space* owner);
+  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+                                 Address area_start, Address area_end,
+                                 Executability executable, Space* owner);
 
  private:
   // next_chunk_ holds a pointer of type MemoryChunk
@@ -806,10 +760,8 @@ class Page : public MemoryChunk {
 
   inline void ClearGCFields();
 
-  static inline Page* Initialize(Heap* heap,
-                                 MemoryChunk* chunk,
-                                 Executability executable,
-                                 PagedSpace* owner);
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, PagedSpace* owner);
 
   void InitializeAsAnchor(PagedSpace* owner);
 
@@ -851,17 +803,14 @@ STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
 
 class LargePage : public MemoryChunk {
  public:
-  HeapObject* GetObject() {
-    return HeapObject::FromAddress(area_start());
-  }
+  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
 
   inline LargePage* next_page() const {
     return static_cast<LargePage*>(next_chunk());
   }
 
-  inline void set_next_page(LargePage* page) {
-    set_next_chunk(page);
-  }
+  inline void set_next_page(LargePage* page) { set_next_chunk(page); }
+
  private:
   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
 
@@ -1002,9 +951,7 @@ class CodeRange {
 
 class SkipList {
  public:
-  SkipList() {
-    Clear();
-  }
+  SkipList() { Clear(); }
 
   void Clear() {
     for (int idx = 0; idx < kSize; idx++) {
@@ -1012,9 +959,7 @@ class SkipList {
     }
   }
 
-  Address StartFor(Address addr) {
-    return starts_[RegionNumber(addr)];
-  }
+  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
 
   void AddObject(Address addr, int size) {
     int start_region = RegionNumber(addr);
@@ -1067,11 +1012,11 @@ class MemoryAllocator {
 
   void TearDown();
 
-  Page* AllocatePage(
-      intptr_t size, PagedSpace* owner, Executability executable);
+  Page* AllocatePage(intptr_t size, PagedSpace* owner,
+                     Executability executable);
 
-  LargePage* AllocateLargePage(
-      intptr_t object_size, Space* owner, Executability executable);
+  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
+                               Executability executable);
 
   void Free(MemoryChunk* chunk);
 
@@ -1099,7 +1044,7 @@ class MemoryAllocator {
   // been allocated by this MemoryAllocator.
   V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
     return address < lowest_ever_allocated_ ||
-        address >= highest_ever_allocated_;
+           address >= highest_ever_allocated_;
   }
 
 #ifdef DEBUG
@@ -1112,16 +1057,12 @@ class MemoryAllocator {
   // could be committed later by calling MemoryChunk::CommitArea.
   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                              intptr_t commit_area_size,
-                             Executability executable,
-                             Space* space);
+                             Executability executable, Space* space);
 
-  Address ReserveAlignedMemory(size_t requested,
-                               size_t alignment,
+  Address ReserveAlignedMemory(size_t requested, size_t alignment,
                                base::VirtualMemory* controller);
-  Address AllocateAlignedMemory(size_t reserve_size,
-                                size_t commit_size,
-                                size_t alignment,
-                                Executability executable,
+  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+                                size_t alignment, Executability executable,
                                 base::VirtualMemory* controller);
 
   bool CommitMemory(Address addr, size_t size, Executability executable);
@@ -1145,19 +1086,15 @@ class MemoryAllocator {
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
 
-  void PerformAllocationCallback(ObjectSpace space,
-                                 AllocationAction action,
+  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
                                  size_t size);
 
   void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                          ObjectSpace space,
-                                          AllocationAction action);
+                                   ObjectSpace space, AllocationAction action);
 
-  void RemoveMemoryAllocationCallback(
-      MemoryAllocationCallback callback);
+  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
 
-  bool MemoryAllocationCallbackRegistered(
-      MemoryAllocationCallback callback);
+  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
 
   static int CodePageGuardStartOffset();
 
@@ -1172,8 +1109,7 @@ class MemoryAllocator {
   }
 
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
-                                              Address start,
-                                              size_t commit_size,
+                                              Address start, size_t commit_size,
                                               size_t reserved_size);
 
  private:
@@ -1201,16 +1137,14 @@ class MemoryAllocator {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                          ObjectSpace space,
                                          AllocationAction action)
-        : callback(callback), space(space), action(action) {
-    }
+        : callback(callback), space(space), action(action) {}
     MemoryAllocationCallback callback;
     ObjectSpace space;
     AllocationAction action;
   };
 
   // A List of callbacks that are triggered when memory is allocated or freed
-  List<MemoryAllocationCallbackRegistration>
-      memory_allocation_callbacks_;
+  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
 
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
@@ -1238,7 +1172,7 @@ class MemoryAllocator {
 
 class ObjectIterator : public Malloced {
  public:
-  virtual ~ObjectIterator() { }
+  virtual ~ObjectIterator() {}
 
   virtual HeapObject* next_object() = 0;
 };
@@ -1253,7 +1187,7 @@ class ObjectIterator : public Malloced {
 // If objects are allocated in the page during iteration the iterator may
 // or may not iterate over those objects.  The caller must create a new
 // iterator in order to be sure to visit these new objects.
-class HeapObjectIterator: public ObjectIterator {
+class HeapObjectIterator : public ObjectIterator {
  public:
   // Creates a new object iterator in a given space.
   // If the size function is not given, the iterator calls the default
@@ -1273,15 +1207,13 @@ class HeapObjectIterator: public ObjectIterator {
     return NULL;
   }
 
-  virtual HeapObject* next_object() {
-    return Next();
-  }
+  virtual HeapObject* next_object() { return Next(); }
 
  private:
   enum PageMode { kOnePageOnly, kAllPagesInSpace };
 
-  Address cur_addr_;  // Current iteration point.
-  Address cur_end_;   // End iteration point.
+  Address cur_addr_;              // Current iteration point.
+  Address cur_end_;               // End iteration point.
   HeapObjectCallback size_func_;  // Size function or NULL.
   PagedSpace* space_;
   PageMode page_mode_;
@@ -1294,11 +1226,8 @@ class HeapObjectIterator: public ObjectIterator {
   bool AdvanceToNextPage();
 
   // Initializes fields.
-  inline void Initialize(PagedSpace* owner,
-                         Address start,
-                         Address end,
-                         PageMode mode,
-                         HeapObjectCallback size_func);
+  inline void Initialize(PagedSpace* owner, Address start, Address end,
+                         PageMode mode, HeapObjectCallback size_func);
 };
 
 
@@ -1329,45 +1258,41 @@ class PageIterator BASE_EMBEDDED {
 // space.
 class AllocationInfo {
  public:
-  AllocationInfo() : top_(NULL), limit_(NULL) {
-  }
+  AllocationInfo() : top_(NULL), limit_(NULL) {}
 
   INLINE(void set_top(Address top)) {
     SLOW_DCHECK(top == NULL ||
-        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+                (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
     top_ = top;
   }
 
   INLINE(Address top()) const {
     SLOW_DCHECK(top_ == NULL ||
-        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+                (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
     return top_;
   }
 
-  Address* top_address() {
-    return &top_;
-  }
+  Address* top_address() { return &top_; }
 
   INLINE(void set_limit(Address limit)) {
     SLOW_DCHECK(limit == NULL ||
-        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+                (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
     limit_ = limit;
   }
 
   INLINE(Address limit()) const {
     SLOW_DCHECK(limit_ == NULL ||
-        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+                (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
+                    0);
     return limit_;
   }
 
-  Address* limit_address() {
-    return &limit_;
-  }
+  Address* limit_address() { return &limit_; }
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
-        && (top_ <= limit_);
+    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+           (top_ <= limit_);
   }
 #endif
 
@@ -1477,7 +1402,7 @@ class AllocationStats BASE_EMBEDDED {
 // (free-list node pointers have the heap object tag, and they have a map like
 // a heap object).  They have a size and a next pointer.  The next pointer is
 // the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
+class FreeListNode : public HeapObject {
  public:
   // Obtain a free-list node from a raw address.  This is not a cast because
   // it does not check nor require that the first word at the address is a map
@@ -1516,10 +1441,7 @@ class FreeListNode: public HeapObject {
 // the end element of the linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory() :
-      top_(0),
-      end_(NULL),
-      available_(0) {}
+  FreeListCategory() : top_(0), end_(NULL), available_(0) {}
 
   intptr_t Concatenate(FreeListCategory* category);
 
@@ -1527,8 +1449,8 @@ class FreeListCategory {
 
   void Free(FreeListNode* node, int size_in_bytes);
 
-  FreeListNode* PickNodeFromList(int *node_size);
-  FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
+  FreeListNode* PickNodeFromList(int* node_size);
+  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
 
   intptr_t EvictFreeListItemsInList(Page* p);
   bool ContainsPageFreeListItemsInList(Page* p);
@@ -1553,9 +1475,7 @@ class FreeListCategory {
 
   base::Mutex* mutex() { return &mutex_; }
 
-  bool IsEmpty() {
-    return top() == 0;
-  }
+  bool IsEmpty() { return top() == 0; }
 
 #ifdef DEBUG
   intptr_t SumFreeList();
@@ -1691,11 +1611,11 @@ class FreeList {
 class AllocationResult {
  public:
   // Implicit constructor from Object*.
-  AllocationResult(Object* object) : object_(object),  // NOLINT
-                                     retry_space_(INVALID_SPACE) { }
+  AllocationResult(Object* object)  // NOLINT
+      : object_(object),
+        retry_space_(INVALID_SPACE) {}
 
-  AllocationResult() : object_(NULL),
-                       retry_space_(INVALID_SPACE) { }
+  AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {}
 
   static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
     return AllocationResult(space);
@@ -1721,8 +1641,8 @@ class AllocationResult {
   }
 
  private:
-  explicit AllocationResult(AllocationSpace space) : object_(NULL),
-                                                     retry_space_(space) { }
+  explicit AllocationResult(AllocationSpace space)
+      : object_(NULL), retry_space_(space) {}
 
   Object* object_;
   AllocationSpace retry_space_;
@@ -1732,9 +1652,7 @@ class AllocationResult {
 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
-  PagedSpace(Heap* heap,
-             intptr_t max_capacity,
-             AllocationSpace id,
+  PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
              Executability executable);
 
   virtual ~PagedSpace() {}
@@ -1838,9 +1756,7 @@ class PagedSpace : public Space {
   Address limit() { return allocation_info_.limit(); }
 
   // The allocation top address.
-  Address* allocation_top_address() {
-    return allocation_info_.top_address();
-  }
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
 
   // The allocation limit address.
   Address* allocation_limit_address() {
@@ -1862,9 +1778,7 @@ class PagedSpace : public Space {
     return size_in_bytes - wasted;
   }
 
-  void ResetFreeList() {
-    free_list_.Reset();
-  }
+  void ResetFreeList() { free_list_.Reset(); }
 
   // Set space allocation info.
   void SetTopAndLimit(Address top, Address limit) {
@@ -1884,9 +1798,7 @@ class PagedSpace : public Space {
     SetTopAndLimit(NULL, NULL);
   }
 
-  void Allocate(int bytes) {
-    accounting_stats_.AllocateBytes(bytes);
-  }
+  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
 
   void IncreaseCapacity(int size);
 
@@ -1925,31 +1837,24 @@ class PagedSpace : public Space {
   // result before _and_ after evacuation has finished.
   static bool ShouldBeSweptBySweeperThreads(Page* p) {
     return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
-           !p->WasSweptPrecisely();
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely();
   }
 
-  void IncrementUnsweptFreeBytes(intptr_t by) {
-    unswept_free_bytes_ += by;
-  }
+  void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
 
   void IncreaseUnsweptFreeBytes(Page* p) {
     DCHECK(ShouldBeSweptBySweeperThreads(p));
     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   }
 
-  void DecrementUnsweptFreeBytes(intptr_t by) {
-    unswept_free_bytes_ -= by;
-  }
+  void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
 
   void DecreaseUnsweptFreeBytes(Page* p) {
     DCHECK(ShouldBeSweptBySweeperThreads(p));
     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   }
 
-  void ResetUnsweptFreeBytes() {
-    unswept_free_bytes_ = 0;
-  }
+  void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
 
   // This function tries to steal size_in_bytes memory from the sweeper threads
   // free-lists. If it does not succeed stealing enough memory, it will wait
@@ -1957,13 +1862,9 @@ class PagedSpace : public Space {
   // It returns true when sweeping is completed and false otherwise.
   bool EnsureSweeperProgress(intptr_t size_in_bytes);
 
-  void set_end_of_unswept_pages(Page* page) {
-    end_of_unswept_pages_ = page;
-  }
+  void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
 
-  Page* end_of_unswept_pages() {
-    return end_of_unswept_pages_;
-  }
+  Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
 
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
@@ -1976,9 +1877,7 @@ class PagedSpace : public Space {
   int CountTotalPages();
 
   // Return size of allocatable area on a page in this space.
-  inline int AreaSize() {
-    return area_size_;
-  }
+  inline int AreaSize() { return area_size_; }
 
   void CreateEmergencyMemory();
   void FreeEmergencyMemory();
@@ -2073,7 +1972,7 @@ class NumberAndSizeInfo BASE_EMBEDDED {
 
 // HistogramInfo class for recording a single "bar" of a histogram.  This
 // class is used for collecting statistics to print to the log file.
-class HistogramInfo: public NumberAndSizeInfo {
+class HistogramInfo : public NumberAndSizeInfo {
  public:
   HistogramInfo() : NumberAndSizeInfo() {}
 
@@ -2085,10 +1984,7 @@ class HistogramInfo: public NumberAndSizeInfo {
 };
 
 
-enum SemiSpaceId {
-  kFromSpace = 0,
-  kToSpace = 1
-};
+enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
 
 class SemiSpace;
@@ -2099,9 +1995,9 @@ class NewSpacePage : public MemoryChunk {
   // GC related flags copied from from-space to to-space when
   // flipping semispaces.
   static const intptr_t kCopyOnFlipFlagsMask =
-    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
-    (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::SCAN_ON_SCAVENGE);
 
   static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
 
@@ -2109,36 +2005,28 @@ class NewSpacePage : public MemoryChunk {
     return static_cast<NewSpacePage*>(next_chunk());
   }
 
-  inline void set_next_page(NewSpacePage* page) {
-    set_next_chunk(page);
-  }
+  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
 
   inline NewSpacePage* prev_page() const {
     return static_cast<NewSpacePage*>(prev_chunk());
   }
 
-  inline void set_prev_page(NewSpacePage* page) {
-    set_prev_chunk(page);
-  }
+  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
 
-  SemiSpace* semi_space() {
-    return reinterpret_cast<SemiSpace*>(owner());
-  }
+  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
 
   bool is_anchor() { return !this->InNewSpace(); }
 
   static bool IsAtStart(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
-        == kObjectStartOffset;
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
+           kObjectStartOffset;
   }
 
   static bool IsAtEnd(Address addr) {
     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
   }
 
-  Address address() {
-    return reinterpret_cast<Address>(this);
-  }
+  Address address() { return reinterpret_cast<Address>(this); }
 
   // Finds the NewSpacePage containing the given address.
   static inline NewSpacePage* FromAddress(Address address_in_page) {
@@ -2164,12 +2052,9 @@ class NewSpacePage : public MemoryChunk {
  private:
   // Create a NewSpacePage object that is only used as anchor
   // for the doubly-linked list of real pages.
-  explicit NewSpacePage(SemiSpace* owner) {
-    InitializeAsAnchor(owner);
-  }
+  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
 
-  static NewSpacePage* Initialize(Heap* heap,
-                                  Address start,
+  static NewSpacePage* Initialize(Heap* heap, Address start,
                                   SemiSpace* semi_space);
 
   // Initialize a fake NewSpacePage used as sentinel at the ends
@@ -2193,12 +2078,12 @@ class SemiSpace : public Space {
  public:
   // Constructor.
   SemiSpace(Heap* heap, SemiSpaceId semispace)
-    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      start_(NULL),
-      age_mark_(NULL),
-      id_(semispace),
-      anchor_(this),
-      current_page_(NULL) { }
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+        start_(NULL),
+        age_mark_(NULL),
+        id_(semispace),
+        anchor_(this),
+        current_page_(NULL) {}
 
   // Sets up the semispace using the given chunk.
   void SetUp(Address start, int initial_capacity, int maximum_capacity);
@@ -2227,19 +2112,13 @@ class SemiSpace : public Space {
   }
 
   // Returns the start address of the current page of the space.
-  Address page_low() {
-    return current_page_->area_start();
-  }
+  Address page_low() { return current_page_->area_start(); }
 
   // Returns one past the end address of the space.
-  Address space_end() {
-    return anchor_.prev_page()->area_end();
-  }
+  Address space_end() { return anchor_.prev_page()->area_end(); }
 
   // Returns one past the end address of the current page of the space.
-  Address page_high() {
-    return current_page_->area_end();
-  }
+  Address page_high() { return current_page_->area_end(); }
 
   bool AdvancePage() {
     NewSpacePage* next_page = current_page_->next_page();
@@ -2258,8 +2137,8 @@ class SemiSpace : public Space {
   // True if the address is in the address range of this semispace (not
   // necessarily below the allocation pointer).
   bool Contains(Address a) {
-    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
-           == reinterpret_cast<uintptr_t>(start_);
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+           reinterpret_cast<uintptr_t>(start_);
   }
 
   // True if the object is a heap object in the address range of this
@@ -2351,6 +2230,7 @@ class SemiSpace : public Space {
 
   friend class SemiSpaceIterator;
   friend class NewSpacePageIterator;
+
  public:
   TRACK_MEMORY("SemiSpace")
 };
@@ -2398,9 +2278,7 @@ class SemiSpaceIterator : public ObjectIterator {
   virtual HeapObject* next_object() { return Next(); }
 
  private:
-  void Initialize(Address start,
-                  Address end,
-                  HeapObjectCallback size_func);
+  void Initialize(Address start, Address end, HeapObjectCallback size_func);
 
   // The current iteration point.
   Address current_;
@@ -2449,11 +2327,11 @@ class NewSpace : public Space {
  public:
   // Constructor.
   explicit NewSpace(Heap* heap)
-    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      to_space_(heap, kToSpace),
-      from_space_(heap, kFromSpace),
-      reservation_(),
-      inline_allocation_limit_step_(0) {}
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+        to_space_(heap, kToSpace),
+        from_space_(heap, kFromSpace),
+        reservation_(),
+        inline_allocation_limit_step_(0) {}
 
   // Sets up the new space using the given chunk.
   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2480,8 +2358,8 @@ class NewSpace : public Space {
   // True if the address or object lies in the address range of either
   // semispace (not necessarily below the allocation pointer).
   bool Contains(Address a) {
-    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
-        == reinterpret_cast<uintptr_t>(start_);
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+           reinterpret_cast<uintptr_t>(start_);
   }
 
   bool Contains(Object* o) {
@@ -2492,7 +2370,7 @@ class NewSpace : public Space {
   // Return the allocated bytes in the active semispace.
   virtual intptr_t Size() {
     return pages_used_ * NewSpacePage::kAreaSize +
-        static_cast<int>(top() - to_space_.page_low());
+           static_cast<int>(top() - to_space_.page_low());
   }
 
   // The same, but returning an int.  We have to have the one that returns
@@ -2521,16 +2399,14 @@ class NewSpace : public Space {
   // Return the total amount of memory committed for new space.
   intptr_t MaximumCommittedMemory() {
     return to_space_.MaximumCommittedMemory() +
-        from_space_.MaximumCommittedMemory();
+           from_space_.MaximumCommittedMemory();
   }
 
   // Approximate amount of physical memory committed for this space.
   size_t CommittedPhysicalMemory();
 
   // Return the available bytes without growing.
-  intptr_t Available() {
-    return Capacity() - Size();
-  }
+  intptr_t Available() { return Capacity() - Size(); }
 
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
@@ -2538,9 +2414,7 @@ class NewSpace : public Space {
     return to_space_.MaximumCapacity();
   }
 
-  bool IsAtMaximumCapacity() {
-    return Capacity() == MaximumCapacity();
-  }
+  bool IsAtMaximumCapacity() { return Capacity() == MaximumCapacity(); }
 
   // Returns the initial capacity of a semispace.
   int InitialCapacity() {
@@ -2590,9 +2464,7 @@ class NewSpace : public Space {
   }
 
   // The allocation top and limit address.
-  Address* allocation_top_address() {
-    return allocation_info_.top_address();
-  }
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
 
   // The allocation limit address.
   Address* allocation_limit_address() {
@@ -2732,12 +2604,9 @@ class OldSpace : public PagedSpace {
  public:
   // Creates an old space object with a given maximum capacity.
   // The constructor does not allocate pages from OS.
-  OldSpace(Heap* heap,
-           intptr_t max_capacity,
-           AllocationSpace id,
+  OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) {
-  }
+      : PagedSpace(heap, max_capacity, id, executable) {}
 
  public:
   TRACK_MEMORY("OldSpace")
@@ -2747,9 +2616,9 @@ class OldSpace : public PagedSpace {
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
-  SLOW_DCHECK((space).page_low() <= (info).top() \
-              && (info).top() <= (space).page_high() \
-              && (info).limit() <= (space).page_high())
+  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
+              (info).top() <= (space).page_high() &&  \
+              (info).limit() <= (space).page_high())
 
 
 // -----------------------------------------------------------------------------
@@ -2760,8 +2629,7 @@ class MapSpace : public PagedSpace {
   // Creates a map space object with a maximum capacity.
   MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
-        max_map_space_pages_(kMaxMapPageIndex - 1) {
-  }
+        max_map_space_pages_(kMaxMapPageIndex - 1) {}
 
   // Given an index, returns the page address.
   // TODO(1600): this limit is artifical just to keep code compilable
@@ -2800,8 +2668,7 @@ class CellSpace : public PagedSpace {
  public:
   // Creates a property cell space object with a maximum capacity.
   CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
-  }
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
 
   virtual int RoundSizeDownToObjectAlignment(int size) {
     if (IsPowerOf2(Cell::kSize)) {
@@ -2825,10 +2692,8 @@ class CellSpace : public PagedSpace {
 class PropertyCellSpace : public PagedSpace {
  public:
   // Creates a property cell space object with a maximum capacity.
-  PropertyCellSpace(Heap* heap, intptr_t max_capacity,
-                    AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
-  }
+  PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
 
   virtual int RoundSizeDownToObjectAlignment(int size) {
     if (IsPowerOf2(PropertyCell::kSize)) {
@@ -2871,34 +2736,24 @@ class LargeObjectSpace : public Space {
 
   // Shared implementation of AllocateRaw, AllocateRawCode and
   // AllocateRawFixedArray.
-  MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
-                                               Executability executable);
+  MUST_USE_RESULT AllocationResult
+      AllocateRaw(int object_size, Executability executable);
 
   // Available bytes for objects in this space.
   inline intptr_t Available();
 
-  virtual intptr_t Size() {
-    return size_;
-  }
+  virtual intptr_t Size() { return size_; }
 
-  virtual intptr_t SizeOfObjects() {
-    return objects_size_;
-  }
+  virtual intptr_t SizeOfObjects() { return objects_size_; }
 
-  intptr_t MaximumCommittedMemory() {
-    return maximum_committed_;
-  }
+  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
 
-  intptr_t CommittedMemory() {
-    return Size();
-  }
+  intptr_t CommittedMemory() { return Size(); }
 
   // Approximate amount of physical memory committed for this space.
   size_t CommittedPhysicalMemory();
 
-  int PageCount() {
-    return page_count_;
-  }
+  int PageCount() { return page_count_; }
 
   // Finds an object for a given address, returns a Smi if it is not found.
   // The function iterates through all objects in this space, may be slow.
@@ -2937,8 +2792,8 @@ class LargeObjectSpace : public Space {
   intptr_t maximum_committed_;
   // The head of the linked list of large object chunks.
   LargePage* first_page_;
-  intptr_t size_;  // allocated bytes
-  int page_count_;  // number of chunks
+  intptr_t size_;          // allocated bytes
+  int page_count_;         // number of chunks
   intptr_t objects_size_;  // size of objects
   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
   HashMap chunk_map_;
@@ -2950,7 +2805,7 @@ class LargeObjectSpace : public Space {
 };
 
 
-class LargeObjectIterator: public ObjectIterator {
+class LargeObjectIterator : public ObjectIterator {
  public:
   explicit LargeObjectIterator(LargeObjectSpace* space);
   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
@@ -3014,12 +2869,7 @@ class PointerChunkIterator BASE_EMBEDDED {
 
 
  private:
-  enum State {
-    kOldPointerState,
-    kMapState,
-    kLargeObjectState,
-    kFinishedState
-  };
+  enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
   State state_;
   PageIterator old_pointer_iterator_;
   PageIterator map_iterator_;
@@ -3041,8 +2891,7 @@ struct CommentStatistic {
   static const int kMaxComments = 64;
 };
 #endif
+}
+}  // namespace v8::internal
 
-
-} }  // namespace v8::internal
-
-#endif  // V8_SPACES_H_
+#endif  // V8_HEAP_SPACES_H_
similarity index 77%
rename from src/sweeper-thread.cc
rename to src/heap/sweeper-thread.cc
index b31f188..b0e8cea 100644 (file)
--- a/src/sweeper-thread.cc
+++ b/src/heap/sweeper-thread.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/sweeper-thread.h"
+#include "src/heap/sweeper-thread.h"
 
 #include "src/v8.h"
 
@@ -15,13 +15,13 @@ namespace internal {
 static const int kSweeperThreadStackSize = 64 * KB;
 
 SweeperThread::SweeperThread(Isolate* isolate)
-     : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
-       isolate_(isolate),
-       heap_(isolate->heap()),
-       collector_(heap_->mark_compact_collector()),
-       start_sweeping_semaphore_(0),
-       end_sweeping_semaphore_(0),
-       stop_semaphore_(0) {
+    : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
+      isolate_(isolate),
+      heap_(isolate->heap()),
+      collector_(heap_->mark_compact_collector()),
+      start_sweeping_semaphore_(0),
+      end_sweeping_semaphore_(0),
+      stop_semaphore_(0) {
   DCHECK(!FLAG_job_based_sweeping);
   base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false));
 }
@@ -56,14 +56,10 @@ void SweeperThread::Stop() {
 }
 
 
-void SweeperThread::StartSweeping() {
-  start_sweeping_semaphore_.Signal();
-}
+void SweeperThread::StartSweeping() { start_sweeping_semaphore_.Signal(); }
 
 
-void SweeperThread::WaitForSweeperThread() {
-  end_sweeping_semaphore_.Wait();
-}
+void SweeperThread::WaitForSweeperThread() { end_sweeping_semaphore_.Wait(); }
 
 
 bool SweeperThread::SweepingCompleted() {
@@ -82,5 +78,5 @@ int SweeperThread::NumberOfThreads(int max_available) {
   DCHECK(FLAG_parallel_sweeping);
   return max_available;
 }
-
-}  // namespace v8::internal
+}
+}  // namespace v8::internal
similarity index 81%
rename from src/sweeper-thread.h
rename to src/heap/sweeper-thread.h
index 692dfea..fc6bdda 100644 (file)
--- a/src/sweeper-thread.h
+++ b/src/heap/sweeper-thread.h
@@ -2,17 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_SWEEPER_THREAD_H_
-#define V8_SWEEPER_THREAD_H_
+#ifndef V8_HEAP_SWEEPER_THREAD_H_
+#define V8_HEAP_SWEEPER_THREAD_H_
 
 #include "src/base/atomicops.h"
 #include "src/base/platform/platform.h"
 #include "src/flags.h"
 #include "src/utils.h"
 
-#include "src/spaces.h"
+#include "src/heap/spaces.h"
 
-#include "src/heap.h"
+#include "src/heap/heap.h"
 
 namespace v8 {
 namespace internal {
@@ -39,7 +39,7 @@ class SweeperThread : public base::Thread {
   base::Semaphore stop_semaphore_;
   volatile base::AtomicWord stop_thread_;
 };
+}
+}  // namespace v8::internal
 
-} }  // namespace v8::internal
-
-#endif  // V8_SWEEPER_THREAD_H_
+#endif  // V8_HEAP_SWEEPER_THREAD_H_
index 4bcb486..be74439 100644 (file)
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -7,7 +7,7 @@
 #if V8_TARGET_ARCH_IA32
 
 #include "src/codegen.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
index cc01f57..a1fc062 100644 (file)
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -15,6 +15,8 @@
 #include "src/cpu-profiler.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
+#include "src/heap/spaces.h"
+#include "src/heap/sweeper-thread.h"
 #include "src/heap-profiler.h"
 #include "src/hydrogen.h"
 #include "src/isolate-inl.h"
@@ -28,9 +30,7 @@
 #include "src/scopeinfo.h"
 #include "src/serialize.h"
 #include "src/simulator.h"
-#include "src/spaces.h"
 #include "src/stub-cache.h"
-#include "src/sweeper-thread.h"
 #include "src/version.h"
 #include "src/vm-state-inl.h"
 
index 4ca7763..cf1de10 100644 (file)
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -17,7 +17,7 @@
 #include "src/global-handles.h"
 #include "src/handles.h"
 #include "src/hashmap.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/optimizing-compiler-thread.h"
 #include "src/regexp-stack.h"
 #include "src/runtime.h"
index b996fb0..b02c3af 100644 (file)
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -9,8 +9,8 @@
 
 #include "src/char-predicates-inl.h"
 #include "src/conversions.h"
+#include "src/heap/spaces-inl.h"
 #include "src/messages.h"
-#include "src/spaces-inl.h"
 #include "src/token.h"
 
 namespace v8 {
index 86fe1d6..1ab70b8 100644 (file)
--- a/src/jsregexp-inl.h
+++ b/src/jsregexp-inl.h
@@ -8,7 +8,7 @@
 
 #include "src/allocation.h"
 #include "src/handles.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/jsregexp.h"
 #include "src/objects.h"
 
index 05402e9..865bdca 100644 (file)
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -6,8 +6,8 @@
 
 #include "src/api.h"
 #include "src/execution.h"
+#include "src/heap/spaces-inl.h"
 #include "src/messages.h"
-#include "src/spaces-inl.h"
 
 namespace v8 {
 namespace internal {
index fb201f0..fe7d51b 100644 (file)
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
 #include "src/elements.h"
 #include "src/factory.h"
 #include "src/field-index-inl.h"
-#include "src/heap-inl.h"
-#include "src/heap.h"
-#include "src/incremental-marking.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/spaces.h"
 #include "src/isolate.h"
 #include "src/lookup.h"
 #include "src/objects.h"
 #include "src/objects-visiting.h"
 #include "src/property.h"
 #include "src/prototype.h"
-#include "src/spaces.h"
 #include "src/store-buffer.h"
 #include "src/transitions-inl.h"
 #include "src/v8memory.h"
index c1f6114..5116a6a 100644 (file)
--- a/src/objects.cc
+++ b/src/objects.cc
 #include "src/field-index-inl.h"
 #include "src/field-index.h"
 #include "src/full-codegen.h"
+#include "src/heap/mark-compact.h"
 #include "src/hydrogen.h"
 #include "src/isolate-inl.h"
 #include "src/log.h"
 #include "src/lookup.h"
 #include "src/macro-assembler.h"
-#include "src/mark-compact.h"
 #include "src/objects-inl.h"
 #include "src/objects-visiting-inl.h"
 #include "src/prototype.h"
index 01a3783..99ffa8c 100644 (file)
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -14,8 +14,8 @@
 #include "src/execution.h"
 #include "src/full-codegen.h"
 #include "src/global-handles.h"
+#include "src/heap/mark-compact.h"
 #include "src/isolate-inl.h"
-#include "src/mark-compact.h"
 #include "src/scopeinfo.h"
 
 namespace v8 {
index 095719d..0bdd431 100644 (file)
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -6,7 +6,7 @@
 #define V8_SAFEPOINT_TABLE_H_
 
 #include "src/allocation.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/v8memory.h"
 #include "src/zone.h"
 
index 1180cd3..21c02ac 100644 (file)
--- a/src/transitions.h
+++ b/src/transitions.h
@@ -7,7 +7,7 @@
 
 #include "src/checks.h"
 #include "src/elements-kind.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/isolate.h"
 #include "src/objects.h"
 
index b0db7ca..8ae75fb 100644 (file)
--- a/src/v8.h
+++ b/src/v8.h
 
 // Objects & heap
 #include "src/objects-inl.h"  // NOLINT
-#include "src/spaces-inl.h"  // NOLINT
-#include "src/heap-inl.h"  // NOLINT
-#include "src/incremental-marking-inl.h"  // NOLINT
-#include "src/mark-compact-inl.h"  // NOLINT
+#include "src/heap/spaces-inl.h"               // NOLINT
+#include "src/heap/heap-inl.h"                 // NOLINT
+#include "src/heap/incremental-marking-inl.h"  // NOLINT
+#include "src/heap/mark-compact-inl.h"         // NOLINT
 #include "src/log-inl.h"  // NOLINT
 #include "src/handles-inl.h"  // NOLINT
 #include "src/types-inl.h"  // NOLINT
index 5f43e7f..7a37fb3 100644 (file)
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -10,7 +10,7 @@
 #include "src/codegen.h"
 #include "src/cpu-profiler.h"
 #include "src/debug.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/isolate-inl.h"
 #include "src/serialize.h"
 #include "src/x64/assembler-x64.h"
index fcdea87..f6b8fc4 100644 (file)
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -7,7 +7,7 @@
 #if V8_TARGET_ARCH_X87
 
 #include "src/codegen.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
index 7a826c2..34f0b69 100644 (file)
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -29,7 +29,7 @@
 
 #include "src/v8.h"
 
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8;
index 881f18c..eece780 100644 (file)
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -34,6 +34,7 @@
 #include "src/bootstrapper.h"
 #include "src/compilation-cache.h"
 #include "src/debug.h"
+#include "src/heap/spaces.h"
 #include "src/ic-inl.h"
 #include "src/natives.h"
 #include "src/objects.h"
@@ -41,7 +42,6 @@
 #include "src/scopeinfo.h"
 #include "src/serialize.h"
 #include "src/snapshot.h"
-#include "src/spaces.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
index 8d72612..d40b7e9 100644 (file)
--- a/test/cctest/test-weaktypedarrays.cc
+++ b/test/cctest/test-weaktypedarrays.cc
@@ -31,7 +31,7 @@
 #include "test/cctest/cctest.h"
 
 #include "src/api.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/objects.h"
 
 using namespace v8::internal;
index 96dcb6d..1af0d17 100644 (file)
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
         '../../src/full-codegen.h',
         '../../src/func-name-inferrer.cc',
         '../../src/func-name-inferrer.h',
-        '../../src/gc-tracer.cc',
-        '../../src/gc-tracer.h',
         '../../src/gdb-jit.cc',
         '../../src/gdb-jit.h',
         '../../src/global-handles.cc',
         '../../src/handles.cc',
         '../../src/handles.h',
         '../../src/hashmap.h',
-        '../../src/heap-inl.h',
         '../../src/heap-profiler.cc',
         '../../src/heap-profiler.h',
         '../../src/heap-snapshot-generator-inl.h',
         '../../src/heap-snapshot-generator.cc',
         '../../src/heap-snapshot-generator.h',
-        '../../src/heap.cc',
-        '../../src/heap.h',
+        '../../src/heap/gc-tracer.cc',
+        '../../src/heap/gc-tracer.h',
+        '../../src/heap/heap-inl.h',
+        '../../src/heap/heap.cc',
+        '../../src/heap/heap.h',
+        '../../src/heap/incremental-marking-inl.h',
+        '../../src/heap/incremental-marking.cc',
+        '../../src/heap/incremental-marking.h',
+        '../../src/heap/mark-compact-inl.h',
+        '../../src/heap/mark-compact.cc',
+        '../../src/heap/mark-compact.h',
+        '../../src/heap/spaces-inl.h',
+        '../../src/heap/spaces.cc',
+        '../../src/heap/spaces.h',
+        '../../src/heap/sweeper-thread.h',
+        '../../src/heap/sweeper-thread.cc',
         '../../src/hydrogen-alias-analysis.h',
         '../../src/hydrogen-bce.cc',
         '../../src/hydrogen-bce.h',
         '../../src/ic-inl.h',
         '../../src/ic.cc',
         '../../src/ic.h',
-        '../../src/incremental-marking.cc',
-        '../../src/incremental-marking.h',
         '../../src/interface.cc',
         '../../src/interface.h',
         '../../src/interpreter-irregexp.cc',
         '../../src/lookup.cc',
         '../../src/lookup.h',
         '../../src/macro-assembler.h',
-        '../../src/mark-compact.cc',
-        '../../src/mark-compact.h',
         '../../src/messages.cc',
         '../../src/messages.h',
         '../../src/msan.h',
         '../../src/snapshot.h',
         '../../src/snapshot-source-sink.cc',
         '../../src/snapshot-source-sink.h',
-        '../../src/spaces-inl.h',
-        '../../src/spaces.cc',
-        '../../src/spaces.h',
         '../../src/store-buffer-inl.h',
         '../../src/store-buffer.cc',
         '../../src/store-buffer.h',
         '../../src/strtod.h',
         '../../src/stub-cache.cc',
         '../../src/stub-cache.h',
-        '../../src/sweeper-thread.h',
-        '../../src/sweeper-thread.cc',
         '../../src/token.cc',
         '../../src/token.h',
         '../../src/transitions-inl.h',
index 69fc5a0..26d4dd8 100755 (executable)
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -80,7 +80,6 @@ runtime/mutex
 runtime/nonconf
 runtime/printf
 runtime/printf_format
-runtime/references
 runtime/rtti
 runtime/sizeof
 runtime/string