"src/full-codegen.h",
"src/func-name-inferrer.cc",
"src/func-name-inferrer.h",
- "src/gc-tracer.cc",
- "src/gc-tracer.h",
"src/gdb-jit.cc",
"src/gdb-jit.h",
"src/global-handles.cc",
"src/handles.cc",
"src/handles.h",
"src/hashmap.h",
- "src/heap-inl.h",
"src/heap-profiler.cc",
"src/heap-profiler.h",
"src/heap-snapshot-generator-inl.h",
"src/heap-snapshot-generator.cc",
"src/heap-snapshot-generator.h",
- "src/heap.cc",
- "src/heap.h",
+ "src/heap/gc-tracer.cc",
+ "src/heap/gc-tracer.h",
+ "src/heap/heap-inl.h",
+ "src/heap/heap.cc",
+ "src/heap/heap.h",
+ "src/heap/incremental-marking.cc",
+ "src/heap/incremental-marking.h",
+ "src/heap/mark-compact-inl.h",
+ "src/heap/mark-compact.cc",
+ "src/heap/mark-compact.h",
+ "src/heap/spaces-inl.h",
+ "src/heap/spaces.cc",
+ "src/heap/spaces.h",
+ "src/heap/sweeper-thread.h",
+ "src/heap/sweeper-thread.cc",
"src/hydrogen-alias-analysis.h",
"src/hydrogen-bce.cc",
"src/hydrogen-bce.h",
"src/ic-inl.h",
"src/ic.cc",
"src/ic.h",
- "src/incremental-marking.cc",
- "src/incremental-marking.h",
"src/interface.cc",
"src/interface.h",
"src/interpreter-irregexp.cc",
"src/lookup.cc",
"src/lookup.h",
"src/macro-assembler.h",
- "src/mark-compact.cc",
- "src/mark-compact.h",
"src/messages.cc",
"src/messages.h",
"src/msan.h",
"src/snapshot-source-sink.cc",
"src/snapshot-source-sink.h",
"src/snapshot.h",
- "src/spaces-inl.h",
- "src/spaces.cc",
- "src/spaces.h",
"src/store-buffer-inl.h",
"src/store-buffer.cc",
"src/store-buffer.h",
"src/strtod.h",
"src/stub-cache.cc",
"src/stub-cache.h",
- "src/sweeper-thread.h",
- "src/sweeper-thread.cc",
"src/token.cc",
"src/token.h",
"src/transitions-inl.h",
#include "src/builtins.h"
#include "src/cpu-profiler.h"
#include "src/gdb-jit.h"
+#include "src/heap/mark-compact.h"
#include "src/heap-profiler.h"
#include "src/ic-inl.h"
-#include "src/mark-compact.h"
#include "src/prototype.h"
#include "src/stub-cache.h"
#include "src/vm-state-inl.h"
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/objects.h"
namespace v8 {
#define V8_ELEMENTS_H_
#include "src/elements-kind.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen.h"
-#include "src/mark-compact.h"
+#include "src/heap/mark-compact.h"
#include "src/safepoint-table.h"
#include "src/scopeinfo.h"
#include "src/string-stream.h"
#include "src/api.h"
#include "src/handles.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/isolate.h"
namespace v8 {
#include "src/v8.h"
-#include "src/gc-tracer.h"
+#include "src/heap/gc-tracer.h"
namespace v8 {
namespace internal {
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_GC_TRACER_H_
-#define V8_GC_TRACER_H_
+#ifndef V8_HEAP_GC_TRACER_H_
+#define V8_HEAP_GC_TRACER_H_
namespace v8 {
namespace internal {
}
} // namespace v8::internal
-#endif // V8_GC_TRACER_H_
+#endif // V8_HEAP_GC_TRACER_H_
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_INL_H_
-#define V8_HEAP_INL_H_
+#ifndef V8_HEAP_HEAP_INL_H_
+#define V8_HEAP_HEAP_INL_H_
#include <cmath>
#include "src/base/platform/platform.h"
#include "src/cpu-profiler.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/list-inl.h"
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
- // Assert no overflow into live objects.
+// Assert no overflow into live objects.
#ifdef DEBUG
SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
reinterpret_cast<Address>(rear_));
void PromotionQueue::ActivateGuardIfOnTheSamePage() {
guard_ = guard_ ||
- heap_->new_space()->active_space()->current_page()->address() ==
- GetHeadPage()->address();
+ heap_->new_space()->active_space()->current_page()->address() ==
+ GetHeadPage()->address();
}
-template<>
+template <>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
// TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
// ASCII only check.
}
-template<>
+template <>
bool inline Heap::IsOneByte(String* str, int chars) {
return str->IsOneByteRepresentation();
}
AllocationResult Heap::AllocateInternalizedStringFromUtf8(
Vector<const char> str, int chars, uint32_t hash_field) {
if (IsOneByte(str, chars)) {
- return AllocateOneByteInternalizedString(
- Vector<const uint8_t>::cast(str), hash_field);
+ return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
+ hash_field);
}
return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
}
-template<typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field) {
+template <typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+ uint32_t hash_field) {
if (IsOneByte(t, chars)) {
return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
}
AllocationResult Heap::AllocateOneByteInternalizedString(
- Vector<const uint8_t> str,
- uint32_t hash_field) {
+ Vector<const uint8_t> str, uint32_t hash_field) {
CHECK_GE(String::kMaxLength, str.length());
// Compute map and object size.
Map* map = ascii_internalized_string_map();
// Allocate string.
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
// Allocate string.
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
}
-AllocationResult Heap::AllocateRaw(int size_in_bytes,
- AllocationSpace space,
+AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationSpace retry_space) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(gc_state_ == NOT_IN_GC);
#ifdef DEBUG
- if (FLAG_gc_interval >= 0 &&
- AllowAllocationFailure::IsAllowed(isolate_) &&
+ if (FLAG_gc_interval >= 0 && AllowAllocationFailure::IsAllowed(isolate_) &&
Heap::allocation_timeout_-- <= 0) {
return AllocationResult::Retry(space);
}
AllocationResult allocation;
if (NEW_SPACE == space) {
allocation = new_space_.AllocateRaw(size_in_bytes);
- if (always_allocate() &&
- allocation.IsRetry() &&
- retry_space != NEW_SPACE) {
+ if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
space = retry_space;
} else {
if (allocation.To(&object)) {
}
-void Heap::OnMoveEvent(HeapObject* target,
- HeapObject* source,
+void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
int size_in_bytes) {
HeapProfiler* heap_profiler = isolate_->heap_profiler();
if (heap_profiler->is_tracking_object_moves()) {
if (isolate_->logger()->is_logging_code_events() ||
isolate_->cpu_profiler()->is_profiling()) {
if (target->IsSharedFunctionInfo()) {
- PROFILE(isolate_, SharedFunctionInfoMoveEvent(
- source->address(), target->address()));
+ PROFILE(isolate_, SharedFunctionInfoMoveEvent(source->address(),
+ target->address()));
}
}
DCHECK(string->IsExternalString());
v8::String::ExternalStringResourceBase** resource_addr =
reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) +
- ExternalString::kResourceOffset -
+ reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
kHeapObjectTag);
// Dispose of the C++ object if it has not already been disposed.
bool Heap::InNewSpace(Object* object) {
bool result = new_space_.Contains(object);
- DCHECK(!result || // Either not in new space
- gc_state_ != NOT_IN_GC || // ... or in the middle of GC
- InToSpace(object)); // ... or in to-space (where we allocate).
+ DCHECK(!result || // Either not in new space
+ gc_state_ != NOT_IN_GC || // ... or in the middle of GC
+ InToSpace(object)); // ... or in to-space (where we allocate).
return result;
}
-bool Heap::InNewSpace(Address address) {
- return new_space_.Contains(address);
-}
+bool Heap::InNewSpace(Address address) { return new_space_.Contains(address); }
bool Heap::InFromSpace(Object* object) {
NewSpacePage* page = NewSpacePage::FromAddress(old_address);
Address age_mark = new_space_.age_mark();
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
- (!page->ContainsLimit(age_mark) || old_address < age_mark);
+ (!page->ContainsLimit(age_mark) || old_address < age_mark);
}
OldSpace* Heap::TargetSpace(HeapObject* object) {
InstanceType type = object->map()->instance_type();
AllocationSpace space = TargetSpaceId(type);
- return (space == OLD_POINTER_SPACE)
- ? old_pointer_space_
- : old_data_space_;
+ return (space == OLD_POINTER_SPACE) ? old_pointer_space_ : old_data_space_;
}
// strings, cons strings, and sliced strings.
// Only the latter two contain non-map-word pointers to heap objects.
return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
- ? OLD_POINTER_SPACE
- : OLD_DATA_SPACE;
+ ? OLD_POINTER_SPACE
+ : OLD_DATA_SPACE;
} else {
return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
}
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- CopyWords(reinterpret_cast<Object**>(dst),
- reinterpret_cast<Object**>(src),
+ CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
static_cast<size_t>(byte_size / kPointerSize));
}
}
-void Heap::ScavengePointer(HeapObject** p) {
- ScavengeObject(p, *p);
-}
+void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); }
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
Address object_address = object->address();
Address memento_address = object_address + object->Size();
Address last_memento_word_address = memento_address + kPointerSize;
- if (!NewSpacePage::OnSamePage(object_address,
- last_memento_word_address)) {
+ if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
return NULL;
}
DCHECK(heap->InFromSpace(object));
if (!FLAG_allocation_site_pretenuring ||
- !AllocationSite::CanTrack(object->map()->instance_type())) return;
+ !AllocationSite::CanTrack(object->map()->instance_type()))
+ return;
AllocationMemento* memento = heap->FindAllocationMemento(object);
if (memento == NULL) return;
}
-bool Heap::CollectGarbage(AllocationSpace space,
- const char* gc_reason,
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
const v8::GCCallbackFlags callbackFlags) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
Isolate* Heap::isolate() {
- return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
+ return reinterpret_cast<Isolate*>(
+ reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
}
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.
-#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- if (__allocation__.To(&__object__)) { \
- DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
- RETURN_VALUE; \
- }
-
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
- do { \
- AllocationResult __allocation__ = FUNCTION_CALL; \
- Object* __object__ = NULL; \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(), \
- "allocation failure"); \
- __allocation__ = FUNCTION_CALL; \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
- (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
- { \
- AlwaysAllocateScope __scope__(ISOLATE); \
- __allocation__ = FUNCTION_CALL; \
- } \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- /* TODO(1181417): Fix this. */ \
- v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
- RETURN_EMPTY; \
+#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+ if (__allocation__.To(&__object__)) { \
+ DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
+ RETURN_VALUE; \
+ }
+
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
+ do { \
+ AllocationResult __allocation__ = FUNCTION_CALL; \
+ Object* __object__ = NULL; \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+ (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(), \
+ "allocation failure"); \
+ __allocation__ = FUNCTION_CALL; \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+ (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
+ (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
+ { \
+ AlwaysAllocateScope __scope__(ISOLATE); \
+ __allocation__ = FUNCTION_CALL; \
+ } \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+ /* TODO(1181417): Fix this. */ \
+ v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+ RETURN_EMPTY; \
} while (false)
-#define CALL_AND_RETRY_OR_DIE( \
- ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
- CALL_AND_RETRY( \
- ISOLATE, \
- FUNCTION_CALL, \
- RETURN_VALUE, \
- RETURN_EMPTY)
+#define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
+ RETURN_EMPTY) \
+ CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY_OR_DIE(ISOLATE, \
- FUNCTION_CALL, \
+ CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, \
return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
- return Handle<TYPE>()) \
+ return Handle<TYPE>())
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
}
-GCCallbacksScope::~GCCallbacksScope() {
- heap_->gc_callbacks_depth_--;
-}
+GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
bool GCCallbacksScope::CheckReenter() {
void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
- CHECK((*current)->IsSmi());
+ CHECK((*current)->IsSmi());
}
}
+}
+} // namespace v8::internal
-
-} } // namespace v8::internal
-
-#endif // V8_HEAP_INL_H_
+#endif // V8_HEAP_HEAP_INL_H_
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
#include "src/heap-profiler.h"
-#include "src/incremental-marking.h"
#include "src/isolate-inl.h"
-#include "src/mark-compact.h"
#include "src/natives.h"
#include "src/objects-visiting-inl.h"
#include "src/objects-visiting.h"
#include "src/vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h" // NOLINT
+#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h" // NOLINT
+#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#endif
#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
external_string_table_(this),
chunks_queued_for_free_(NULL),
gc_callbacks_depth_(0) {
- // Allow build-time customization of the max semispace size. Building
- // V8 with snapshots and a non-default max semispace size is much
- // easier if you can define it as part of the build environment.
+// Allow build-time customization of the max semispace size. Building
+// V8 with snapshots and a non-default max semispace size is much
+// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
intptr_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
- return new_space_.Capacity() +
- old_pointer_space_->Capacity() +
- old_data_space_->Capacity() +
- code_space_->Capacity() +
- map_space_->Capacity() +
- cell_space_->Capacity() +
- property_cell_space_->Capacity();
+ return new_space_.Capacity() + old_pointer_space_->Capacity() +
+ old_data_space_->Capacity() + code_space_->Capacity() +
+ map_space_->Capacity() + cell_space_->Capacity() +
+ property_cell_space_->Capacity();
}
intptr_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
- return new_space_.CommittedMemory() +
- old_pointer_space_->CommittedMemory() +
- old_data_space_->CommittedMemory() +
- code_space_->CommittedMemory() +
- map_space_->CommittedMemory() +
- cell_space_->CommittedMemory() +
- property_cell_space_->CommittedMemory() +
- lo_space_->Size();
+ return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
+ old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
+ map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
+ property_cell_space_->CommittedMemory() + lo_space_->Size();
}
if (!HasBeenSetUp()) return 0;
return new_space_.CommittedPhysicalMemory() +
- old_pointer_space_->CommittedPhysicalMemory() +
- old_data_space_->CommittedPhysicalMemory() +
- code_space_->CommittedPhysicalMemory() +
- map_space_->CommittedPhysicalMemory() +
- cell_space_->CommittedPhysicalMemory() +
- property_cell_space_->CommittedPhysicalMemory() +
- lo_space_->CommittedPhysicalMemory();
+ old_pointer_space_->CommittedPhysicalMemory() +
+ old_data_space_->CommittedPhysicalMemory() +
+ code_space_->CommittedPhysicalMemory() +
+ map_space_->CommittedPhysicalMemory() +
+ cell_space_->CommittedPhysicalMemory() +
+ property_cell_space_->CommittedPhysicalMemory() +
+ lo_space_->CommittedPhysicalMemory();
}
intptr_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
- return new_space_.Available() +
- old_pointer_space_->Available() +
- old_data_space_->Available() +
- code_space_->Available() +
- map_space_->Available() +
- cell_space_->Available() +
- property_cell_space_->Available();
+ return new_space_.Available() + old_pointer_space_->Available() +
+ old_data_space_->Available() + code_space_->Available() +
+ map_space_->Available() + cell_space_->Available() +
+ property_cell_space_->Available();
}
bool Heap::HasBeenSetUp() {
- return old_pointer_space_ != NULL &&
- old_data_space_ != NULL &&
- code_space_ != NULL &&
- map_space_ != NULL &&
- cell_space_ != NULL &&
- property_cell_space_ != NULL &&
- lo_space_ != NULL;
+ return old_pointer_space_ != NULL && old_data_space_ != NULL &&
+ code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
+ property_cell_space_ != NULL && lo_space_ != NULL;
}
// Have allocation in OLD and LO failed?
if (old_gen_exhausted_) {
- isolate_->counters()->
- gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+ isolate_->counters()
+ ->gc_compactor_caused_by_oldspace_exhaustion()
+ ->Increment();
*reason = "old generations exhausted";
return MARK_COMPACTOR;
}
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
- isolate_->counters()->
- gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+ isolate_->counters()
+ ->gc_compactor_caused_by_oldspace_exhaustion()
+ ->Increment();
*reason = "scavenge might not succeed";
return MARK_COMPACTOR;
}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
- // Heap::ReportHeapStatistics will also log NewSpace statistics when
- // compiled --log-gc is set. The following logic is used to avoid
- // double logging.
+// Heap::ReportHeapStatistics will also log NewSpace statistics when
+// compiled --log-gc is set. The following logic is used to avoid
+// double logging.
#ifdef DEBUG
if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
if (FLAG_heap_stats) {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB\n",
+ PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB\n",
isolate_->memory_allocator()->Size() / KB,
isolate_->memory_allocator()->Available() / KB);
- PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- new_space_.Size() / KB,
- new_space_.Available() / KB,
+ PrintPID("New space, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ new_space_.Size() / KB, new_space_.Available() / KB,
new_space_.CommittedMemory() / KB);
- PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
old_pointer_space_->SizeOfObjects() / KB,
old_pointer_space_->Available() / KB,
old_pointer_space_->CommittedMemory() / KB);
- PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintPID("Old data space, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
old_data_space_->SizeOfObjects() / KB,
old_data_space_->Available() / KB,
old_data_space_->CommittedMemory() / KB);
- PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- code_space_->SizeOfObjects() / KB,
- code_space_->Available() / KB,
+ PrintPID("Code space, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
- PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- map_space_->SizeOfObjects() / KB,
- map_space_->Available() / KB,
+ PrintPID("Map space, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
- PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- cell_space_->SizeOfObjects() / KB,
- cell_space_->Available() / KB,
+ PrintPID("Cell space, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
cell_space_->CommittedMemory() / KB);
- PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
property_cell_space_->SizeOfObjects() / KB,
property_cell_space_->Available() / KB,
property_cell_space_->CommittedMemory() / KB);
- PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- lo_space_->SizeOfObjects() / KB,
- lo_space_->Available() / KB,
+ PrintPID("Large object space, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
- PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- this->SizeOfObjects() / KB,
- this->Available() / KB,
+ PrintPID("All spaces, used: %6" V8_PTR_PREFIX
+ "d KB"
+ ", available: %6" V8_PTR_PREFIX
+ "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
- // Similar to the before GC, we use some complicated logic to ensure that
- // NewSpace statistics are logged exactly once when --log-gc is turned on.
+// Similar to the before GC, we use some complicated logic to ensure that
+// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
if (FLAG_heap_stats) {
new_space_.CollectStatistics();
void Heap::GarbageCollectionPrologue() {
- { AllowHeapAllocation for_the_first_part_of_prologue;
+ {
+ AllowHeapAllocation for_the_first_part_of_prologue;
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
void Heap::RepairFreeListsAfterBoot() {
PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next();
- space != NULL;
+ for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
space->RepairFreeListsAfterBoot();
}
// in a separate data structure if this is a performance problem.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
bool use_scratchpad =
- allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
- !deopt_maybe_tenured;
+ allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
+ !deopt_maybe_tenured;
int i = 0;
Object* list_element = allocation_sites_list();
bool trigger_deoptimization = false;
bool maximum_size_scavenge = MaximumSizeScavenge();
- while (use_scratchpad ?
- i < allocation_sites_scratchpad_length_ :
- list_element->IsAllocationSite()) {
- AllocationSite* site = use_scratchpad ?
- AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
- AllocationSite::cast(list_element);
+ while (use_scratchpad ? i < allocation_sites_scratchpad_length_
+ : list_element->IsAllocationSite()) {
+ AllocationSite* site =
+ use_scratchpad
+ ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
+ : AllocationSite::cast(list_element);
allocation_mementos_found += site->memento_found_count();
if (site->memento_found_count() > 0) {
active_allocation_sites++;
FlushAllocationSitesScratchpad();
if (FLAG_trace_pretenuring_statistics &&
- (allocation_mementos_found > 0 ||
- tenure_decisions > 0 ||
+ (allocation_mementos_found > 0 || tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
- PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
- "#mementos, #tenure decisions, #donttenure decisions) "
- "(%s, %d, %d, %d, %d, %d)\n",
- use_scratchpad ? "use scratchpad" : "use list",
- allocation_sites,
- active_allocation_sites,
- allocation_mementos_found,
- tenure_decisions,
- dont_tenure_decisions);
+ PrintF(
+ "GC: (mode, #visited allocation sites, #active allocation sites, "
+ "#mementos, #tenure decisions, #donttenure decisions) "
+ "(%s, %d, %d, %d, %d, %d)\n",
+ use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
+ active_allocation_sites, allocation_mementos_found, tenure_decisions,
+ dont_tenure_decisions);
}
}
}
AllocationSite* site = AllocationSite::cast(list_element);
if (site->deopt_dependent_code()) {
site->dependent_code()->MarkCodeForDeoptimization(
- isolate_,
- DependentCode::kAllocationSiteTenuringChangedGroup);
+ isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
site->set_deopt_dependent_code(false);
}
list_element = site->weak_next();
if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
- (crankshaft_codegen_bytes_generated_
- + full_codegen_bytes_generated_)));
+ (crankshaft_codegen_bytes_generated_ +
+ full_codegen_bytes_generated_)));
}
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_new_space()->
- AddSample(static_cast<int>(
- (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
+ (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
- static_cast<int>(
- (old_pointer_space()->CommittedMemory() * 100.0) /
- CommittedMemory()));
+ static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
isolate_->counters()->heap_fraction_old_data_space()->AddSample(
- static_cast<int>(
- (old_data_space()->CommittedMemory() * 100.0) /
- CommittedMemory()));
- isolate_->counters()->heap_fraction_code_space()->
- AddSample(static_cast<int>(
- (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_map_space()->AddSample(
- static_cast<int>(
- (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_code_space()->AddSample(
+ static_cast<int>((code_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
+ (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_cell_space()->AddSample(
- static_cast<int>(
- (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
- isolate_->counters()->heap_fraction_property_cell_space()->
- AddSample(static_cast<int>(
- (property_cell_space()->CommittedMemory() * 100.0) /
- CommittedMemory()));
- isolate_->counters()->heap_fraction_lo_space()->
- AddSample(static_cast<int>(
- (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
+ static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
+ (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
static_cast<int>(map_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
static_cast<int>(cell_space()->CommittedMemory() / KB));
- isolate_->counters()->
- heap_sample_property_cell_space_committed()->
- AddSample(static_cast<int>(
- property_cell_space()->CommittedMemory() / KB));
+ isolate_->counters()
+ ->heap_sample_property_cell_space_committed()
+ ->AddSample(
+ static_cast<int>(property_cell_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
static_cast<int>(MaximumCommittedMemory() / KB));
}
-#define UPDATE_COUNTERS_FOR_SPACE(space) \
- isolate_->counters()->space##_bytes_available()->Set( \
- static_cast<int>(space()->Available())); \
- isolate_->counters()->space##_bytes_committed()->Set( \
- static_cast<int>(space()->CommittedMemory())); \
- isolate_->counters()->space##_bytes_used()->Set( \
+#define UPDATE_COUNTERS_FOR_SPACE(space) \
+ isolate_->counters()->space##_bytes_available()->Set( \
+ static_cast<int>(space()->Available())); \
+ isolate_->counters()->space##_bytes_committed()->Set( \
+ static_cast<int>(space()->CommittedMemory())); \
+ isolate_->counters()->space##_bytes_used()->Set( \
static_cast<int>(space()->SizeOfObjects()));
-#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
- if (space()->CommittedMemory() > 0) { \
- isolate_->counters()->external_fragmentation_##space()->AddSample( \
- static_cast<int>(100 - \
- (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
- }
-#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
- UPDATE_COUNTERS_FOR_SPACE(space) \
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
+ if (space()->CommittedMemory() > 0) { \
+ isolate_->counters()->external_fragmentation_##space()->AddSample( \
+ static_cast<int>(100 - \
+ (space()->SizeOfObjects() * 100.0) / \
+ space()->CommittedMemory())); \
+ }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+ UPDATE_COUNTERS_FOR_SPACE(space) \
UPDATE_FRAGMENTATION_FOR_SPACE(space)
UPDATE_COUNTERS_FOR_SPACE(new_space)
}
-void Heap::CollectAllGarbage(int flags,
- const char* gc_reason,
+void Heap::CollectAllGarbage(int flags, const char* gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
}
-bool Heap::CollectGarbage(GarbageCollector collector,
- const char* gc_reason,
+bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
const char* collector_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
// generator needs incremental marking to stay off after it aborted.
if (!mark_compact_collector()->abort_incremental_marking() &&
incremental_marking()->IsStopped() &&
- incremental_marking()->WorthActivating() &&
- NextGCIsLikelyToBeFull()) {
+ incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
incremental_marking()->Start();
}
}
-void Heap::MoveElements(FixedArray* array,
- int dst_index,
- int src_index,
+void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
int len) {
if (len == 0) return;
static bool AbortIncrementalMarkingAndCollectGarbage(
- Heap* heap,
- AllocationSpace space,
- const char* gc_reason = NULL) {
+ Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
bool result = heap->CollectGarbage(space, gc_reason);
heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
}
-void Heap::ReserveSpace(int *sizes, Address *locations_out) {
+void Heap::ReserveSpace(int* sizes, Address* locations_out) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
"failed to reserve space in the new space");
} else {
AbortIncrementalMarkingAndCollectGarbage(
- this,
- static_cast<AllocationSpace>(space),
+ this, static_cast<AllocationSpace>(space),
"failed to reserve space in paged space");
}
gc_performed = true;
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
- promotion_rate_ =
- (static_cast<double>(promoted_objects_size_) /
- static_cast<double>(start_new_space_size) * 100);
+ promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
+ static_cast<double>(start_new_space_size) * 100);
semi_space_copied_rate_ =
- (static_cast<double>(semi_space_copied_object_size_) /
- static_cast<double>(start_new_space_size) * 100);
+ (static_cast<double>(semi_space_copied_object_size_) /
+ static_cast<double>(start_new_space_size) * 100);
double survival_rate = promotion_rate_ + semi_space_copied_rate_;
}
bool Heap::PerformGarbageCollection(
- GarbageCollector collector,
- const v8::GCCallbackFlags gc_callback_flags) {
+ GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
int freed_global_handles = 0;
if (collector != SCAVENGER) {
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
- { GCCallbacksScope scope(this);
+ {
+ GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
gc_post_processing_depth_++;
- { AllowHeapAllocation allow_allocation;
+ {
+ AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
freed_global_handles =
isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
- old_generation_allocation_limit_ =
- OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
- freed_global_handles);
+ old_generation_allocation_limit_ = OldGenerationAllocationLimit(
+ PromotedSpaceSizeOfObjects(), freed_global_handles);
}
- { GCCallbacksScope scope(this);
+ {
+ GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
callback(gc_type, gc_callback_flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
- gc_epilogue_callbacks_[i].callback(
- isolate, gc_type, gc_callback_flags);
+ gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
}
}
}
// Helper class for copying HeapObjects
-class ScavengeVisitor: public ObjectVisitor {
+class ScavengeVisitor : public ObjectVisitor {
public:
explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
-class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
+class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
public:
explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(Object** start, Object**end) {
+ void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
// do not expect them.
VerifyNonPointerSpacePointersVisitor v(heap);
HeapObjectIterator code_it(heap->code_space());
- for (HeapObject* object = code_it.Next();
- object != NULL; object = code_it.Next())
+ for (HeapObject* object = code_it.Next(); object != NULL;
+ object = code_it.Next())
object->Iterate(&v);
// The old data space was normally swept conservatively so that the iterator
// doesn't work, so we normally skip the next bit.
if (heap->old_data_space()->swept_precisely()) {
HeapObjectIterator data_it(heap->old_data_space());
- for (HeapObject* object = data_it.Next();
- object != NULL; object = data_it.Next())
+ for (HeapObject* object = data_it.Next(); object != NULL;
+ object = data_it.Next())
object->Iterate(&v);
}
}
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
return heap->InNewSpace(*p) &&
- !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+ !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
-void Heap::ScavengeStoreBufferCallback(
- Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event) {
+void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+ StoreBufferEvent event) {
heap->store_buffer_rebuilder_.Callback(page, event);
}
// Assumes that a NewSpacePage exactly fits a number of promotion queue
// entries (where each is a pair of intptr_t). This allows us to simplify
// the test for when to switch pages.
- DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
- == 0);
+ DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
+ 0);
limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
front_ = rear_ =
reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
intptr_t* head_start = rear_;
- intptr_t* head_end =
- Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+ intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
int entries_count =
static_cast<int>(head_end - head_start) / kEntrySizeInWords;
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
- explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
+ explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
virtual Object* RetainAs(Object* object) {
if (!heap_->InFromSpace(object)) {
// Copy objects reachable from the old generation.
{
- StoreBufferRebuildScope scope(this,
- store_buffer(),
+ StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
}
// Copy objects reachable from simple cells by scavenging cell values
// directly.
HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* heap_object = cell_iterator.Next();
- heap_object != NULL;
+ for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
heap_object = cell_iterator.Next()) {
if (heap_object->IsCell()) {
Cell* cell = Cell::cast(heap_object);
void Heap::UpdateReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
-
// Update old space string references.
if (external_string_table_.old_space_strings_.length() > 0) {
Object** start = &external_string_table_.old_space_strings_[0];
uint64_t size_of_objects_after_gc = SizeOfObjects();
double old_generation_survival_rate =
(static_cast<double>(size_of_objects_after_gc) * 100) /
- static_cast<double>(size_of_objects_before_gc);
+ static_cast<double>(size_of_objects_before_gc);
if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
// Too many objects died in the old generation, pretenuring of wrong
// our pretenuring decisions.
ResetAllAllocationSitesDependentCode(TENURED);
if (FLAG_trace_pretenuring) {
- PrintF("Deopt all allocation sites dependent code due to low survival "
- "rate in the old generation %f\n", old_generation_survival_rate);
+ PrintF(
+ "Deopt all allocation sites dependent code due to low survival "
+ "rate in the old generation %f\n",
+ old_generation_survival_rate);
}
}
}
class ExternalStringTableVisitorAdapter : public ObjectVisitor {
public:
explicit ExternalStringTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
+ v8::ExternalResourceVisitor* visitor)
+ : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
+ visitor_->VisitExternalString(
+ Utils::ToLocal(Handle<String>(String::cast(*p))));
}
}
+
private:
v8::ExternalResourceVisitor* visitor_;
} external_string_table_visitor(visitor);
if (!NewSpacePage::IsAtEnd(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
new_space_front +=
- NewSpaceScavenger::IterateBody(object->map(), object);
+ NewSpaceScavenger::IterateBody(object->map(), object);
} else {
new_space_front =
NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
// Promote and process all the to-be-promoted objects.
{
- StoreBufferRebuildScope scope(this,
- store_buffer(),
+ StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
while (!promotion_queue()->is_empty()) {
HeapObject* target;
// for pointers to from semispace instead of looking for pointers
// to new space.
DCHECK(!target->IsMap());
- IterateAndMarkPointersToFromSpace(target->address(),
- target->address() + size,
- &ScavengeObject);
+ IterateAndMarkPointersToFromSpace(
+ target->address(), target->address() + size, &ScavengeObject);
}
}
}
-STATIC_ASSERT((FixedDoubleArray::kHeaderSize &
- kDoubleAlignmentMask) == 0); // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset &
- kDoubleAlignmentMask) == 0); // NOLINT
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
+ 0); // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
+ 0); // NOLINT
STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
kDoubleAlignmentMask) == 0); // NOLINT
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
- HeapObject* object,
+INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
int size));
-static HeapObject* EnsureDoubleAligned(Heap* heap,
- HeapObject* object,
+static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
int size) {
if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
heap->CreateFillerObjectAt(object->address(), kPointerSize);
enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
+template <MarksHandling marks_handling,
+ LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
- table_.Register(kVisitNativeContext,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<Context::kSize>);
+ table_.Register(
+ kVisitNativeContext,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ Context::kSize>);
- table_.Register(kVisitConsString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<ConsString::kSize>);
+ table_.Register(
+ kVisitConsString,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ ConsString::kSize>);
- table_.Register(kVisitSlicedString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<SlicedString::kSize>);
+ table_.Register(
+ kVisitSlicedString,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ SlicedString::kSize>);
- table_.Register(kVisitSymbol,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<Symbol::kSize>);
+ table_.Register(
+ kVisitSymbol,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ Symbol::kSize>);
- table_.Register(kVisitSharedFunctionInfo,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<SharedFunctionInfo::kSize>);
+ table_.Register(
+ kVisitSharedFunctionInfo,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ SharedFunctionInfo::kSize>);
table_.Register(kVisitJSWeakCollection,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSArrayBuffer,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSTypedArray,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSDataView,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSRegExp,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- Visit);
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
if (marks_handling == IGNORE_MARKS) {
- table_.Register(kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>);
+ table_.Register(
+ kVisitJSFunction,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ JSFunction::kSize>);
} else {
table_.Register(kVisitJSFunction, &EvacuateJSFunction);
}
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
+ kVisitDataObject, kVisitDataObjectGeneric>();
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
+ kVisitJSObject, kVisitJSObjectGeneric>();
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitStruct,
- kVisitStructGeneric>();
+ kVisitStruct, kVisitStructGeneric>();
}
static VisitorDispatchTable<ScavengingCallback>* GetTable() {
}
private:
- enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+ enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
- INLINE(static void MigrateObject(Heap* heap,
- HeapObject* source,
- HeapObject* target,
- int size)) {
+ INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
+ HeapObject* target, int size)) {
// If we migrate into to-space, then the to-space top pointer should be
// right after the target object. Incorporate double alignment
// over-allocation.
DCHECK(!heap->InToSpace(target) ||
- target->address() + size == heap->new_space()->top() ||
- target->address() + size + kPointerSize == heap->new_space()->top());
+ target->address() + size == heap->new_space()->top() ||
+ target->address() + size + kPointerSize == heap->new_space()->top());
// Make sure that we do not overwrite the promotion queue which is at
// the end of to-space.
DCHECK(!heap->InToSpace(target) ||
- heap->promotion_queue()->IsBelowPromotionQueue(
- heap->new_space()->top()));
+ heap->promotion_queue()->IsBelowPromotionQueue(
+ heap->new_space()->top()));
// Copy the content of source to target.
heap->CopyBlock(target->address(), source->address(), size);
}
}
- template<int alignment>
- static inline bool SemiSpaceCopyObject(Map* map,
- HeapObject** slot,
- HeapObject* object,
- int object_size) {
+ template <int alignment>
+ static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
int allocation_size = object_size;
}
- template<ObjectContents object_contents, int alignment>
- static inline bool PromoteObject(Map* map,
- HeapObject** slot,
- HeapObject* object,
- int object_size) {
+ template <ObjectContents object_contents, int alignment>
+ static inline bool PromoteObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
int allocation_size = object_size;
if (object_contents == POINTER_OBJECT) {
if (map->instance_type() == JS_FUNCTION_TYPE) {
- heap->promotion_queue()->insert(
- target, JSFunction::kNonWeakFieldsEndOffset);
+ heap->promotion_queue()->insert(target,
+ JSFunction::kNonWeakFieldsEndOffset);
} else {
heap->promotion_queue()->insert(target, object_size);
}
}
- template<ObjectContents object_contents, int alignment>
- static inline void EvacuateObject(Map* map,
- HeapObject** slot,
- HeapObject* object,
- int object_size) {
+ template <ObjectContents object_contents, int alignment>
+ static inline void EvacuateObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
SLOW_DCHECK(object->Size() == object_size);
Heap* heap = map->GetHeap();
}
}
- if (PromoteObject<object_contents, alignment>(
- map, slot, object, object_size)) {
+ if (PromoteObject<object_contents, alignment>(map, slot, object,
+ object_size)) {
return;
}
}
- static inline void EvacuateJSFunction(Map* map,
- HeapObject** slot,
+ static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>(map, slot, object);
+ ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ JSFunction::kSize>(map, slot, object);
HeapObject* target = *slot;
MarkBit mark_bit = Marking::MarkBitFrom(target);
Address code_entry_slot =
target->address() + JSFunction::kCodeEntryOffset;
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- map->GetHeap()->mark_compact_collector()->
- RecordCodeEntrySlot(code_entry_slot, code);
+ map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
+ code_entry_slot, code);
}
}
- static inline void EvacuateFixedArray(Map* map,
- HeapObject** slot,
+ static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+ object_size);
}
- static inline void EvacuateFixedDoubleArray(Map* map,
- HeapObject** slot,
+ static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+ object_size);
}
- static inline void EvacuateFixedTypedArray(Map* map,
- HeapObject** slot,
+ static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+ object_size);
}
- static inline void EvacuateFixedFloat64Array(Map* map,
- HeapObject** slot,
+ static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
- EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+ object_size);
}
- static inline void EvacuateByteArray(Map* map,
- HeapObject** slot,
+ static inline void EvacuateByteArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+ object_size);
}
- static inline void EvacuateSeqOneByteString(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqOneByteString::cast(object)->
- SeqOneByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(
- map, slot, object, object_size);
+ static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqOneByteString::cast(object)
+ ->SeqOneByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+ object_size);
}
- static inline void EvacuateSeqTwoByteString(Map* map,
- HeapObject** slot,
+ static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
HeapObject* object) {
- int object_size = SeqTwoByteString::cast(object)->
- SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kObjectAlignment>(
- map, slot, object, object_size);
+ int object_size = SeqTwoByteString::cast(object)
+ ->SeqTwoByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+ object_size);
}
- static inline void EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
+ static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
HeapObject* object) {
DCHECK(IsShortcutCandidate(map->instance_type()));
Heap* heap = map->GetHeap();
if (marks_handling == IGNORE_MARKS &&
- ConsString::cast(object)->unchecked_second() ==
- heap->empty_string()) {
+ ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
}
int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+ object_size);
}
- template<ObjectContents object_contents>
+ template <ObjectContents object_contents>
class ObjectEvacuationStrategy {
public:
- template<int object_size>
- static inline void VisitSpecialized(Map* map,
- HeapObject** slot,
+ template <int object_size>
+ static inline void VisitSpecialized(Map* map, HeapObject** slot,
HeapObject* object) {
- EvacuateObject<object_contents, kObjectAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+ object_size);
}
- static inline void Visit(Map* map,
- HeapObject** slot,
- HeapObject* object) {
+ static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
int object_size = map->instance_size();
- EvacuateObject<object_contents, kObjectAlignment>(
- map, slot, object, object_size);
+ EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+ object_size);
}
};
};
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
+template <MarksHandling marks_handling,
+ LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
void Heap::SelectScavengingVisitorsTable() {
bool logging_and_profiling =
- FLAG_verify_predictable ||
- isolate()->logger()->is_logging() ||
+ FLAG_verify_predictable || isolate()->logger()->is_logging() ||
isolate()->cpu_profiler()->is_profiling() ||
(isolate()->heap_profiler() != NULL &&
isolate()->heap_profiler()->is_tracking_object_moves());
if (!incremental_marking()->IsMarking()) {
if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+ IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
+ scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+ IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
} else {
if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+ TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
+ scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+ TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
if (incremental_marking()->IsCompacting()) {
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
reinterpret_cast<Map*>(result)->set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+ StaticVisitorBase::GetVisitorId(instance_type, instance_size));
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
}
-AllocationResult Heap::AllocateFillerObject(int size,
- bool double_align,
+AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
AllocationSpace space) {
HeapObject* obj;
- { AllocationResult allocation = AllocateRaw(size, space, space);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, space);
if (!allocation.To(&obj)) return allocation;
}
#ifdef DEBUG
const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
- {type, size, k##camel_name##MapRootIndex},
- STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
+#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
+ { type, size, k##camel_name##MapRootIndex } \
+ ,
+ STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};
const Heap::ConstantStringTable Heap::constant_string_table[] = {
-#define CONSTANT_STRING_ELEMENT(name, contents) \
- {contents, k##name##RootIndex},
- INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#define CONSTANT_STRING_ELEMENT(name, contents) \
+ { contents, k##name##RootIndex } \
+ ,
+ INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};
const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
- { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
- STRUCT_LIST(STRUCT_TABLE_ELEMENT)
+#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
+ { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
+ ,
+ STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};
bool Heap::CreateInitialMaps() {
HeapObject* obj;
- { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
+ {
+ AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
if (!allocation.To(&obj)) return false;
}
// Map::cast cannot be used due to uninitialized map field.
new_meta_map->set_map(new_meta_map);
{ // Partial map allocation
-#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
- { Map* map; \
- if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
+#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
+ { \
+ Map* map; \
+ if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
}
// Allocate the empty array.
- { AllocationResult allocation = AllocateEmptyFixedArray();
+ {
+ AllocationResult allocation = AllocateEmptyFixedArray();
if (!allocation.To(&obj)) return false;
}
set_empty_fixed_array(FixedArray::cast(obj));
- { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
+ {
+ AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kNull);
- { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
+ {
+ AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
set_exception(null_value());
// Allocate the empty descriptor array.
- { AllocationResult allocation = AllocateEmptyFixedArray();
+ {
+ AllocationResult allocation = AllocateEmptyFixedArray();
if (!allocation.To(&obj)) return false;
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Allocate the constant pool array.
- { AllocationResult allocation = AllocateEmptyConstantPoolArray();
+ {
+ AllocationResult allocation = AllocateEmptyConstantPoolArray();
if (!allocation.To(&obj)) return false;
}
set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
constant_pool_array_map()->set_constructor(null_value());
{ // Map allocation
-#define ALLOCATE_MAP(instance_type, size, field_name) \
- { Map* map; \
- if (!AllocateMap((instance_type), size).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
+#define ALLOCATE_MAP(instance_type, size, field_name) \
+ { \
+ Map* map; \
+ if (!AllocateMap((instance_type), size).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
-#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
- ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+ ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
DCHECK(fixed_array_map() != fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
- ALLOCATE_MAP(
- MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize, mutable_heap_number)
+ ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
+ mutable_heap_number)
ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
- { AllocationResult allocation = AllocateMap(entry.type, entry.size);
+ {
+ AllocationResult allocation = AllocateMap(entry.type, entry.size);
if (!allocation.To(&obj)) return false;
}
// Mark cons string maps as unstable, because their objects can change
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
-#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
- ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
- external_##type##_array)
+#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
+ external_##type##_array)
- TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
+ TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
#undef ALLOCATE_EXTERNAL_ARRAY_MAP
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
- ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \
- fixed_##type##_array)
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+ ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
- TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+ TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
const StructTable& entry = struct_table[i];
Map* map;
- if (!AllocateMap(entry.type, entry.size).To(&map))
- return false;
+ if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
roots_[entry.index] = map;
}
StaticVisitorBase::kVisitNativeContext);
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
- shared_function_info)
+ shared_function_info)
- ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
- message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
- external)
+ ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
+ ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
external_map()->set_is_extensible(false);
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
}
- { // Empty arrays
- { ByteArray* byte_array;
+ { // Empty arrays
+ {
+ ByteArray* byte_array;
if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
set_empty_byte_array(byte_array);
}
-#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
- { ExternalArray* obj; \
- if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_external_##type##_array(obj); \
- }
+#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ ExternalArray* obj; \
+ if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_external_##type##_array(obj); \
+ }
TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { FixedTypedArrayBase* obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_fixed_##type##_array(obj); \
- }
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
}
-AllocationResult Heap::AllocateHeapNumber(double value,
- MutableMode mode,
+AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(cell_map());
set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
// Finish initializing oddballs after creating the string table.
- Oddball::Initialize(isolate(),
- factory->undefined_value(),
- "undefined",
- factory->nan_value(),
- Oddball::kUndefined);
+ Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
+ factory->nan_value(), Oddball::kUndefined);
// Initialize the null_value.
- Oddball::Initialize(isolate(),
- factory->null_value(),
- "null",
- handle(Smi::FromInt(0), isolate()),
- Oddball::kNull);
-
- set_true_value(*factory->NewOddball(factory->boolean_map(),
- "true",
+ Oddball::Initialize(isolate(), factory->null_value(), "null",
+ handle(Smi::FromInt(0), isolate()), Oddball::kNull);
+
+ set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
handle(Smi::FromInt(1), isolate()),
Oddball::kTrue));
- set_false_value(*factory->NewOddball(factory->boolean_map(),
- "false",
+ set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
handle(Smi::FromInt(0), isolate()),
Oddball::kFalse));
- set_the_hole_value(*factory->NewOddball(factory->the_hole_map(),
- "hole",
+ set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
handle(Smi::FromInt(-1), isolate()),
Oddball::kTheHole));
- set_uninitialized_value(
- *factory->NewOddball(factory->uninitialized_map(),
- "uninitialized",
- handle(Smi::FromInt(-1), isolate()),
- Oddball::kUninitialized));
-
- set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(),
- "arguments_marker",
- handle(Smi::FromInt(-4), isolate()),
- Oddball::kArgumentMarker));
-
- set_no_interceptor_result_sentinel(
- *factory->NewOddball(factory->no_interceptor_result_sentinel_map(),
- "no_interceptor_result_sentinel",
- handle(Smi::FromInt(-2), isolate()),
- Oddball::kOther));
-
- set_termination_exception(
- *factory->NewOddball(factory->termination_exception_map(),
- "termination_exception",
- handle(Smi::FromInt(-3), isolate()),
- Oddball::kOther));
-
- set_exception(
- *factory->NewOddball(factory->exception_map(),
- "exception",
- handle(Smi::FromInt(-5), isolate()),
- Oddball::kException));
+ set_uninitialized_value(*factory->NewOddball(
+ factory->uninitialized_map(), "uninitialized",
+ handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
+
+ set_arguments_marker(*factory->NewOddball(
+ factory->arguments_marker_map(), "arguments_marker",
+ handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
+
+ set_no_interceptor_result_sentinel(*factory->NewOddball(
+ factory->no_interceptor_result_sentinel_map(),
+ "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
+ Oddball::kOther));
+
+ set_termination_exception(*factory->NewOddball(
+ factory->termination_exception_map(), "termination_exception",
+ handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
+
+ set_exception(*factory->NewOddball(factory->exception_map(), "exception",
+ handle(Smi::FromInt(-5), isolate()),
+ Oddball::kException));
for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
Handle<String> str =
Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
set_intrinsic_function_names(*intrinsic_names);
- set_number_string_cache(*factory->NewFixedArray(
- kInitialNumberStringCacheSize * 2, TENURED));
+ set_number_string_cache(
+ *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
// Allocate cache for single character one byte strings.
- set_single_character_string_cache(*factory->NewFixedArray(
- String::kMaxOneByteCharCode + 1, TENURED));
+ set_single_character_string_cache(
+ *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
// Allocate cache for string split and regexp-multiple.
set_string_split_cache(*factory->NewFixedArray(
RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
// Allocate cache for external strings pointing to native source code.
- set_natives_source_cache(*factory->NewFixedArray(
- Natives::GetBuiltinsCount()));
+ set_natives_source_cache(
+ *factory->NewFixedArray(Natives::GetBuiltinsCount()));
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
// Handling of script id generation is in Factory::NewScript.
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
- set_allocation_sites_scratchpad(*factory->NewFixedArray(
- kAllocationSiteScratchpadSize, TENURED));
+ set_allocation_sites_scratchpad(
+ *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
InitializeAllocationSitesScratchpad();
// Initialize keyed lookup cache.
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
RootListIndex writable_roots[] = {
- kStoreBufferTopRootIndex,
- kStackLimitRootIndex,
- kNumberStringCacheRootIndex,
- kInstanceofCacheFunctionRootIndex,
- kInstanceofCacheMapRootIndex,
- kInstanceofCacheAnswerRootIndex,
- kCodeStubsRootIndex,
- kNonMonomorphicCacheRootIndex,
- kPolymorphicCodeCacheRootIndex,
- kLastScriptIdRootIndex,
- kEmptyScriptRootIndex,
- kRealStackLimitRootIndex,
- kArgumentsAdaptorDeoptPCOffsetRootIndex,
- kConstructStubDeoptPCOffsetRootIndex,
- kGetterStubDeoptPCOffsetRootIndex,
- kSetterStubDeoptPCOffsetRootIndex,
- kStringTableRootIndex,
+ kStoreBufferTopRootIndex,
+ kStackLimitRootIndex,
+ kNumberStringCacheRootIndex,
+ kInstanceofCacheFunctionRootIndex,
+ kInstanceofCacheMapRootIndex,
+ kInstanceofCacheAnswerRootIndex,
+ kCodeStubsRootIndex,
+ kNonMonomorphicCacheRootIndex,
+ kPolymorphicCodeCacheRootIndex,
+ kLastScriptIdRootIndex,
+ kEmptyScriptRootIndex,
+ kRealStackLimitRootIndex,
+ kArgumentsAdaptorDeoptPCOffsetRootIndex,
+ kConstructStubDeoptPCOffsetRootIndex,
+ kGetterStubDeoptPCOffsetRootIndex,
+ kSetterStubDeoptPCOffsetRootIndex,
+ kStringTableRootIndex,
};
for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
- if (root_index == writable_roots[i])
- return true;
+ if (root_index == writable_roots[i]) return true;
}
return false;
}
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
return !RootCanBeWrittenAfterInitialization(root_index) &&
- !InNewSpace(roots_array_start()[root_index]);
+ !InNewSpace(roots_array_start()[root_index]);
}
-Object* RegExpResultsCache::Lookup(Heap* heap,
- String* key_string,
- Object* key_pattern,
- ResultsCacheType type) {
+Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
+ Object* key_pattern, ResultsCacheType type) {
FixedArray* cache;
if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
if (type == STRING_SPLIT_SUBSTRINGS) {
uint32_t hash = key_string->Hash();
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
+ ~(kArrayEntriesPerCacheEntry - 1));
if (cache->get(index + kStringOffset) == key_string &&
cache->get(index + kPatternOffset) == key_pattern) {
return cache->get(index + kArrayOffset);
}
-void RegExpResultsCache::Enter(Isolate* isolate,
- Handle<String> key_string,
+void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern,
Handle<FixedArray> value_array,
ResultsCacheType type) {
uint32_t hash = key_string->Hash();
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
+ ~(kArrayEntriesPerCacheEntry - 1));
if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
cache->set(index + kStringOffset, *key_string);
cache->set(index + kPatternOffset, *key_pattern);
// We cannot use the normal write-barrier because slots need to be
// recorded with non-incremental marking as well. We have to explicitly
// record the slot to take evacuation candidates into account.
- allocation_sites_scratchpad()->set(
- allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
+ allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
+ site, SKIP_WRITE_BARRIER);
Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
allocation_sites_scratchpad_length_);
// candidates are not part of the global list of old space pages and
// releasing an evacuation candidate due to a slots buffer overflow
// results in lost pages.
- mark_compact_collector()->RecordSlot(
- slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
+ mark_compact_collector()->RecordSlot(slot, slot, *slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
}
allocation_sites_scratchpad_length_++;
}
Heap::RootListIndex Heap::RootIndexForExternalArrayType(
ExternalArrayType array_type) {
switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return kExternal##Type##ArrayMapRootIndex;
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return kExternal##Type##ArrayMapRootIndex;
TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX
Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
ExternalArrayType array_type) {
switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return kFixed##Type##ArrayMapRootIndex;
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return kFixed##Type##ArrayMapRootIndex;
TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX
Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
ElementsKind elementsKind) {
switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
- return kEmptyExternal##Type##ArrayRootIndex;
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case EXTERNAL_##TYPE##_ELEMENTS: \
+ return kEmptyExternal##Type##ArrayRootIndex;
TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
#undef ELEMENT_KIND_TO_ROOT_INDEX
Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
ElementsKind elementsKind) {
switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- return kEmptyFixed##Type##ArrayRootIndex;
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return kEmptyFixed##Type##ArrayRootIndex;
TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
#undef ELEMENT_KIND_TO_ROOT_INDEX
int size = ByteArray::SizeFor(length);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
AllocationResult Heap::AllocateExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
+ ExternalArrayType array_type,
+ void* external_pointer,
+ PretenureFlag pretenure) {
int size = ExternalArray::kAlignedSize;
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(
- MapForExternalArrayType(array_type));
+ result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
ExternalArray::cast(result)->set_length(length);
ExternalArray::cast(result)->set_external_pointer(external_pointer);
return result;
}
-static void ForFixedTypedArray(ExternalArrayType array_type,
- int* element_size,
+static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
ElementsKind* element_kind) {
switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- *element_size = size; \
- *element_kind = TYPE##_ELEMENTS; \
- return;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ *element_size = size; \
+ *element_kind = TYPE##_ELEMENTS; \
+ return;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
default:
- *element_size = 0; // Bogus
+ *element_size = 0; // Bogus
*element_kind = UINT8_ELEMENTS; // Bogus
UNREACHABLE();
}
int element_size;
ElementsKind elements_kind;
ForFixedTypedArray(array_type, &element_size, &elements_kind);
- int size = OBJECT_POINTER_ALIGN(
- length * element_size + FixedTypedArrayBase::kDataOffset);
+ int size = OBJECT_POINTER_ALIGN(length * element_size +
+ FixedTypedArrayBase::kDataOffset);
#ifndef V8_HOST_ARCH_64_BIT
if (array_type == kExternalFloat64Array) {
size += kPointerSize;
result->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
- DCHECK(isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid() ||
+ DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
isolate_->code_range()->contains(code->address()));
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
new_code->set_constant_pool(new_constant_pool);
// Relocate the copy.
- DCHECK(isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid() ||
+ DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
return new_code;
// Allocate ByteArray and ConstantPoolArray before the Code object, so that we
// do not risk leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info_array;
- { AllocationResult allocation =
+ {
+ AllocationResult allocation =
AllocateByteArray(reloc_info.length(), TENURED);
if (!allocation.To(&reloc_info_array)) return allocation;
}
code->constant_pool() != empty_constant_pool_array()) {
// Copy the constant pool, since edits to the copied code may modify
// the constant pool.
- AllocationResult allocation =
- CopyConstantPoolArray(code->constant_pool());
+ AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
if (!allocation.To(&new_constant_pool)) return allocation;
} else {
new_constant_pool = empty_constant_pool_array();
new_code->set_constant_pool(new_constant_pool);
// Copy patched rinfo.
- CopyBytes(new_code->relocation_start(),
- reloc_info.start(),
+ CopyBytes(new_code->relocation_start(), reloc_info.start(),
static_cast<size_t>(reloc_info.length()));
// Relocate the copy.
- DCHECK(isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid() ||
+ DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
- AllocationSite* allocation_site) {
+ AllocationSite* allocation_site) {
DCHECK(gc_state_ == NOT_IN_GC);
DCHECK(map->instance_type() != MAP_TYPE);
// If allocation failures are disallowed, we may allocate in a different
}
-void Heap::InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
+void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
Map* map) {
obj->set_properties(properties);
obj->initialize_elements();
// so that object accesses before the constructor completes (e.g. in the
// debugger) will not cause a crash.
if (map->constructor()->IsJSFunction() &&
- JSFunction::cast(map->constructor())->
- IsInobjectSlackTrackingInProgress()) {
+ JSFunction::cast(map->constructor())
+ ->IsInobjectSlackTrackingInProgress()) {
// We might want to shrink the object later.
DCHECK(obj->GetInternalFieldCount() == 0);
filler = Heap::one_pointer_filler_map();
AllocationResult Heap::AllocateJSObjectFromMap(
- Map* map,
- PretenureFlag pretenure,
- bool allocate_properties,
+ Map* map, PretenureFlag pretenure, bool allocate_properties,
AllocationSite* allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
if (allocate_properties) {
int prop_size = map->InitialPropertiesLength();
DCHECK(prop_size >= 0);
- { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
+ {
+ AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
if (!allocation.To(&properties)) return allocation;
}
} else {
// Initialize the JSObject.
InitializeJSObjectFromMap(js_obj, properties, map);
- DCHECK(js_obj->HasFastElements() ||
- js_obj->HasExternalArrayElements() ||
+ DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
js_obj->HasFixedTypedArrayElements());
return js_obj;
}
// If we're forced to always allocate, we use the general allocation
// functions which may leave us with an object in old space.
if (always_allocate()) {
- { AllocationResult allocation =
+ {
+ AllocationResult allocation =
AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (!allocation.To(&clone)) return allocation;
}
Address clone_address = clone->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
+ CopyBlock(clone_address, source->address(), object_size);
// Update write barrier for all fields that lie beyond the header.
- RecordWrites(clone_address,
- JSObject::kHeaderSize,
+ RecordWrites(clone_address, JSObject::kHeaderSize,
(object_size - JSObject::kHeaderSize) / kPointerSize);
} else {
wb_mode = SKIP_WRITE_BARRIER;
- { int adjusted_object_size = site != NULL
- ? object_size + AllocationMemento::kSize
- : object_size;
- AllocationResult allocation =
+ {
+ int adjusted_object_size =
+ site != NULL ? object_size + AllocationMemento::kSize : object_size;
+ AllocationResult allocation =
AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
if (!allocation.To(&clone)) return allocation;
}
SLOW_DCHECK(InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
- CopyBlock(clone->address(),
- source->address(),
- object_size);
+ CopyBlock(clone->address(), source->address(), object_size);
if (site != NULL) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
}
}
- SLOW_DCHECK(
- JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+ SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
+ source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
FixedArrayBase* elem;
- { AllocationResult allocation;
+ {
+ AllocationResult allocation;
if (elements->map() == fixed_cow_array_map()) {
allocation = FixedArray::cast(elements);
} else if (source->HasFastDoubleElements()) {
// Update properties if necessary.
if (properties->length() > 0) {
FixedArray* prop;
- { AllocationResult allocation = CopyFixedArray(properties);
+ {
+ AllocationResult allocation = CopyFixedArray(properties);
if (!allocation.To(&prop)) return allocation;
}
JSObject::cast(clone)->set_properties(prop, wb_mode);
}
-static inline void WriteOneByteData(Vector<const char> vector,
- uint8_t* chars,
+static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
int len) {
// Only works for ascii.
DCHECK(vector.length() == len);
MemCopy(chars, vector.start(), len);
}
-static inline void WriteTwoByteData(Vector<const char> vector,
- uint16_t* chars,
+static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
int len) {
const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
unsigned stream_length = vector.length();
}
-template<bool is_one_byte, typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field) {
+template <bool is_one_byte, typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+ uint32_t hash_field) {
DCHECK(chars >= 0);
// Compute map and object size.
int size;
// Allocate string.
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
// Need explicit instantiations.
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<true>(
- String*, int, uint32_t);
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<false>(
- String*, int, uint32_t);
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<false>(
+template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
+ int,
+ uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
+ int,
+ uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
Vector<const char>, int, uint32_t);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* result;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
AllocationResult Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
HeapObject* result;
- { AllocationResult allocation =
+ {
+ AllocationResult allocation =
AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
int len = src->length();
HeapObject* obj;
- { AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
+ {
+ AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
if (!allocation.To(&obj)) return allocation;
}
obj->set_map_no_write_barrier(fixed_array_map());
AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
HeapObject* obj;
- { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
+ {
+ AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
}
if (InNewSpace(obj)) {
obj->set_map_no_write_barrier(map);
- CopyBlock(obj->address() + kPointerSize,
- src->address() + kPointerSize,
+ CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
Map* map) {
int len = src->length();
HeapObject* obj;
- { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
+ {
+ AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
}
obj->set_map_no_write_barrier(map);
- CopyBlock(
- obj->address() + FixedDoubleArray::kLengthOffset,
- src->address() + FixedDoubleArray::kLengthOffset,
- FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+ CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
+ src->address() + FixedDoubleArray::kLengthOffset,
+ FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
return obj;
}
HeapObject* obj;
if (src->is_extended_layout()) {
ConstantPoolArray::NumberOfEntries small(src,
- ConstantPoolArray::SMALL_SECTION);
- ConstantPoolArray::NumberOfEntries extended(src,
- ConstantPoolArray::EXTENDED_SECTION);
+ ConstantPoolArray::SMALL_SECTION);
+ ConstantPoolArray::NumberOfEntries extended(
+ src, ConstantPoolArray::EXTENDED_SECTION);
AllocationResult allocation =
AllocateExtendedConstantPoolArray(small, extended);
if (!allocation.To(&obj)) return allocation;
} else {
ConstantPoolArray::NumberOfEntries small(src,
- ConstantPoolArray::SMALL_SECTION);
+ ConstantPoolArray::SMALL_SECTION);
AllocationResult allocation = AllocateConstantPoolArray(small);
if (!allocation.To(&obj)) return allocation;
}
obj->set_map_no_write_barrier(map);
- CopyBlock(
- obj->address() + ConstantPoolArray::kFirstEntryOffset,
- src->address() + ConstantPoolArray::kFirstEntryOffset,
- src->size() - ConstantPoolArray::kFirstEntryOffset);
+ CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
+ src->address() + ConstantPoolArray::kFirstEntryOffset,
+ src->size() - ConstantPoolArray::kFirstEntryOffset);
return obj;
}
DCHECK(!InNewSpace(filler));
HeapObject* result;
- { AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
+ {
+ AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
if (!allocation.To(&result)) return allocation;
}
if (length == 0) return empty_fixed_array();
HeapObject* obj;
- { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
+ {
+ AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
}
AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
- int length,
- PretenureFlag pretenure) {
+ int length, PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array();
HeapObject* elements;
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* object;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&object)) return allocation;
}
AllocationResult Heap::AllocateConstantPoolArray(
- const ConstantPoolArray::NumberOfEntries& small) {
+ const ConstantPoolArray::NumberOfEntries& small) {
CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
int size = ConstantPoolArray::SizeFor(small);
#ifndef V8_HOST_ARCH_64_BIT
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
HeapObject* object;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
if (!allocation.To(&object)) return allocation;
}
object = EnsureDoubleAligned(this, object, size);
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
HeapObject* object;
- { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ {
+ AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
if (!allocation.To(&object)) return allocation;
}
object = EnsureDoubleAligned(this, object, size);
ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
int size = ConstantPoolArray::SizeFor(small);
HeapObject* result;
- { AllocationResult allocation =
+ {
+ AllocationResult allocation =
AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
} while (hash == 0 && attempts < 30);
if (hash == 0) hash = 1; // never return 0
- Symbol::cast(result)->set_hash_field(
- Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
+ Symbol::cast(result)
+ ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
Symbol::cast(result)->set_name(undefined_value());
Symbol::cast(result)->set_flags(Smi::FromInt(0));
Map* map;
switch (type) {
#define MAKE_CASE(NAME, Name, name) \
- case NAME##_TYPE: map = name##_map(); break;
-STRUCT_LIST(MAKE_CASE)
+ case NAME##_TYPE: \
+ map = name##_map(); \
+ break;
+ STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
default:
UNREACHABLE();
int size = map->instance_size();
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
Struct* result;
- { AllocationResult allocation = Allocate(map, space);
+ {
+ AllocationResult allocation = Allocate(map, space);
if (!allocation.To(&result)) return allocation;
}
result->InitializeBody(size);
// The size factor is in range [5..250]. The numbers here are chosen from
// experiments. If you changes them, make sure to test with
// chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
- intptr_t step_size =
- size_factor * IncrementalMarking::kAllocatedThreshold;
+ intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint);
HistogramTimerScope idle_notification_scope(
}
}
- int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
- mark_sweeps_since_idle_round_started_;
+ int remaining_mark_sweeps =
+ kMaxMarkSweepsInIdleRound - mark_sweeps_since_idle_round_started_;
if (incremental_marking()->IsStopped()) {
// If there are no more than two GCs left in this idle round and we are
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
USE(title);
- PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
- title, gc_count_);
+ PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
+ gc_count_);
PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_generation_allocation_limit_);
#endif // DEBUG
-bool Heap::Contains(HeapObject* value) {
- return Contains(value->address());
-}
+bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
bool Heap::Contains(Address addr) {
if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
return HasBeenSetUp() &&
- (new_space_.ToSpaceContains(addr) ||
- old_pointer_space_->Contains(addr) ||
- old_data_space_->Contains(addr) ||
- code_space_->Contains(addr) ||
- map_space_->Contains(addr) ||
- cell_space_->Contains(addr) ||
- property_cell_space_->Contains(addr) ||
- lo_space_->SlowContains(addr));
+ (new_space_.ToSpaceContains(addr) ||
+ old_pointer_space_->Contains(addr) ||
+ old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
+ map_space_->Contains(addr) || cell_space_->Contains(addr) ||
+ property_cell_space_->Contains(addr) ||
+ lo_space_->SlowContains(addr));
}
while (it.has_next()) {
NewSpacePage* page = it.next();
for (Address cursor = page->area_start(), limit = page->area_end();
- cursor < limit;
- cursor += kPointerSize) {
+ cursor < limit; cursor += kPointerSize) {
Memory::Address_at(cursor) = kFromSpaceZapValue;
}
}
}
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
- Address end,
+void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
ObjectSlotCallback callback) {
Address slot_address = start;
}
-bool EverythingsAPointer(Object** addr) {
- return true;
-}
+bool EverythingsAPointer(Object** addr) { return true; }
-static void CheckStoreBuffer(Heap* heap,
- Object** current,
- Object** limit,
+static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
Object**** store_buffer_position,
Object*** store_buffer_top,
CheckStoreBufferFilter filter,
Address special_garbage_start,
Address special_garbage_end) {
Map* free_space_map = heap->free_space_map();
- for ( ; current < limit; current++) {
+ for (; current < limit; current++) {
Object* o = *current;
Address current_address = reinterpret_cast<Address>(current);
// Skip free space.
Object*** store_buffer_top = store_buffer()->Top();
Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- space->top(),
+ CheckStoreBuffer(this, current, limit, &store_buffer_position,
+ store_buffer_top, &EverythingsAPointer, space->top(),
space->limit());
}
}
Object*** store_buffer_top = store_buffer()->Top();
Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &IsAMapPointerAddress,
- space->top(),
+ CheckStoreBuffer(this, current, limit, &store_buffer_position,
+ store_buffer_top, &IsAMapPointerAddress, space->top(),
space->limit());
}
}
Object** current = reinterpret_cast<Object**>(object->address());
Object** limit =
reinterpret_cast<Object**>(object->address() + object->Size());
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- NULL,
- NULL);
+ CheckStoreBuffer(this, current, limit, &store_buffer_position,
+ store_buffer_top, &EverythingsAPointer, NULL, NULL);
}
}
}
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
- if (mode != VISIT_ALL_IN_SCAVENGE &&
- mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+ if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.Iterate(v);
}
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semi_space_size,
- int max_old_space_size,
- int max_executable_size,
- size_t code_range_size) {
+bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+ int max_executable_size, size_t code_range_size) {
if (HasBeenSetUp()) return false;
// Overwrite default configuration.
if (initial_semispace_size > max_semi_space_size_) {
initial_semispace_size_ = max_semi_space_size_;
if (FLAG_trace_gc) {
- PrintPID("Min semi-space size cannot be more than the maximum"
- "semi-space size of %d MB\n", max_semi_space_size_);
+ PrintPID(
+ "Min semi-space size cannot be more than the maximum"
+ "semi-space size of %d MB\n",
+ max_semi_space_size_);
}
} else {
initial_semispace_size_ = initial_semispace_size;
}
-bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(0, 0, 0, 0);
-}
+bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
isolate()->memory_allocator()->Size() +
isolate()->memory_allocator()->Available();
*stats->os_error = base::OS::GetLastError();
- isolate()->memory_allocator()->Available();
+ isolate()->memory_allocator()->Available();
if (take_snapshot) {
HeapIterator iterator(this);
- for (HeapObject* obj = iterator.next();
- obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != NULL;
obj = iterator.next()) {
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
intptr_t Heap::PromotedSpaceSizeOfObjects() {
- return old_pointer_space_->SizeOfObjects()
- + old_data_space_->SizeOfObjects()
- + code_space_->SizeOfObjects()
- + map_space_->SizeOfObjects()
- + cell_space_->SizeOfObjects()
- + property_cell_space_->SizeOfObjects()
- + lo_space_->SizeOfObjects();
+ return old_pointer_space_->SizeOfObjects() +
+ old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
+ map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
+ property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
}
int64_t Heap::PromotedExternalMemorySize() {
- if (amount_of_external_allocated_memory_
- <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
- return amount_of_external_allocated_memory_
- - amount_of_external_allocated_memory_at_last_global_gc_;
+ if (amount_of_external_allocated_memory_ <=
+ amount_of_external_allocated_memory_at_last_global_gc_)
+ return 0;
+ return amount_of_external_allocated_memory_ -
+ amount_of_external_allocated_memory_at_last_global_gc_;
}
// (kMinHandles, max_factor) and (kMaxHandles, min_factor).
factor = max_factor -
(freed_global_handles - kMinHandles) * (max_factor - min_factor) /
- (kMaxHandles - kMinHandles);
+ (kMaxHandles - kMinHandles);
}
if (FLAG_stress_compaction ||
// Update inline allocation limit for old spaces.
PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next();
- space != NULL;
+ for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
space->EmptyAllocationInfo();
}
// Set up memory allocator.
if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
- return false;
+ return false;
// Set up new space.
if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
new_space_top_after_last_gc_ = new_space()->top();
// Initialize old pointer space.
- old_pointer_space_ =
- new OldSpace(this,
- max_old_generation_size_,
- OLD_POINTER_SPACE,
- NOT_EXECUTABLE);
+ old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
+ OLD_POINTER_SPACE, NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
if (!old_pointer_space_->SetUp()) return false;
// Initialize old data space.
- old_data_space_ =
- new OldSpace(this,
- max_old_generation_size_,
- OLD_DATA_SPACE,
- NOT_EXECUTABLE);
+ old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
+ NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
if (!old_data_space_->SetUp()) return false;
// Set up the special root array entries containing the stack limits.
// These are actually addresses, but the tag makes the GC ignore it.
- roots_[kStackLimitRootIndex] =
- reinterpret_cast<Object*>(
- (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
- roots_[kRealStackLimitRootIndex] =
- reinterpret_cast<Object*>(
- (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
+ roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
+ (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
+ roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
+ (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
- PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
- get_max_alive_after_gc());
+ PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
PrintF("\n\n");
if (FLAG_print_max_heap_committed) {
PrintF("\n");
PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
- MaximumCommittedMemory());
+ MaximumCommittedMemory());
PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
- new_space_.MaximumCommittedMemory());
+ new_space_.MaximumCommittedMemory());
PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
- old_data_space_->MaximumCommittedMemory());
+ old_data_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
- old_pointer_space_->MaximumCommittedMemory());
+ old_pointer_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
- old_pointer_space_->MaximumCommittedMemory());
+ old_pointer_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
- code_space_->MaximumCommittedMemory());
+ code_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
- map_space_->MaximumCommittedMemory());
+ map_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
- cell_space_->MaximumCommittedMemory());
+ cell_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
- property_cell_space_->MaximumCommittedMemory());
+ property_cell_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
- lo_space_->MaximumCommittedMemory());
+ lo_space_->MaximumCommittedMemory());
PrintF("\n\n");
}
void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
- GCType gc_type,
- bool pass_isolate) {
+ GCType gc_type, bool pass_isolate) {
DCHECK(callback != NULL);
GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
DCHECK(!gc_prologue_callbacks_.Contains(pair));
void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
- GCType gc_type,
- bool pass_isolate) {
+ GCType gc_type, bool pass_isolate) {
DCHECK(callback != NULL);
GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
DCHECK(!gc_epilogue_callbacks_.Contains(pair));
void Heap::EnsureWeakObjectToCodeTable() {
if (!weak_object_to_code_table()->IsHashTable()) {
- set_weak_object_to_code_table(*WeakHashTable::New(
- isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED));
+ set_weak_object_to_code_table(
+ *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
+ TENURED));
}
}
#ifdef DEBUG
-class PrintHandleVisitor: public ObjectVisitor {
+class PrintHandleVisitor : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++)
- PrintF(" handle %p to %p\n",
- reinterpret_cast<void*>(p),
+ PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
reinterpret_cast<void*>(*p));
}
};
}
-
OldSpace* OldSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
: heap_(heap),
current_space_(FIRST_SPACE),
iterator_(NULL),
- size_func_(NULL) {
-}
+ size_func_(NULL) {}
SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
: heap_(heap),
current_space_(FIRST_SPACE),
iterator_(NULL),
- size_func_(size_func) {
-}
+ size_func_(size_func) {}
SpaceIterator::~SpaceIterator() {
iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
break;
case PROPERTY_CELL_SPACE:
- iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
- size_func_);
+ iterator_ =
+ new HeapObjectIterator(heap_->property_cell_space(), size_func_);
break;
case LO_SPACE:
iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
}
-HeapIterator::~HeapIterator() {
- Shutdown();
-}
+HeapIterator::~HeapIterator() { Shutdown(); }
void HeapIterator::Init() {
Object* const PathTracer::kAnyGlobalObject = NULL;
-class PathTracer::MarkVisitor: public ObjectVisitor {
+class PathTracer::MarkVisitor : public ObjectVisitor {
public:
explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
void VisitPointers(Object** start, Object** end) {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; !tracer_->found() && (p < end); p++) {
- if ((*p)->IsHeapObject())
- tracer_->MarkRecursively(p, this);
+ if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
}
}
};
-class PathTracer::UnmarkVisitor: public ObjectVisitor {
+class PathTracer::UnmarkVisitor : public ObjectVisitor {
public:
explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
void VisitPointers(Object** start, Object** end) {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- tracer_->UnmarkRecursively(p, this);
+ if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
}
}
// Scan the object body.
if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
// This is specialized to scan Context's properly.
- Object** start = reinterpret_cast<Object**>(obj->address() +
- Context::kHeaderSize);
- Object** end = reinterpret_cast<Object**>(obj->address() +
- Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
+ Object** start =
+ reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
+ Object** end =
+ reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
+ Context::FIRST_WEAK_SLOT * kPointerSize);
mark_visitor->VisitPointers(start, end);
} else {
obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
- PathTracer tracer(PathTracer::kAnyGlobalObject,
- PathTracer::FIND_ALL,
+ PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
VISIT_ALL);
IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
}
-void KeyedLookupCache::Update(Handle<Map> map,
- Handle<Name> name,
+void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
int field_offset) {
DisallowHeapAllocation no_gc;
if (!name->IsUniqueName()) {
- if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
- Handle<String>::cast(name)).
- ToHandle(&name)) {
+ if (!StringTable::InternalizeStringIfExists(
+ name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
return;
}
}
int index = (Hash(map, name) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
// help to get the most frequently used one in position 0).
- for (int i = 0; i< kEntriesPerBucket; i++) {
+ for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index];
Object* free_entry_indicator = NULL;
if (key.map == free_entry_indicator) {
// If FromAnyPointerAddress encounters a slot that belongs to one of
// these smaller pieces it will treat it as a slot on a normal Page.
Address chunk_end = chunk->address() + chunk->size();
- MemoryChunk* inner = MemoryChunk::FromAddress(
- chunk->address() + Page::kPageSize);
+ MemoryChunk* inner =
+ MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
while (inner <= inner_last) {
// Size of a large chunk is always a multiple of
inner->set_size(Page::kPageSize);
inner->set_owner(lo_space());
inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
- inner = MemoryChunk::FromAddress(
- inner->address() + Page::kPageSize);
+ inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
}
}
}
base::LockGuard<base::Mutex> lock_guard(
checkpoint_object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- counters->count_of_##name()->Increment( \
- static_cast<int>(object_counts_[name])); \
- counters->count_of_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[name])); \
- counters->size_of_##name()->Increment( \
- static_cast<int>(object_sizes_[name])); \
- counters->size_of_##name()->Decrement( \
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ counters->count_of_##name()->Increment( \
+ static_cast<int>(object_counts_[name])); \
+ counters->count_of_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[name])); \
+ counters->size_of_##name()->Increment( \
+ static_cast<int>(object_sizes_[name])); \
+ counters->size_of_##name()->Decrement( \
static_cast<int>(object_sizes_last_time_[name]));
INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
}
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
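// For illustration only (editorial sketch, not V8 code): the reformatted
// ADJUST_LAST_TIME_OBJECT_COUNT macro above feeds the external counters with
// the delta since the previous checkpoint (increment by the current value,
// decrement by the value recorded last time), then snapshots the current
// values.  The same idea in plain C++, with hypothetical names and sizes:
#include <cstring>

namespace stats_sketch {

const int kTypeCount = 8;                        // illustrative
long object_counts[kTypeCount] = {0};            // gathered during GC
long object_counts_last_time[kTypeCount] = {0};  // snapshot from last call
long external_counters[kTypeCount] = {0};        // stands in for Counters

void CheckpointObjectStats() {
  for (int t = 0; t < kTypeCount; t++) {
    // Increment(current) followed by Decrement(last_time) is equivalent to
    // adding only the change since the previous checkpoint.
    external_counters[t] += object_counts[t] - object_counts_last_time[t];
  }
  std::memcpy(object_counts_last_time, object_counts,
              sizeof(object_counts_last_time));
}

}  // namespace stats_sketch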
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_H_
-#define V8_HEAP_H_
+#ifndef V8_HEAP_HEAP_H_
+#define V8_HEAP_HEAP_H_
#include <cmath>
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/counters.h"
-#include "src/gc-tracer.h"
#include "src/globals.h"
-#include "src/incremental-marking.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/spaces.h"
#include "src/list.h"
-#include "src/mark-compact.h"
#include "src/objects-visiting.h"
-#include "src/spaces.h"
#include "src/splay-tree-inl.h"
#include "src/store-buffer.h"
V(FixedArray, microtask_queue, MicrotaskQueue)
// Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V) \
- V(Smi, stack_limit, StackLimit) \
- V(Smi, real_stack_limit, RealStackLimit) \
- V(Smi, last_script_id, LastScriptId) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+#define SMI_ROOT_LIST(V) \
+ V(Smi, stack_limit, StackLimit) \
+ V(Smi, real_stack_limit, RealStackLimit) \
+ V(Smi, last_script_id, LastScriptId) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
-#define ROOT_LIST(V) \
- STRONG_ROOT_LIST(V) \
- SMI_ROOT_LIST(V) \
+#define ROOT_LIST(V) \
+ STRONG_ROOT_LIST(V) \
+ SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers.
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
- V(byte_array_map) \
- V(free_space_map) \
- V(one_pointer_filler_map) \
- V(two_pointer_filler_map) \
- V(undefined_value) \
- V(the_hole_value) \
- V(null_value) \
- V(true_value) \
- V(false_value) \
- V(uninitialized_value) \
- V(cell_map) \
- V(global_property_cell_map) \
- V(shared_function_info_map) \
- V(meta_map) \
- V(heap_number_map) \
- V(mutable_heap_number_map) \
- V(native_context_map) \
- V(fixed_array_map) \
- V(code_map) \
- V(scope_info_map) \
- V(fixed_cow_array_map) \
- V(fixed_double_array_map) \
- V(constant_pool_array_map) \
- V(no_interceptor_result_sentinel) \
- V(hash_table_map) \
- V(ordered_hash_table_map) \
- V(empty_fixed_array) \
- V(empty_byte_array) \
- V(empty_descriptor_array) \
- V(empty_constant_pool_array) \
- V(arguments_marker) \
- V(symbol_map) \
- V(sloppy_arguments_elements_map) \
- V(function_context_map) \
- V(catch_context_map) \
- V(with_context_map) \
- V(block_context_map) \
- V(module_context_map) \
- V(global_context_map) \
- V(undefined_map) \
- V(the_hole_map) \
- V(null_map) \
- V(boolean_map) \
- V(uninitialized_map) \
- V(message_object_map) \
- V(foreign_map) \
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+ V(byte_array_map) \
+ V(free_space_map) \
+ V(one_pointer_filler_map) \
+ V(two_pointer_filler_map) \
+ V(undefined_value) \
+ V(the_hole_value) \
+ V(null_value) \
+ V(true_value) \
+ V(false_value) \
+ V(uninitialized_value) \
+ V(cell_map) \
+ V(global_property_cell_map) \
+ V(shared_function_info_map) \
+ V(meta_map) \
+ V(heap_number_map) \
+ V(mutable_heap_number_map) \
+ V(native_context_map) \
+ V(fixed_array_map) \
+ V(code_map) \
+ V(scope_info_map) \
+ V(fixed_cow_array_map) \
+ V(fixed_double_array_map) \
+ V(constant_pool_array_map) \
+ V(no_interceptor_result_sentinel) \
+ V(hash_table_map) \
+ V(ordered_hash_table_map) \
+ V(empty_fixed_array) \
+ V(empty_byte_array) \
+ V(empty_descriptor_array) \
+ V(empty_constant_pool_array) \
+ V(arguments_marker) \
+ V(symbol_map) \
+ V(sloppy_arguments_elements_map) \
+ V(function_context_map) \
+ V(catch_context_map) \
+ V(with_context_map) \
+ V(block_context_map) \
+ V(module_context_map) \
+ V(global_context_map) \
+ V(undefined_map) \
+ V(the_hole_map) \
+ V(null_map) \
+ V(boolean_map) \
+ V(uninitialized_map) \
+ V(message_object_map) \
+ V(foreign_map) \
V(neander_map)
#define INTERNALIZED_STRING_LIST(V) \
class StoreBufferRebuilder {
public:
explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer) {
- }
+ : store_buffer_(store_buffer) {}
void Callback(MemoryChunk* page, StoreBufferEvent event);
};
-
// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
rear_(NULL),
limit_(NULL),
emergency_stack_(0),
- heap_(heap) { }
+ heap_(heap) {}
void Initialize();
bool is_empty() {
return (front_ == rear_) &&
- (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+ (emergency_stack_ == NULL || emergency_stack_->length() == 0);
}
inline void insert(HeapObject* target, int size);
NewSpacePage* front_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
DCHECK(!front_page->prev_page()->is_anchor());
- front_ =
- reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
+ front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
}
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
static const int kEntrySizeInWords = 2;
struct Entry {
- Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+ Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
HeapObject* obj_;
int size_;
};
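// For illustration only (editorial sketch, not V8 code): the PromotionQueue
// above pairs every promoted object with its size so the scavenger never has
// to re-read a map (which may itself have moved) just to know how much to
// scan.  A simplified stand-in using std::deque instead of the
// semispace-backed ring buffer and the emergency stack; names are
// hypothetical.
#include <deque>
#include <utility>

class SimplePromotionQueue {
 public:
  void insert(void* object, int size) {
    entries_.push_back(std::make_pair(object, size));
  }

  bool is_empty() const { return entries_.empty(); }

  void remove(void** object, int* size) {
    // FIFO: oldest entry out first (the exact order is not essential here).
    *object = entries_.front().first;
    *size = entries_.front().second;
    entries_.pop_front();
  }

 private:
  std::deque<std::pair<void*, int> > entries_;
};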
-typedef void (*ScavengingCallback)(Map* map,
- HeapObject** slot,
+typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
HeapObject* object);
void TearDown();
private:
- explicit ExternalStringTable(Heap* heap) : heap_(heap) { }
+ explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
friend class Heap;
public:
// Configure heap size in MB before setup. Return false if the heap has been
// set up already.
- bool ConfigureHeap(int max_semi_space_size,
- int max_old_space_size,
- int max_executable_size,
- size_t code_range_size);
+ bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+ int max_executable_size, size_t code_range_size);
bool ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
CellSpace* cell_space() { return cell_space_; }
- PropertyCellSpace* property_cell_space() {
- return property_cell_space_;
- }
+ PropertyCellSpace* property_cell_space() { return property_cell_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
PagedSpace* paged_space(int idx) {
switch (idx) {
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Optionally takes an AllocationSite to be appended in an AllocationMemento.
- MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
- AllocationSite* site = NULL);
+ MUST_USE_RESULT AllocationResult
+ CopyJSObject(JSObject* source, AllocationSite* site = NULL);
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
// For use during bootup.
void RepairFreeListsAfterBoot();
- template<typename T>
+ template <typename T>
static inline bool IsOneByte(T t, int chars);
// Move len elements within a given array from src_index index to dst_index
// Returns whether there is a chance that another major GC could
// collect more garbage.
inline bool CollectGarbage(
- AllocationSpace space,
- const char* gc_reason = NULL,
+ AllocationSpace space, const char* gc_reason = NULL,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
static const int kNoGCFlags = 0;
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
void CollectAllGarbage(
- int flags,
- const char* gc_reason = NULL,
+ int flags, const char* gc_reason = NULL,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Last hope GC, should try to squeeze as much as possible.
PromotionQueue* promotion_queue() { return &promotion_queue_; }
void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
- GCType gc_type_filter,
- bool pass_isolate = true);
+ GCType gc_type_filter, bool pass_isolate = true);
void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
- GCType gc_type_filter,
- bool pass_isolate = true);
+ GCType gc_type_filter, bool pass_isolate = true);
void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
- // Heap root getters. We have versions with and without type::cast() here.
- // You can't use type::cast during GC because the assert fails.
- // TODO(1490): Try removing the unchecked accessors, now that GC marking does
- // not corrupt the map.
-#define ROOT_ACCESSOR(type, name, camel_name) \
- type* name() { \
- return type::cast(roots_[k##camel_name##RootIndex]); \
- } \
- type* raw_unchecked_##name() { \
- return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
+// Heap root getters. We have versions with and without type::cast() here.
+// You can't use type::cast during GC because the assert fails.
+// TODO(1490): Try removing the unchecked accessors, now that GC marking does
+// not corrupt the map.
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
+ type* raw_unchecked_##name() { \
+ return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
// Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- Map* name##_map() { \
- return Map::cast(roots_[k##Name##MapRootIndex]); \
- }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define STRING_ACCESSOR(name, str) String* name() { \
- return String::cast(roots_[k##name##RootIndex]); \
- }
+#define STRING_ACCESSOR(name, str) \
+ String* name() { return String::cast(roots_[k##name##RootIndex]); }
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
}
Object* native_contexts_list() const { return native_contexts_list_; }
- void set_array_buffers_list(Object* object) {
- array_buffers_list_ = object;
- }
+ void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
Object* array_buffers_list() const { return array_buffers_list_; }
void set_allocation_sites_list(Object* object) {
// Iterate pointers to from semispace of new space found in memory interval
// from start to end.
- void IterateAndMarkPointersToFromSpace(Address start,
- Address end,
+ void IterateAndMarkPointersToFromSpace(Address start, Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
#ifdef DEBUG
- void set_allocation_timeout(int timeout) {
- allocation_timeout_ = timeout;
- }
+ void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
void TracePathToObjectFrom(Object* target, Object* root);
void TracePathToObject(Object* target);
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
- enum ScratchpadSlotMode {
- IGNORE_SCRATCHPAD_SLOT,
- RECORD_SCRATCHPAD_SLOT
- };
+ enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
// If an object has an AllocationMemento trailing it, return it, otherwise
// return NULL;
// An object may have an AllocationSite associated with it through a trailing
// AllocationMemento. Its feedback should be updated when objects are found
// in the heap.
- static inline void UpdateAllocationSiteFeedback(
- HeapObject* object, ScratchpadSlotMode mode);
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
- void ReserveSpace(int *sizes, Address* addresses);
+ void ReserveSpace(int* sizes, Address* addresses);
//
// Support for the API.
static const int kPointerMultiplier = i::kPointerSize / 4;
// The new space size has to be a power of 2. Sizes are in MB.
- static const int kMaxSemiSpaceSizeLowMemoryDevice =
- 1 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeMediumMemoryDevice =
- 4 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHighMemoryDevice =
- 8 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHugeMemoryDevice =
- 8 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
// The old space size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
- static const int kMaxOldSpaceSizeLowMemoryDevice =
- 128 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
static const int kMaxOldSpaceSizeMediumMemoryDevice =
256 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHighMemoryDevice =
- 512 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHugeMemoryDevice =
- 700 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
// The executable size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
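// Editorial worked example for the constants above: kPointerMultiplier is
// i::kPointerSize / 4, so on a 64-bit build (kPointerSize == 8) it is 2,
// giving for instance
//   kMaxSemiSpaceSizeHighMemoryDevice  = 8   * 2 =   16 MB
//   kMaxOldSpaceSizeHugeMemoryDevice   = 700 * 2 = 1400 MB
// while a 32-bit build keeps the listed base values (multiplier 1).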
INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_DECLARATION
- // Utility type maps
+// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
-
kStringTableRootIndex,
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
-
kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
kSmiRootsStart = kStringTableRootIndex + 1
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
Map* MapForFixedTypedArray(ExternalArrayType array_type);
- RootListIndex RootIndexForFixedTypedArray(
- ExternalArrayType array_type);
+ RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
Map* MapForExternalArrayType(ExternalArrayType array_type);
- RootListIndex RootIndexForExternalArrayType(
- ExternalArrayType array_type);
+ RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
semi_space_copied_object_size_ += object_size;
}
- inline void IncrementNodesDiedInNewSpace() {
- nodes_died_in_new_space_++;
- }
+ inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
- inline void IncrementNodesCopiedInNewSpace() {
- nodes_copied_in_new_space_++;
- }
+ inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
- inline void IncrementNodesPromoted() {
- nodes_promoted_++;
- }
+ inline void IncrementNodesPromoted() { nodes_promoted_++; }
inline void IncrementYoungSurvivorsCounter(int survived) {
DCHECK(survived >= 0);
return &mark_compact_collector_;
}
- StoreBuffer* store_buffer() {
- return &store_buffer_;
- }
+ StoreBuffer* store_buffer() { return &store_buffer_; }
- Marking* marking() {
- return &marking_;
- }
+ Marking* marking() { return &marking_; }
- IncrementalMarking* incremental_marking() {
- return &incremental_marking_;
- }
+ IncrementalMarking* incremental_marking() { return &incremental_marking_; }
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
// Returns the current sweep generation.
- int sweep_generation() {
- return sweep_generation_;
- }
+ int sweep_generation() { return sweep_generation_; }
inline Isolate* isolate();
// Global inline caching age: it is incremented on some GCs after context
// disposal. We use it to flush inline caches.
- int global_ic_age() {
- return global_ic_age_;
- }
+ int global_ic_age() { return global_ic_age_; }
void AgeInlineCaches() {
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
void DeoptMarkedAllocationSites();
- bool MaximumSizeScavenge() {
- return maximum_size_scavenges_ > 0;
- }
+ bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
bool DeoptMaybeTenuredAllocationSites() {
return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
}
- ~RelocationLock() {
- heap_->relocation_mutex_.Unlock();
- }
+ ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
private:
Heap* heap_;
inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
// This event is triggered after object is moved to a new place.
- inline void OnMoveEvent(HeapObject* target,
- HeapObject* source,
+ inline void OnMoveEvent(HeapObject* target, HeapObject* source,
int size_in_bytes);
protected:
// Methods made available to tests.
// Allocates a JS Map in the heap.
- MUST_USE_RESULT AllocationResult AllocateMap(
- InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ MUST_USE_RESULT AllocationResult
+ AllocateMap(InstanceType instance_type, int instance_size,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
// Allocates and initializes a new JavaScript object based on a
// constructor.
// If allocation_site is non-null, then a memento is emitted after the object
// that points to the site.
- MUST_USE_RESULT AllocationResult AllocateJSObject(
- JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = NULL);
+ MUST_USE_RESULT AllocationResult
+ AllocateJSObject(JSFunction* constructor,
+ PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
// Allocates and initializes a new JavaScript object based on a map.
// Passing an allocation site means that a memento will be created that
// points to the site.
- MUST_USE_RESULT AllocationResult AllocateJSObjectFromMap(
- Map* map,
- PretenureFlag pretenure = NOT_TENURED,
- bool alloc_props = true,
- AllocationSite* allocation_site = NULL);
+ MUST_USE_RESULT AllocationResult
+ AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
+ bool alloc_props = true,
+ AllocationSite* allocation_site = NULL);
  // Allocates a HeapNumber from value.
- MUST_USE_RESULT AllocationResult AllocateHeapNumber(
- double value,
- MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult
+ AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocate a byte array of the specified length
- MUST_USE_RESULT AllocationResult AllocateByteArray(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult
+ AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
// Copy the code and scope info part of the code object, but insert
// the provided data as the relocation information.
- MUST_USE_RESULT AllocationResult CopyCode(Code* code,
- Vector<byte> reloc_info);
+ MUST_USE_RESULT AllocationResult
+ CopyCode(Code* code, Vector<byte> reloc_info);
MUST_USE_RESULT AllocationResult CopyCode(Code* code);
// Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT AllocationResult AllocateFixedArray(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult
+ AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
private:
Heap();
// Total length of the strings we failed to flatten since the last GC.
int unflattened_strings_length_;
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline void set_##name(type* value) { \
- /* The deserializer makes use of the fact that these common roots are */ \
- /* never in new space and never on a page that is being compacted. */ \
- DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
- roots_[k##camel_name##RootIndex] = value; \
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ inline void set_##name(type* value) { \
+ /* The deserializer makes use of the fact that these common roots are */ \
+ /* never in new space and never on a page that is being compacted. */ \
+ DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
+ roots_[k##camel_name##RootIndex] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
// Allocations in the callback function are disallowed.
struct GCPrologueCallbackPair {
GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
- GCType gc_type,
- bool pass_isolate)
- : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
- }
+ GCType gc_type, bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
bool operator==(const GCPrologueCallbackPair& pair) const {
return pair.callback == callback;
}
struct GCEpilogueCallbackPair {
GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
- GCType gc_type,
- bool pass_isolate)
- : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
- }
+ GCType gc_type, bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
bool operator==(const GCEpilogueCallbackPair& pair) const {
return pair.callback == callback;
}
// Returns whether there is a chance that another major GC could
// collect more garbage.
bool CollectGarbage(
- GarbageCollector collector,
- const char* gc_reason,
+ GarbageCollector collector, const char* gc_reason,
const char* collector_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
MUST_USE_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
+ int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);
// Allocates a heap object based on the map.
- MUST_USE_RESULT AllocationResult Allocate(
- Map* map,
- AllocationSpace space,
- AllocationSite* allocation_site = NULL);
+ MUST_USE_RESULT AllocationResult
+ Allocate(Map* map, AllocationSpace space,
+ AllocationSite* allocation_site = NULL);
// Allocates a partial map for bootstrapping.
- MUST_USE_RESULT AllocationResult AllocatePartialMap(
- InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT AllocationResult
+ AllocatePartialMap(InstanceType instance_type, int instance_size);
// Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj,
- FixedArray* properties,
+ void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
Map* map);
void InitializeAllocationMemento(AllocationMemento* memento,
AllocationSite* allocation_site);
// Allocate a block of memory in the given space (filled with a filler).
// Used as a fall-back for generated code when the space is full.
- MUST_USE_RESULT AllocationResult AllocateFillerObject(int size,
- bool double_align,
- AllocationSpace space);
+ MUST_USE_RESULT AllocationResult
+ AllocateFillerObject(int size, bool double_align, AllocationSpace space);
// Allocate an uninitialized fixed array.
- MUST_USE_RESULT AllocationResult AllocateRawFixedArray(
- int length, PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult
+ AllocateRawFixedArray(int length, PretenureFlag pretenure);
// Allocate an uninitialized fixed double array.
- MUST_USE_RESULT AllocationResult AllocateRawFixedDoubleArray(
- int length, PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult
+ AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
// Allocate an initialized fixed array with the given filler value.
- MUST_USE_RESULT AllocationResult AllocateFixedArrayWithFiller(
- int length, PretenureFlag pretenure, Object* filler);
+ MUST_USE_RESULT AllocationResult
+ AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
+ Object* filler);
  // Allocates and partially initializes a String. There are two String
// encodings: ASCII and two byte. These functions allocate a string of the
// given length and set its map and length fields. The characters of the
// string are uninitialized.
- MUST_USE_RESULT AllocationResult AllocateRawOneByteString(
- int length, PretenureFlag pretenure);
- MUST_USE_RESULT AllocationResult AllocateRawTwoByteString(
- int length, PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult
+ AllocateRawOneByteString(int length, PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult
+ AllocateRawTwoByteString(int length, PretenureFlag pretenure);
bool CreateInitialMaps();
void CreateInitialObjects();
// Allocates an internalized string in old space based on the character
// stream.
MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
- Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ Vector<const char> str, int chars, uint32_t hash_field);
MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
- Vector<const uint8_t> str,
- uint32_t hash_field);
+ Vector<const uint8_t> str, uint32_t hash_field);
MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
- Vector<const uc16> str,
- uint32_t hash_field);
+ Vector<const uc16> str, uint32_t hash_field);
- template<bool is_one_byte, typename T>
- MUST_USE_RESULT AllocationResult AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field);
+ template <bool is_one_byte, typename T>
+ MUST_USE_RESULT AllocationResult
+ AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
- template<typename T>
+ template <typename T>
MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
T t, int chars, uint32_t hash_field);
// Make a copy of src, set the map, and return the copy. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
- Map* map);
+ MUST_USE_RESULT AllocationResult
+ CopyFixedArrayWithMap(FixedArray* src, Map* map);
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  // Computes a single character string whose character has the given code.
// A cache is used for ASCII codes.
- MUST_USE_RESULT AllocationResult LookupSingleCharacterStringFromCode(
- uint16_t code);
+ MUST_USE_RESULT AllocationResult
+ LookupSingleCharacterStringFromCode(uint16_t code);
// Allocate a symbol in old space.
MUST_USE_RESULT AllocationResult AllocateSymbol();
// Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult CopyConstantPoolArrayWithMap(
- ConstantPoolArray* src, Map* map);
+ MUST_USE_RESULT AllocationResult
+ CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);
MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
const ConstantPoolArray::NumberOfEntries& small);
const ConstantPoolArray::NumberOfEntries& extended);
// Allocates an external array of the specified length and type.
- MUST_USE_RESULT AllocationResult AllocateExternalArray(
- int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult
+ AllocateExternalArray(int length, ExternalArrayType array_type,
+ void* external_pointer, PretenureFlag pretenure);
// Allocates a fixed typed array of the specified length and type.
- MUST_USE_RESULT AllocationResult AllocateFixedTypedArray(
- int length,
- ExternalArrayType array_type,
- PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult
+ AllocateFixedTypedArray(int length, ExternalArrayType array_type,
+ PretenureFlag pretenure);
// Make a copy of src and return it.
MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
// Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult CopyFixedDoubleArrayWithMap(
- FixedDoubleArray* src, Map* map);
+ MUST_USE_RESULT AllocationResult
+ CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
// Allocates a fixed double array with uninitialized values. Returns
MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
+ int length, PretenureFlag pretenure = NOT_TENURED);
// These five Create*EntryStub functions are here and forced to not be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
// Allocate empty external array of given type.
- MUST_USE_RESULT AllocationResult AllocateEmptyExternalArray(
- ExternalArrayType array_type);
+ MUST_USE_RESULT AllocationResult
+ AllocateEmptyExternalArray(ExternalArrayType array_type);
// Allocate empty fixed typed array of given type.
- MUST_USE_RESULT AllocationResult AllocateEmptyFixedTypedArray(
- ExternalArrayType array_type);
+ MUST_USE_RESULT AllocationResult
+ AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
// Allocate empty constant pool array.
MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
// Allocates a new foreign object.
- MUST_USE_RESULT AllocationResult AllocateForeign(
- Address address, PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult
+ AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT AllocationResult AllocateCode(int object_size,
- bool immovable);
+ MUST_USE_RESULT AllocationResult
+ AllocateCode(int object_size, bool immovable);
MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
void ZapFromSpace();
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap,
- Object** pointer);
+ Heap* heap, Object** pointer);
Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
- static void ScavengeStoreBufferCallback(Heap* heap,
- MemoryChunk* page,
+ static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
StoreBufferEvent event);
// Performs a major collection in the whole heap.
// TODO(hpayer): Allocation site pretenuring may make this method obsolete.
// Re-visit incremental marking heuristics.
- bool IsHighSurvivalRate() {
- return high_survival_rate_period_length_ > 0;
- }
+ bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
void SelectScavengingVisitorsTable();
- void StartIdleRound() {
- mark_sweeps_since_idle_round_started_ = 0;
- }
+ void StartIdleRound() { mark_sweeps_since_idle_round_started_ = 0; }
void FinishIdleRound() {
mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
static const int kStartMarker = 0xDECADE00;
static const int kEndMarker = 0xDECADE01;
- int* start_marker; // 0
- int* new_space_size; // 1
- int* new_space_capacity; // 2
- intptr_t* old_pointer_space_size; // 3
- intptr_t* old_pointer_space_capacity; // 4
- intptr_t* old_data_space_size; // 5
- intptr_t* old_data_space_capacity; // 6
- intptr_t* code_space_size; // 7
- intptr_t* code_space_capacity; // 8
- intptr_t* map_space_size; // 9
- intptr_t* map_space_capacity; // 10
- intptr_t* cell_space_size; // 11
- intptr_t* cell_space_capacity; // 12
- intptr_t* lo_space_size; // 13
- int* global_handle_count; // 14
- int* weak_global_handle_count; // 15
- int* pending_global_handle_count; // 16
- int* near_death_global_handle_count; // 17
- int* free_global_handle_count; // 18
- intptr_t* memory_allocator_size; // 19
- intptr_t* memory_allocator_capacity; // 20
- int* objects_per_type; // 21
- int* size_per_type; // 22
- int* os_error; // 23
- int* end_marker; // 24
- intptr_t* property_cell_space_size; // 25
- intptr_t* property_cell_space_capacity; // 26
+ int* start_marker; // 0
+ int* new_space_size; // 1
+ int* new_space_capacity; // 2
+ intptr_t* old_pointer_space_size; // 3
+ intptr_t* old_pointer_space_capacity; // 4
+ intptr_t* old_data_space_size; // 5
+ intptr_t* old_data_space_capacity; // 6
+ intptr_t* code_space_size; // 7
+ intptr_t* code_space_capacity; // 8
+ intptr_t* map_space_size; // 9
+ intptr_t* map_space_capacity; // 10
+ intptr_t* cell_space_size; // 11
+ intptr_t* cell_space_capacity; // 12
+ intptr_t* lo_space_size; // 13
+ int* global_handle_count; // 14
+ int* weak_global_handle_count; // 15
+ int* pending_global_handle_count; // 16
+ int* near_death_global_handle_count; // 17
+ int* free_global_handle_count; // 18
+ intptr_t* memory_allocator_size; // 19
+ intptr_t* memory_allocator_capacity; // 20
+ int* objects_per_type; // 21
+ int* size_per_type; // 22
+ int* os_error; // 23
+ int* end_marker; // 24
+ intptr_t* property_cell_space_size; // 25
+ intptr_t* property_cell_space_capacity; // 26
};
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor: public ObjectVisitor {
+class VerifyPointersVisitor : public ObjectVisitor {
public:
inline void VisitPointers(Object** start, Object** end);
};
// Verify that all objects are Smis.
-class VerifySmisVisitor: public ObjectVisitor {
+class VerifySmisVisitor : public ObjectVisitor {
public:
inline void VisitPointers(Object** start, Object** end);
};
public:
explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
Space* next();
+
private:
Heap* heap_;
int counter_;
public:
explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
OldSpace* next();
+
private:
Heap* heap_;
int counter_;
public:
explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
PagedSpace* next();
+
private:
Heap* heap_;
int counter_;
ObjectIterator* CreateIterator();
Heap* heap_;
- int current_space_; // from enum AllocationSpace.
+ int current_space_; // from enum AllocationSpace.
ObjectIterator* iterator_; // object iterator for the current space.
HeapObjectCallback size_func_;
};
class HeapIterator BASE_EMBEDDED {
public:
- enum HeapObjectsFiltering {
- kNoFiltering,
- kFilterUnreachable
- };
+ enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
explicit HeapIterator(Heap* heap);
HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
// Get the address of the keys and field_offsets arrays. Used in
// generated code to perform cache lookups.
- Address keys_address() {
- return reinterpret_cast<Address>(&keys_);
- }
+ Address keys_address() { return reinterpret_cast<Address>(&keys_); }
Address field_offsets_address() {
return reinterpret_cast<Address>(&field_offsets_);
static int Hash(Object* source, Name* name) {
// Uses only lower 32 bits if pointers are larger.
uint32_t source_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source))
- >> kPointerSizeLog2;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+ kPointerSizeLog2;
uint32_t name_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
- >> kPointerSizeLog2;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
+ kPointerSizeLog2;
return (source_hash ^ name_hash) % kLength;
}
// Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
// On success, the returned result is guaranteed to be a COW-array.
- static Object* Lookup(Heap* heap,
- String* key_string,
- Object* key_pattern,
+ static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
ResultsCacheType type);
// Attempt to add value_array to the cache specified by type. On success,
// value_array is turned into a COW-array.
- static void Enter(Isolate* isolate,
- Handle<String> key_string,
- Handle<Object> key_pattern,
- Handle<FixedArray> value_array,
+ static void Enter(Isolate* isolate, Handle<String> key_string,
+ Handle<Object> key_pattern, Handle<FixedArray> value_array,
ResultsCacheType type);
static void Clear(FixedArray* cache);
static const int kRegExpResultsCacheSize = 0x100;
// For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
// after the first match. If FIND_ALL is specified, then tracing will be
// done for all matches.
- PathTracer(Object* search_target,
- WhatToFind what_to_find,
+ PathTracer(Object* search_target, WhatToFind what_to_find,
VisitMode visit_mode)
: search_target_(search_target),
found_target_(false),
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif // DEBUG
+}
+} // namespace v8::internal
-} } // namespace v8::internal
-
-#endif // V8_HEAP_H_
+#endif // V8_HEAP_HEAP_H_
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INCREMENTAL_MARKING_INL_H_
-#define V8_INCREMENTAL_MARKING_INL_H_
+#ifndef V8_HEAP_INCREMENTAL_MARKING_INL_H_
+#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
-#include "src/incremental-marking.h"
+#include "src/heap/incremental-marking.h"
namespace v8 {
namespace internal {
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
- Object** slot,
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
}
-void IncrementalMarking::RecordWrite(HeapObject* obj,
- Object** slot,
+void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
Object* value) {
if (IsMarking() && value->IsHeapObject()) {
RecordWriteSlow(obj, slot, value);
}
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
- Object** slot,
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value) {
if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
}
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
- RelocInfo* rinfo,
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
Object* value) {
if (IsMarking() && value->IsHeapObject()) {
RecordWriteIntoCodeSlow(obj, rinfo, value);
void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
MarkBit mark_bit) {
DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
- DCHECK(obj->Size() >= 2*kPointerSize);
+ DCHECK(obj->Size() >= 2 * kPointerSize);
DCHECK(IsMarking());
Marking::BlackToGrey(mark_bit);
int obj_size = obj->Size();
Marking::WhiteToGrey(mark_bit);
marking_deque_.PushGrey(obj);
}
+}
+} // namespace v8::internal
-
-} } // namespace v8::internal
-
-#endif // V8_INCREMENTAL_MARKING_INL_H_
+#endif // V8_HEAP_INCREMENTAL_MARKING_INL_H_
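// For illustration only (editorial sketch, not V8 code): the RecordWrite*
// helpers above form the incremental-marking write barrier.  The invariant
// they protect, in tri-color terms: a pointer to a not-yet-marked (white)
// object stored into an already-scanned (black) object must grey the value
// again (or record the slot), or the marker would never revisit it.  A toy
// version with hypothetical types:
#include <deque>

namespace barrier_sketch {

enum Color { WHITE, GREY, BLACK };

struct ToyObject {
  Color color = WHITE;
};

struct ToyMarker {
  std::deque<ToyObject*> worklist;  // grey objects still to be scanned
  bool marking_active = false;

  // Called after "host->field = value" while incremental marking runs.
  void RecordWrite(ToyObject* host, ToyObject* value) {
    if (!marking_active || value == nullptr) return;
    if (host->color == BLACK && value->color == WHITE) {
      value->color = GREY;  // re-grey the value so it gets scanned later
      worklist.push_back(value);
    }
  }
};

}  // namespace barrier_sketch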
#include "src/v8.h"
-#include "src/incremental-marking.h"
+#include "src/heap/incremental-marking.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
unscanned_bytes_of_large_object_(0) {}
-void IncrementalMarking::TearDown() {
- delete marking_deque_memory_;
-}
+void IncrementalMarking::TearDown() { delete marking_deque_memory_; }
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
- Object** slot,
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
Object* value) {
if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
// Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(
- HeapObject::RawField(obj, 0), slot, value);
+ heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
+ slot, value);
}
}
}
-void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
- Object** slot,
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate) {
DCHECK(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
marking->write_barriers_invoked_since_last_step_ +=
MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
+ chunk->write_barrier_counter();
chunk->set_write_barrier_counter(
MemoryChunk::kWriteBarrierCounterGranularity);
}
}
-void IncrementalMarking::RecordCodeTargetPatch(Code* host,
- Address pc,
+void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
HeapObject* value) {
if (IsMarking()) {
RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
if (IsMarking()) {
- Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc);
+ Code* host = heap_->isolate()
+ ->inner_pointer_to_code_cache()
+ ->GcSafeFindCodeForInnerPointer(pc);
RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
RecordWriteIntoCode(host, &rinfo, value);
}
Code* value) {
if (BaseRecordWrite(host, slot, value)) {
DCHECK(slot != NULL);
- heap_->mark_compact_collector()->
- RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
+ heap_->mark_compact_collector()->RecordCodeEntrySlot(
+ reinterpret_cast<Address>(slot), value);
}
}
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
- MarkBit mark_bit,
- int size) {
+ MarkBit mark_bit, int size) {
DCHECK(!Marking::IsImpossible(mark_bit));
if (mark_bit.Get()) return;
mark_bit.Set();
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
- MarkBit mark_bit,
- int size) {
+ MarkBit mark_bit, int size) {
DCHECK(!Marking::IsImpossible(mark_bit));
if (Marking::IsBlack(mark_bit)) return;
Marking::MarkBlack(mark_bit);
// fully scanned. Fall back to scanning it through to the end in case this
// fails because of a full deque.
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
- chunk->progress_bar());
- int end_offset = Min(object_size,
- start_offset + kProgressBarScanningChunk);
+ int start_offset =
+ Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+ int end_offset =
+ Min(object_size, start_offset + kProgressBarScanningChunk);
int already_scanned_offset = start_offset;
bool scan_until_end = false;
do {
- VisitPointersWithAnchor(heap,
- HeapObject::RawField(object, 0),
+ VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
HeapObject::RawField(object, start_offset),
HeapObject::RawField(object, end_offset));
start_offset = end_offset;
}
}
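// For illustration only (editorial sketch, not V8 code): the hunk above scans
// a very large FixedArray in slices, storing how far it got in the page's
// progress bar so a single huge array cannot monopolize one incremental
// marking step.  The slicing in isolation; kSliceBytes, kHeaderBytes and the
// callback are hypothetical, not V8's values:
#include <algorithm>

namespace progress_sketch {

const int kSliceBytes = 32 * 1024;  // scan budget per increment (illustrative)
const int kHeaderBytes = 8;         // skip the object header (illustrative)

// Scans one slice and returns the new progress offset; callers repeat until
// the returned value reaches object_size.
int ScanSlice(int object_size, int progress,
              void (*visit_range)(int start, int end)) {
  int start_offset = std::max(kHeaderBytes, progress);
  int end_offset = std::min(object_size, start_offset + kSliceBytes);
  visit_range(start_offset, end_offset);
  return end_offset;
}

}  // namespace progress_sketch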
- INLINE(static void VisitPointersWithAnchor(Heap* heap,
- Object** anchor,
- Object** start,
- Object** end)) {
+ INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
+ Object** start, Object** end)) {
for (Object** p = start; p < end; p++) {
Object* obj = *p;
if (obj->IsHeapObject()) {
public:
explicit IncrementalMarkingRootMarkingVisitor(
IncrementalMarking* incremental_marking)
- : incremental_marking_(incremental_marking) {
- }
+ : incremental_marking_(incremental_marking) {}
- void VisitPointer(Object** p) {
- MarkObjectByPointer(p);
- }
+ void VisitPointer(Object** p) { MarkObjectByPointer(p); }
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
// It's difficult to filter out slots recorded for large objects.
if (chunk->owner()->identity() == LO_SPACE &&
- chunk->size() > static_cast<size_t>(Page::kPageSize) &&
- is_compacting) {
+ chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
} else if (chunk->owner()->identity() == CELL_SPACE ||
// Only start incremental marking in a safe state: 1) when incremental
// marking is turned on, 2) when we are currently not in a GC, and
// 3) when we are currently not serializing or deserializing the heap.
- return FLAG_incremental_marking &&
- FLAG_incremental_marking_steps &&
- heap_->gc_state() == Heap::NOT_IN_GC &&
- !heap_->isolate()->serializer_enabled() &&
- heap_->isolate()->IsInitialized() &&
- heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
+ return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
+ heap_->gc_state() == Heap::NOT_IN_GC &&
+ !heap_->isolate()->serializer_enabled() &&
+ heap_->isolate()->IsInitialized() &&
+ heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
- DCHECK(RecordWriteStub::GetMode(stub) ==
- RecordWriteStub::STORE_BUFFER_ONLY);
+ DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);
if (!IsMarking()) {
// Initially stub is generated in STORE_BUFFER_ONLY mode thus
if (stubs->IsKey(k)) {
uint32_t key = NumberToUint32(k);
- if (CodeStub::MajorKeyFromKey(key) ==
- CodeStub::RecordWrite) {
+ if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
Object* e = stubs->ValueAt(i);
if (e->IsCode()) {
RecordWriteStub::Patch(Code::cast(e), mode);
}
is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
- heap_->mark_compact_collector()->StartCompaction(
- MarkCompactCollector::INCREMENTAL_COMPACTION);
+ heap_->mark_compact_collector()->StartCompaction(
+ MarkCompactCollector::INCREMENTAL_COMPACTION);
state_ = MARKING;
- RecordWriteStub::Mode mode = is_compacting_ ?
- RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+ RecordWriteStub::Mode mode = is_compacting_
+ ? RecordWriteStub::INCREMENTAL_COMPACTION
+ : RecordWriteStub::INCREMENTAL;
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
ActivateIncrementalWriteBarrier();
- // Marking bits are cleared by the sweeper.
+// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- DCHECK(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
- (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- Marking::IsBlack(mark_bit)));
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ DCHECK(Marking::IsGrey(mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+ (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ Marking::IsBlack(mark_bit)));
#endif
}
}
void IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action) {
- if (heap_->gc_state() != Heap::NOT_IN_GC ||
- !FLAG_incremental_marking ||
+ if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
!FLAG_incremental_marking_steps ||
(state_ != SWEEPING && state_ != MARKING)) {
return;
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INCREMENTAL_MARKING_H_
-#define V8_INCREMENTAL_MARKING_H_
+#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
+#define V8_HEAP_INCREMENTAL_MARKING_H_
#include "src/execution.h"
-#include "src/mark-compact.h"
+#include "src/heap/mark-compact.h"
#include "src/objects.h"
namespace v8 {
class IncrementalMarking {
public:
- enum State {
- STOPPED,
- SWEEPING,
- MARKING,
- COMPLETE
- };
-
- enum CompletionAction {
- GC_VIA_STACK_GUARD,
- NO_GC_VIA_STACK_GUARD
- };
+ enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
+
+ enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
explicit IncrementalMarking(Heap* heap);
}
}
- static void RecordWriteFromCode(HeapObject* obj,
- Object** slot,
+ static void RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate);
// Record a slot for compaction. Returns false for objects that are
// the incremental cycle (stays white).
INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
- INLINE(void RecordWriteIntoCode(HeapObject* obj,
- RelocInfo* rinfo,
+ INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
Object* value));
- INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
- Object** slot,
+ INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
- void RecordWriteIntoCodeSlow(HeapObject* obj,
- RelocInfo* rinfo,
+ void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
if (IsMarking()) {
if (marking_speed_ < kFastMarking) {
if (FLAG_trace_gc) {
- PrintPID("Increasing marking speed to %d "
- "due to high promotion rate\n",
- static_cast<int>(kFastMarking));
+ PrintPID(
+ "Increasing marking speed to %d "
+ "due to high promotion rate\n",
+ static_cast<int>(kFastMarking));
}
marking_speed_ = kFastMarking;
}
}
}
- void EnterNoMarkingScope() {
- no_marking_scope_depth_++;
- }
+ void EnterNoMarkingScope() { no_marking_scope_depth_++; }
- void LeaveNoMarkingScope() {
- no_marking_scope_depth_--;
- }
+ void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
void UncommitMarkingDeque();
static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
void DeactivateIncrementalWriteBarrier();
- static void SetOldSpacePageFlags(MemoryChunk* chunk,
- bool is_marking,
+ static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
bool is_compacting);
static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
+}
+} // namespace v8::internal
-} } // namespace v8::internal
-
-#endif // V8_INCREMENTAL_MARKING_H_
+#endif // V8_HEAP_INCREMENTAL_MARKING_H_
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MARK_COMPACT_INL_H_
-#define V8_MARK_COMPACT_INL_H_
+#ifndef V8_HEAP_MARK_COMPACT_INL_H_
+#define V8_HEAP_MARK_COMPACT_INL_H_
#include <memory.h>
+#include "src/heap/mark-compact.h"
#include "src/isolate.h"
-#include "src/mark-compact.h"
namespace v8 {
}
-void MarkCompactCollector::RecordSlot(Object** anchor_slot,
- Object** slot,
+void MarkCompactCollector::RecordSlot(Object** anchor_slot, Object** slot,
Object* object,
SlotsBuffer::AdditionMode mode) {
Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (object_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(anchor_slot)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- object_page->slots_buffer_address(),
- slot,
- mode)) {
+ object_page->slots_buffer_address(), slot, mode)) {
EvictEvacuationCandidate(object_page);
}
}
}
+}
+} // namespace v8::internal
-
-} } // namespace v8::internal
-
-#endif // V8_MARK_COMPACT_INL_H_
+#endif // V8_HEAP_MARK_COMPACT_INL_H_
#include "src/execution.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/spaces-inl.h"
+#include "src/heap/sweeper-thread.h"
#include "src/heap-profiler.h"
#include "src/ic-inl.h"
-#include "src/incremental-marking.h"
-#include "src/mark-compact.h"
#include "src/objects-visiting.h"
#include "src/objects-visiting-inl.h"
-#include "src/spaces-inl.h"
#include "src/stub-cache.h"
-#include "src/sweeper-thread.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// MarkCompactCollector
-MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
+MarkCompactCollector::MarkCompactCollector(Heap* heap)
+ : // NOLINT
#ifdef DEBUG
state_(IDLE),
#endif
migration_slots_buffer_(NULL),
heap_(heap),
code_flusher_(NULL),
- have_code_to_deoptimize_(false) { }
+ have_code_to_deoptimize_(false) {
+}
#ifdef VERIFY_HEAP
-class VerifyMarkingVisitor: public ObjectVisitor {
+class VerifyMarkingVisitor : public ObjectVisitor {
public:
explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
HeapObject* object;
Address next_object_must_be_here_or_later = bottom;
- for (Address current = bottom;
- current < top;
- current += kPointerSize) {
+ for (Address current = bottom; current < top; current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
CHECK(current >= next_object_must_be_here_or_later);
}
-class VerifyEvacuationVisitor: public ObjectVisitor {
+class VerifyEvacuationVisitor : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
VerifyEvacuationVisitor visitor;
HeapObjectIterator iterator(page, NULL);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
- heap_object = iterator.Next()) {
+ heap_object = iterator.Next()) {
// We skip free space objects.
if (!heap_object->IsFiller()) {
heap_object->Iterate(&visitor);
#ifdef DEBUG
-class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
+class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
public:
VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
// Set array length to zero to prevent cycles while iterating
// over array bodies, this is easier than intrusive marking.
array->set_length(0);
- array->IterateBody(
- FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
+ array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
+ this);
array->set_length(length);
}
break;
}
-void MarkCompactCollector::TearDown() {
- AbortCompaction();
-}
+void MarkCompactCollector::TearDown() { AbortCompaction(); }
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
intptr_t reserved = (number_of_pages * space->AreaSize());
intptr_t free = reserved - space->SizeOfObjects();
PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
- AllocationSpaceName(space->identity()),
- number_of_pages,
- static_cast<int>(free),
- static_cast<double>(free) * 100 / reserved);
+ AllocationSpaceName(space->identity()), number_of_pages,
+ static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
CollectEvacuationCandidates(heap()->old_pointer_space());
CollectEvacuationCandidates(heap()->old_data_space());
- if (FLAG_compact_code_space &&
- (mode == NON_INCREMENTAL_COMPACTION ||
- FLAG_incremental_code_compaction)) {
+ if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
+ FLAG_incremental_code_compaction)) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
HeapObjectIterator code_iterator(heap()->code_space());
- for (HeapObject* obj = code_iterator.Next();
- obj != NULL;
+ for (HeapObject* obj = code_iterator.Next(); obj != NULL;
obj = code_iterator.Next()) {
Code* code = Code::cast(obj);
if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
void MarkCompactCollector::VerifyOmittedMapChecks() {
HeapObjectIterator iterator(heap()->map_space());
- for (HeapObject* obj = iterator.Next();
- obj != NULL;
- obj = iterator.Next()) {
+ for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
Map* map = Map::cast(obj);
map->VerifyOmittedMapChecks();
}
class MarkCompactCollector::SweeperTask : public v8::Task {
public:
- SweeperTask(Heap* heap, PagedSpace* space)
- : heap_(heap), space_(space) {}
+ SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
virtual ~SweeperTask() {}
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
- case NEW_SPACE: return "NEW_SPACE";
- case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
- case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
- case CODE_SPACE: return "CODE_SPACE";
- case MAP_SPACE: return "MAP_SPACE";
- case CELL_SPACE: return "CELL_SPACE";
+ case NEW_SPACE:
+ return "NEW_SPACE";
+ case OLD_POINTER_SPACE:
+ return "OLD_POINTER_SPACE";
+ case OLD_DATA_SPACE:
+ return "OLD_DATA_SPACE";
+ case CODE_SPACE:
+ return "CODE_SPACE";
+ case MAP_SPACE:
+ return "MAP_SPACE";
+ case CELL_SPACE:
+ return "CELL_SPACE";
case PROPERTY_CELL_SPACE:
return "PROPERTY_CELL_SPACE";
- case LO_SPACE: return "LO_SPACE";
+ case LO_SPACE:
+ return "LO_SPACE";
default:
UNREACHABLE();
}
// If page was not swept then there are no free list items on it.
if (!p->WasSwept()) {
if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d bytes live (unswept)\n",
- reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
- p->LiveBytes());
+ PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
+ AllocationSpaceName(space->identity()), p->LiveBytes());
}
return 0;
}
intptr_t ratio_threshold;
intptr_t area_size = space->AreaSize();
if (space->identity() == CODE_SPACE) {
- ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
- area_size;
+ ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
ratio_threshold = 10;
} else {
- ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
- area_size;
+ ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
ratio_threshold = 15;
}
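// Worked example of the heuristic above (illustrative numbers, not from a
// real heap): for a code-space page with area_size = 1000 KB,
// medium_size_ = 40 KB and large_size_ = 100 KB of free space,
// ratio = (40 * 10 + 100 * 2) * 100 / 1000 = 60, which exceeds the
// code-space threshold of 10, so the page counts as fragmented.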
if (FLAG_trace_fragmentation) {
PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
+ reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
static_cast<int>(sizes.small_size_),
- static_cast<double>(sizes.small_size_ * 100) /
- area_size,
+ static_cast<double>(sizes.small_size_ * 100) / area_size,
static_cast<int>(sizes.medium_size_),
- static_cast<double>(sizes.medium_size_ * 100) /
- area_size,
+ static_cast<double>(sizes.medium_size_ * 100) / area_size,
static_cast<int>(sizes.large_size_),
- static_cast<double>(sizes.large_size_ * 100) /
- area_size,
+ static_cast<double>(sizes.large_size_ * 100) / area_size,
static_cast<int>(sizes.huge_size_),
- static_cast<double>(sizes.huge_size_ * 100) /
- area_size,
+ static_cast<double>(sizes.huge_size_ * 100) / area_size,
(ratio > ratio_threshold) ? "[fragmented]" : "");
}
class Candidate {
public:
- Candidate() : fragmentation_(0), page_(NULL) { }
- Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
+ Candidate() : fragmentation_(0), page_(NULL) {}
+ Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
int fragmentation() { return fragmentation_; }
Page* page() { return page_; }
Page* page_;
};
- enum CompactionMode {
- COMPACT_FREE_LISTS,
- REDUCE_MEMORY_FOOTPRINT
- };
+ enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
CompactionMode mode = COMPACT_FREE_LISTS;
}
if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
- "evacuation candidate limit: %d\n",
- static_cast<double>(over_reserved) / MB,
- static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold),
- max_evacuation_candidates);
+ PrintF(
+ "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
+ "evacuation candidate limit: %d\n",
+ static_cast<double>(over_reserved) / MB,
+ static_cast<double>(reserved) / MB,
+ static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
}
intptr_t estimated_release = 0;
}
if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d (%.2f%%) free %s\n",
- reinterpret_cast<void*>(p),
+ PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
AllocationSpaceName(space->identity()),
static_cast<int>(free_bytes),
static_cast<double>(free_bytes * 100) / p->area_size(),
}
if (count > 0 && FLAG_trace_fragmentation) {
- PrintF("Collected %d evacuation candidates for space %s\n",
- count,
+ PrintF("Collected %d evacuation candidates for space %s\n", count,
AllocationSpaceName(space->identity()));
}
}
}
PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next();
- space != NULL;
+ for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
space->PrepareForMarkCompact();
}
// setter did not record the slot update and we have to do that manually.
Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->
- RecordCodeEntrySlot(slot, target);
+ isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
+ target);
Object** shared_code_slot =
HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(
+ shared_code_slot, shared_code_slot, *shared_code_slot);
candidate = next_candidate;
}
Object** code_slot =
HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(code_slot, code_slot, *code_slot);
+ isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
+ *code_slot);
candidate = next_candidate;
}
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
int new_length = SharedFunctionInfo::kEntriesStart;
int old_length = code_map->length();
- for (int i = SharedFunctionInfo::kEntriesStart;
- i < old_length;
+ for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
i += SharedFunctionInfo::kEntryLength) {
Code* code =
Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
if (j == SharedFunctionInfo::kOsrAstIdOffset) {
DCHECK(object->IsSmi());
} else {
- DCHECK(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(slot, slot, *slot);
+ DCHECK(
+ Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
+ isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
+ *slot);
}
}
}
void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())->
- get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
+ DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
+ ->get(SharedFunctionInfo::kNextMapIndex)
+ ->IsUndefined());
// Make sure previous flushing decisions are revisited.
isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
- static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
- Map* map, HeapObject* obj);
+ static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
+ HeapObject* obj);
static void ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array,
- FixedArraySubInstanceType fast_type,
+ FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
FixedArraySubInstanceType dictionary_type);
- template<MarkCompactMarkingVisitor::VisitorId id>
+ template <MarkCompactMarkingVisitor::VisitorId id>
class ObjectStatsTracker {
public:
static inline void Visit(Map* map, HeapObject* obj);
// Mark object pointed to by p.
INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
- Object** anchor_slot,
- Object** p)) {
+ Object** anchor_slot, Object** p)) {
if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p);
collector->RecordSlot(anchor_slot, p, object);
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- INLINE(static bool VisitUnmarkedObjects(Heap* heap,
- Object** start,
+ INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
Object** end)) {
    // Return false if we are close to the stack limit.
StackLimitCheck check(heap->isolate());
}
private:
- template<int id>
+ template <int id>
static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
// Code flushing support.
static const int kRegExpCodeThreshold = 5;
- static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
- JSRegExp* re,
+ static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
bool is_ascii) {
// Make sure that the fixed array is in fact initialized on the RegExp.
// We could potentially trigger a GC when initializing the RegExp.
if (HeapObject::cast(re->data())->map()->instance_type() !=
- FIXED_ARRAY_TYPE) return;
+ FIXED_ARRAY_TYPE)
+ return;
// Make sure this is a RegExp that actually contains code.
if (re->TypeTag() != JSRegExp::IRREGEXP) return;
// object.
FixedArray* data = FixedArray::cast(re->data());
Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
- heap->mark_compact_collector()->
- RecordSlot(slot, slot, code);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, code);
// Set a number in the 0-255 range to guarantee no smi overflow.
re->SetDataAt(JSRegExp::code_index(is_ascii),
void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array,
- FixedArraySubInstanceType fast_type,
+ FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
FixedArraySubInstanceType dictionary_type) {
Heap* heap = fixed_array->map()->GetHeap();
if (fixed_array->map() != heap->fixed_cow_array_map() &&
fixed_array->map() != heap->fixed_double_array_map() &&
fixed_array != heap->empty_fixed_array()) {
if (fixed_array->IsDictionary()) {
- heap->RecordFixedArraySubTypeStats(dictionary_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
} else {
- heap->RecordFixedArraySubTypeStats(fast_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
}
}
}
non_count_table_.GetVisitorById(id)(map, obj);
if (obj->IsJSObject()) {
JSObject* object = JSObject::cast(obj);
- ObjectStatsCountFixedArray(object->elements(),
- DICTIONARY_ELEMENTS_SUB_TYPE,
+ ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
FAST_ELEMENTS_SUB_TYPE);
ObjectStatsCountFixedArray(object->properties(),
DICTIONARY_PROPERTIES_SUB_TYPE,
}
-template<MarkCompactMarkingVisitor::VisitorId id>
-void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
- Map* map, HeapObject* obj) {
+template <MarkCompactMarkingVisitor::VisitorId id>
+void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
+ HeapObject* obj) {
ObjectStatsVisitBase(id, map, obj);
}
-template<>
+template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
MarkCompactMarkingVisitor::kVisitMap> {
public:
};
-template<>
+template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
MarkCompactMarkingVisitor::kVisitCode> {
public:
};
-template<>
+template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
public:
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->scope_info() != heap->empty_fixed_array()) {
heap->RecordFixedArraySubTypeStats(
- SCOPE_INFO_SUB_TYPE,
- FixedArray::cast(sfi->scope_info())->Size());
+ SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
}
ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
}
};
-template<>
+template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
MarkCompactMarkingVisitor::kVisitFixedArray> {
public:
Heap* heap = map->GetHeap();
FixedArray* fixed_array = FixedArray::cast(obj);
if (fixed_array == heap->string_table()) {
- heap->RecordFixedArraySubTypeStats(
- STRING_TABLE_SUB_TYPE,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
+ fixed_array->Size());
}
ObjectStatsVisitBase(kVisitFixedArray, map, obj);
}
void MarkCompactMarkingVisitor::Initialize() {
StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
- table_.Register(kVisitJSRegExp,
- &VisitRegExpAndFlushCode);
+ table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
if (FLAG_track_gc_object_stats) {
// Copy the visitor table to make call-through possible.
non_count_table_.CopyFrom(&table_);
-#define VISITOR_ID_COUNT_FUNCTION(id) \
- table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
+#define VISITOR_ID_COUNT_FUNCTION(id) \
+ table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
#undef VISITOR_ID_COUNT_FUNCTION
}
class RootMarkingVisitor : public ObjectVisitor {
public:
explicit RootMarkingVisitor(Heap* heap)
- : collector_(heap->mark_compact_collector()) { }
+ : collector_(heap->mark_compact_collector()) {}
- void VisitPointer(Object** p) {
- MarkObjectByPointer(p);
- }
+ void VisitPointer(Object** p) { MarkObjectByPointer(p); }
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
// Skip the weak next code link in a code object, which is visited in
// ProcessTopOptimizedFrame.
- void VisitNextCodeLink(Object** p) { }
+ void VisitNextCodeLink(Object** p) {}
private:
void MarkObjectByPointer(Object** p) {
// Helper class for pruning the string table.
-template<bool finalize_external_strings>
+template <bool finalize_external_strings>
class StringTableCleaner : public ObjectVisitor {
public:
- explicit StringTableCleaner(Heap* heap)
- : heap_(heap), pointers_removed_(0) { }
+ explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
virtual void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
-template<class T>
+template <class T>
static void DiscoverGreyObjectsWithIterator(Heap* heap,
MarkingDeque* marking_deque,
T* it) {
DCHECK(!marking_deque->IsFull());
Map* filler_map = heap->one_pointer_filler_map();
- for (HeapObject* object = it->Next();
- object != NULL;
- object = it->Next()) {
+ for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
MarkBit markbit = Marking::MarkBitFrom(object);
if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
Marking::GreyToBlack(markbit);
MarkBit::CellType grey_objects;
if (it.HasNext()) {
- const MarkBit::CellType next_cell = *(cell+1);
- grey_objects = current_cell &
- ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+ const MarkBit::CellType next_cell = *(cell + 1);
+ grey_objects = current_cell & ((current_cell >> 1) |
+ (next_cell << (Bitmap::kBitsPerCell - 1)));
} else {
grey_objects = current_cell & (current_cell >> 1);
}
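  // A toy illustration of the grey-detection trick above (8-bit cells for
  // brevity; real cells are MarkBit::CellType wide). An object's colour
  // lives in its mark bit and the following bit, with grey encoded as 11.
  // For current_cell = 0b00000110 (bits 1 and 2 set):
  //   current_cell >> 1                   = 0b00000011
  //   current_cell & (current_cell >> 1)  = 0b00000010
  // Bit 1 survives, flagging the object whose mark bits start at bit 1 as
  // grey. The next_cell term only matters when a bit pair straddles a cell
  // boundary.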
int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
- NewSpace* new_space,
- NewSpacePage* p) {
+ NewSpace* new_space, NewSpacePage* p) {
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
}
Object* target = allocation.ToObjectChecked();
- MigrateObject(HeapObject::cast(target),
- object,
- size,
- NEW_SPACE);
+ MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
heap()->IncrementSemiSpaceCopiedObjectSize(size);
}
*cells = 0;
}
-static void DiscoverGreyObjectsInSpace(Heap* heap,
- MarkingDeque* marking_deque,
+static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
PagedSpace* space) {
if (space->swept_precisely()) {
HeapObjectIterator it(space);
DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
+ DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
heap()->old_pointer_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->old_data_space());
+ DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->code_space());
+ DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->map_space());
+ DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->cell_space());
+ DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
+ DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
heap()->property_cell_space());
if (marking_deque_.IsFull()) return;
LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(heap(),
- &marking_deque_,
- &lo_it);
+ DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
if (marking_deque_.IsFull()) return;
marking_deque_.ClearOverflowed();
if (FLAG_force_marking_deque_overflows) {
marking_deque_end = marking_deque_start + 64 * kPointerSize;
}
- marking_deque_.Initialize(marking_deque_start,
- marking_deque_end);
+ marking_deque_.Initialize(marking_deque_start, marking_deque_end);
DCHECK(!marking_deque_.overflowed());
if (incremental_marking_overflowed) {
if (IsMarked(cell)) {
int offset = Cell::kValueOffset;
MarkCompactMarkingVisitor::VisitPointer(
- heap(),
- reinterpret_cast<Object**>(cell->address() + offset));
+ heap(), reinterpret_cast<Object**>(cell->address() + offset));
}
}
}
MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
int existing_elements = map_cache->NumberOfElements();
int used_elements = 0;
- for (int i = MapCache::kElementsStartIndex;
- i < map_cache->length();
+ for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
i += MapCache::kEntrySize) {
Object* raw_key = map_cache->get(i);
if (raw_key == heap()->undefined_value() ||
- raw_key == heap()->the_hole_value()) continue;
+ raw_key == heap()->the_hole_value())
+ continue;
STATIC_ASSERT(MapCache::kEntrySize == 2);
Object* raw_map = map_cache->get(i + 1);
if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
// a marked map to an unmarked map to null transitions. This action
// is carried out only on maps of JSObjects and related subtypes.
HeapObjectIterator map_iterator(heap()->map_space());
- for (HeapObject* obj = map_iterator.Next();
- obj != NULL;
+ for (HeapObject* obj = map_iterator.Next(); obj != NULL;
obj = map_iterator.Next()) {
Map* map = Map::cast(obj);
// Iterate over property cell space, removing dependent code that is not
// otherwise kept alive by strong references.
HeapObjectIterator cell_iterator(heap_->property_cell_space());
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL;
+ for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
cell = cell_iterator.Next()) {
if (IsMarked(cell)) {
ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
// Iterate over allocation sites, removing dependent code that is not
// otherwise kept alive by strong references.
Object* undefined = heap()->undefined_value();
- for (Object* site = heap()->allocation_sites_list();
- site != undefined;
+ for (Object* site = heap()->allocation_sites_list(); site != undefined;
site = AllocationSite::cast(site)->weak_next()) {
if (IsMarked(site)) {
ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
int proto_index = proto_offset + new_number_of_transitions * step;
int map_index = map_offset + new_number_of_transitions * step;
if (new_number_of_transitions != i) {
- prototype_transitions->set(
- proto_index,
- prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set(
- map_index,
- cached_map,
- SKIP_WRITE_BARRIER);
+ prototype_transitions->set(proto_index, prototype,
+ UPDATE_WRITE_BARRIER);
+ prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
}
Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
RecordSlot(slot, slot, prototype);
// Fill slots that became free with undefined value.
for (int i = new_number_of_transitions * step;
- i < number_of_transitions * step;
- i++) {
+ i < number_of_transitions * step; i++) {
prototype_transitions->set_undefined(header + i);
}
}
}
-void MarkCompactCollector::ClearDependentCode(
- DependentCode* entries) {
+void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
DisallowHeapAllocation no_allocation;
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
RecordSlot(anchor, key_slot, *key_slot);
Object** value_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
- MarkCompactMarkingVisitor::MarkObjectByPointer(
- this, anchor, value_slot);
+ MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
+ value_slot);
}
}
}
if (heap_->InNewSpace(value)) {
heap_->store_buffer()->Mark(slot);
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
reinterpret_cast<Object**>(slot),
SlotsBuffer::IGNORE_OVERFLOW);
}
}
-
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst,
- HeapObject* src,
- int size,
- AllocationSpace dest) {
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+ int size, AllocationSpace dest) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT,
- code_entry_slot,
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+ SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
} else if (dst->IsConstantPoolArray()) {
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT,
- code_entry_slot,
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+ SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
}
} else if (dest == CODE_SPACE) {
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- SlotsBuffer::RELOCATED_CODE_OBJECT,
- dst_addr,
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+ SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
SlotsBuffer::IGNORE_OVERFLOW);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor: public ObjectVisitor {
+class PointersUpdatingVisitor : public ObjectVisitor {
public:
- explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
+ explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(Object** p) {
- UpdatePointer(p);
- }
+ void VisitPointer(Object** p) { UpdatePointer(p); }
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) UpdatePointer(p);
}
private:
- inline void UpdatePointer(Object** p) {
- UpdateSlot(heap_, p);
- }
+ inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
Heap* heap_;
};
HeapObject* target;
AllocationResult allocation = target_space->AllocateRaw(object_size);
if (allocation.To(&target)) {
- MigrateObject(target,
- object,
- object_size,
- target_space->identity());
+ MigrateObject(target, object, object_size, target_space->identity());
heap()->IncrementPromotedObjectsSize(object_size);
return true;
}
};
-static inline void UpdateSlot(Isolate* isolate,
- ObjectVisitor* v,
- SlotsBuffer::SlotType slot_type,
- Address addr) {
+static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
+ SlotsBuffer::SlotType slot_type, Address addr) {
switch (slot_type) {
case SlotsBuffer::CODE_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
}
-enum SweepingMode {
- SWEEP_ONLY,
- SWEEP_AND_VISIT_LIVE_OBJECTS
-};
+enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
-enum SkipListRebuildingMode {
- REBUILD_SKIP_LIST,
- IGNORE_SKIP_LIST
-};
+enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
-enum FreeSpaceTreatmentMode {
- IGNORE_FREE_SPACE,
- ZAP_FREE_SPACE
-};
+enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
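// The three enums above select a SweepPrecisely<> instantiation. For example
// (see the call sites further down in this patch), code-space pages are swept
// with REBUILD_SKIP_LIST and, when FLAG_zap_code_space is set, ZAP_FREE_SPACE,
// while the pointer and data spaces use IGNORE_SKIP_LIST and
// IGNORE_FREE_SPACE.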
-template<MarkCompactCollector::SweepingParallelism mode>
-static intptr_t Free(PagedSpace* space,
- FreeList* free_list,
- Address start,
+template <MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
int size) {
if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
DCHECK(free_list == NULL);
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
// Returns the size of the biggest contiguous freed memory chunk in bytes.
-template<SweepingMode sweeping_mode,
- MarkCompactCollector::SweepingParallelism parallelism,
- SkipListRebuildingMode skip_list_mode,
- FreeSpaceTreatmentMode free_space_mode>
-static int SweepPrecisely(PagedSpace* space,
- FreeList* free_list,
- Page* p,
- ObjectVisitor* v) {
+template <SweepingMode sweeping_mode,
+ MarkCompactCollector::SweepingParallelism parallelism,
+ SkipListRebuildingMode skip_list_mode,
+ FreeSpaceTreatmentMode free_space_mode>
+static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
+ ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
space->identity() == CODE_SPACE);
MarkBit::CellType* cell = it.CurrentCell();
int live_objects = MarkWordToObjectStarts(*cell, offsets);
int live_index = 0;
- for ( ; live_objects != 0; live_objects--) {
+ for (; live_objects != 0; live_objects--) {
Address free_end = cell_base + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
int size = static_cast<int>(free_end - free_start);
live_object->IterateBody(map->instance_type(), size, v);
}
if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
- int new_region_start =
- SkipList::RegionNumber(free_end);
+ int new_region_start = SkipList::RegionNumber(free_end);
int new_region_end =
SkipList::RegionNumber(free_end + size - kPointerSize);
- if (new_region_start != curr_region ||
- new_region_end != curr_region) {
+ if (new_region_start != curr_region || new_region_end != curr_region) {
skip_list->AddObject(free_end, size);
curr_region = new_region_end;
}
static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
Page* p = Page::FromAddress(code->address());
- if (p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
return false;
}
*end_cell |= end_mask;
}
} else {
- for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
+ for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
*cell = 0;
}
}
Heap::RelocationLock relocation_lock(heap());
bool code_slots_filtering_required;
- { GCTracer::Scope gc_scope(heap()->tracer(),
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
EvacuateNewSpace();
}
- { GCTracer::Scope gc_scope(heap()->tracer(),
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuatePages();
}
// Second pass: find pointers to new space and update them.
PointersUpdatingVisitor updating_visitor(heap());
- { GCTracer::Scope gc_scope(heap()->tracer(),
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
// Update pointers in to space.
SemiSpaceIterator to_it(heap()->new_space()->bottom(),
heap()->new_space()->top());
- for (HeapObject* object = to_it.Next();
- object != NULL;
+ for (HeapObject* object = to_it.Next(); object != NULL;
object = to_it.Next()) {
Map* map = object->map();
- object->IterateBody(map->instance_type(),
- object->SizeFromMap(map),
+ object->IterateBody(map->instance_type(), object->SizeFromMap(map),
&updating_visitor);
}
}
- { GCTracer::Scope gc_scope(heap()->tracer(),
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
}
- { GCTracer::Scope gc_scope(heap()->tracer(),
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
- StoreBufferRebuildScope scope(heap_,
- heap_->store_buffer(),
+ StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
&UpdatePointer);
}
- { GCTracer::Scope gc_scope(heap()->tracer(),
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
- SlotsBuffer::UpdateSlotsRecordedIn(heap_,
- migration_slots_buffer_,
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
code_slots_filtering_required);
if (FLAG_trace_fragmentation) {
PrintF(" migration slots buffer: %d\n",
}
int npages = evacuation_candidates_.length();
- { GCTracer::Scope gc_scope(
- heap()->tracer(), GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+ {
+ GCTracer::Scope gc_scope(
+ heap()->tracer(),
+ GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
- SlotsBuffer::UpdateSlotsRecordedIn(heap_,
- p->slots_buffer(),
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
code_slots_filtering_required);
if (FLAG_trace_fragmentation) {
- PrintF(" page %p slots buffer: %d\n",
- reinterpret_cast<void*>(p),
+ PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
- SWEEP_ON_MAIN_THREAD,
- IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+ IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
space, NULL, p, &updating_visitor);
break;
case CODE_SPACE:
if (FLAG_zap_code_space) {
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
- SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+ REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
space, NULL, p, &updating_visitor);
} else {
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
- SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+ REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
space, NULL, p, &updating_visitor);
}
break;
// Update pointers from cells.
HeapObjectIterator cell_iterator(heap_->cell_space());
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL;
+ for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
cell = cell_iterator.Next()) {
if (cell->IsCell()) {
Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
HeapObjectIterator js_global_property_cell_iterator(
heap_->property_cell_space());
- for (HeapObject* cell = js_global_property_cell_iterator.Next();
- cell != NULL;
+ for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
cell = js_global_property_cell_iterator.Next()) {
if (cell->IsPropertyCell()) {
PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
// Since objects are at least 2 words large we don't have entries for two
// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
- 0, _, _, _, _, // 0
- 1, 0, _, _, _, // 1
- 1, 1, _, _, _, // 2
- X, _, _, _, _, // 3
- 1, 2, _, _, _, // 4
- 2, 0, 2, _, _, // 5
- X, _, _, _, _, // 6
- X, _, _, _, _, // 7
- 1, 3, _, _, _, // 8
- 2, 0, 3, _, _, // 9
- 2, 1, 3, _, _, // 10
- X, _, _, _, _, // 11
- X, _, _, _, _, // 12
- X, _, _, _, _, // 13
- X, _, _, _, _, // 14
- X, _, _, _, _, // 15
- 1, 4, _, _, _, // 16
- 2, 0, 4, _, _, // 17
- 2, 1, 4, _, _, // 18
- X, _, _, _, _, // 19
- 2, 2, 4, _, _, // 20
- 3, 0, 2, 4, _, // 21
- X, _, _, _, _, // 22
- X, _, _, _, _, // 23
- X, _, _, _, _, // 24
- X, _, _, _, _, // 25
- X, _, _, _, _, // 26
- X, _, _, _, _, // 27
- X, _, _, _, _, // 28
- X, _, _, _, _, // 29
- X, _, _, _, _, // 30
- X, _, _, _, _, // 31
- 1, 5, _, _, _, // 32
- 2, 0, 5, _, _, // 33
- 2, 1, 5, _, _, // 34
- X, _, _, _, _, // 35
- 2, 2, 5, _, _, // 36
- 3, 0, 2, 5, _, // 37
- X, _, _, _, _, // 38
- X, _, _, _, _, // 39
- 2, 3, 5, _, _, // 40
- 3, 0, 3, 5, _, // 41
- 3, 1, 3, 5, _, // 42
- X, _, _, _, _, // 43
- X, _, _, _, _, // 44
- X, _, _, _, _, // 45
- X, _, _, _, _, // 46
- X, _, _, _, _, // 47
- X, _, _, _, _, // 48
- X, _, _, _, _, // 49
- X, _, _, _, _, // 50
- X, _, _, _, _, // 51
- X, _, _, _, _, // 52
- X, _, _, _, _, // 53
- X, _, _, _, _, // 54
- X, _, _, _, _, // 55
- X, _, _, _, _, // 56
- X, _, _, _, _, // 57
- X, _, _, _, _, // 58
- X, _, _, _, _, // 59
- X, _, _, _, _, // 60
- X, _, _, _, _, // 61
- X, _, _, _, _, // 62
- X, _, _, _, _, // 63
- 1, 6, _, _, _, // 64
- 2, 0, 6, _, _, // 65
- 2, 1, 6, _, _, // 66
- X, _, _, _, _, // 67
- 2, 2, 6, _, _, // 68
- 3, 0, 2, 6, _, // 69
- X, _, _, _, _, // 70
- X, _, _, _, _, // 71
- 2, 3, 6, _, _, // 72
- 3, 0, 3, 6, _, // 73
- 3, 1, 3, 6, _, // 74
- X, _, _, _, _, // 75
- X, _, _, _, _, // 76
- X, _, _, _, _, // 77
- X, _, _, _, _, // 78
- X, _, _, _, _, // 79
- 2, 4, 6, _, _, // 80
- 3, 0, 4, 6, _, // 81
- 3, 1, 4, 6, _, // 82
- X, _, _, _, _, // 83
- 3, 2, 4, 6, _, // 84
- 4, 0, 2, 4, 6, // 85
- X, _, _, _, _, // 86
- X, _, _, _, _, // 87
- X, _, _, _, _, // 88
- X, _, _, _, _, // 89
- X, _, _, _, _, // 90
- X, _, _, _, _, // 91
- X, _, _, _, _, // 92
- X, _, _, _, _, // 93
- X, _, _, _, _, // 94
- X, _, _, _, _, // 95
- X, _, _, _, _, // 96
- X, _, _, _, _, // 97
- X, _, _, _, _, // 98
- X, _, _, _, _, // 99
- X, _, _, _, _, // 100
- X, _, _, _, _, // 101
- X, _, _, _, _, // 102
- X, _, _, _, _, // 103
- X, _, _, _, _, // 104
- X, _, _, _, _, // 105
- X, _, _, _, _, // 106
- X, _, _, _, _, // 107
- X, _, _, _, _, // 108
- X, _, _, _, _, // 109
- X, _, _, _, _, // 110
- X, _, _, _, _, // 111
- X, _, _, _, _, // 112
- X, _, _, _, _, // 113
- X, _, _, _, _, // 114
- X, _, _, _, _, // 115
- X, _, _, _, _, // 116
- X, _, _, _, _, // 117
- X, _, _, _, _, // 118
- X, _, _, _, _, // 119
- X, _, _, _, _, // 120
- X, _, _, _, _, // 121
- X, _, _, _, _, // 122
- X, _, _, _, _, // 123
- X, _, _, _, _, // 124
- X, _, _, _, _, // 125
- X, _, _, _, _, // 126
- X, _, _, _, _, // 127
- 1, 7, _, _, _, // 128
- 2, 0, 7, _, _, // 129
- 2, 1, 7, _, _, // 130
- X, _, _, _, _, // 131
- 2, 2, 7, _, _, // 132
- 3, 0, 2, 7, _, // 133
- X, _, _, _, _, // 134
- X, _, _, _, _, // 135
- 2, 3, 7, _, _, // 136
- 3, 0, 3, 7, _, // 137
- 3, 1, 3, 7, _, // 138
- X, _, _, _, _, // 139
- X, _, _, _, _, // 140
- X, _, _, _, _, // 141
- X, _, _, _, _, // 142
- X, _, _, _, _, // 143
- 2, 4, 7, _, _, // 144
- 3, 0, 4, 7, _, // 145
- 3, 1, 4, 7, _, // 146
- X, _, _, _, _, // 147
- 3, 2, 4, 7, _, // 148
- 4, 0, 2, 4, 7, // 149
- X, _, _, _, _, // 150
- X, _, _, _, _, // 151
- X, _, _, _, _, // 152
- X, _, _, _, _, // 153
- X, _, _, _, _, // 154
- X, _, _, _, _, // 155
- X, _, _, _, _, // 156
- X, _, _, _, _, // 157
- X, _, _, _, _, // 158
- X, _, _, _, _, // 159
- 2, 5, 7, _, _, // 160
- 3, 0, 5, 7, _, // 161
- 3, 1, 5, 7, _, // 162
- X, _, _, _, _, // 163
- 3, 2, 5, 7, _, // 164
- 4, 0, 2, 5, 7, // 165
- X, _, _, _, _, // 166
- X, _, _, _, _, // 167
- 3, 3, 5, 7, _, // 168
- 4, 0, 3, 5, 7, // 169
- 4, 1, 3, 5, 7 // 170
+ 0, _, _, _, _, // 0
+ 1, 0, _, _, _, // 1
+ 1, 1, _, _, _, // 2
+ X, _, _, _, _, // 3
+ 1, 2, _, _, _, // 4
+ 2, 0, 2, _, _, // 5
+ X, _, _, _, _, // 6
+ X, _, _, _, _, // 7
+ 1, 3, _, _, _, // 8
+ 2, 0, 3, _, _, // 9
+ 2, 1, 3, _, _, // 10
+ X, _, _, _, _, // 11
+ X, _, _, _, _, // 12
+ X, _, _, _, _, // 13
+ X, _, _, _, _, // 14
+ X, _, _, _, _, // 15
+ 1, 4, _, _, _, // 16
+ 2, 0, 4, _, _, // 17
+ 2, 1, 4, _, _, // 18
+ X, _, _, _, _, // 19
+ 2, 2, 4, _, _, // 20
+ 3, 0, 2, 4, _, // 21
+ X, _, _, _, _, // 22
+ X, _, _, _, _, // 23
+ X, _, _, _, _, // 24
+ X, _, _, _, _, // 25
+ X, _, _, _, _, // 26
+ X, _, _, _, _, // 27
+ X, _, _, _, _, // 28
+ X, _, _, _, _, // 29
+ X, _, _, _, _, // 30
+ X, _, _, _, _, // 31
+ 1, 5, _, _, _, // 32
+ 2, 0, 5, _, _, // 33
+ 2, 1, 5, _, _, // 34
+ X, _, _, _, _, // 35
+ 2, 2, 5, _, _, // 36
+ 3, 0, 2, 5, _, // 37
+ X, _, _, _, _, // 38
+ X, _, _, _, _, // 39
+ 2, 3, 5, _, _, // 40
+ 3, 0, 3, 5, _, // 41
+ 3, 1, 3, 5, _, // 42
+ X, _, _, _, _, // 43
+ X, _, _, _, _, // 44
+ X, _, _, _, _, // 45
+ X, _, _, _, _, // 46
+ X, _, _, _, _, // 47
+ X, _, _, _, _, // 48
+ X, _, _, _, _, // 49
+ X, _, _, _, _, // 50
+ X, _, _, _, _, // 51
+ X, _, _, _, _, // 52
+ X, _, _, _, _, // 53
+ X, _, _, _, _, // 54
+ X, _, _, _, _, // 55
+ X, _, _, _, _, // 56
+ X, _, _, _, _, // 57
+ X, _, _, _, _, // 58
+ X, _, _, _, _, // 59
+ X, _, _, _, _, // 60
+ X, _, _, _, _, // 61
+ X, _, _, _, _, // 62
+ X, _, _, _, _, // 63
+ 1, 6, _, _, _, // 64
+ 2, 0, 6, _, _, // 65
+ 2, 1, 6, _, _, // 66
+ X, _, _, _, _, // 67
+ 2, 2, 6, _, _, // 68
+ 3, 0, 2, 6, _, // 69
+ X, _, _, _, _, // 70
+ X, _, _, _, _, // 71
+ 2, 3, 6, _, _, // 72
+ 3, 0, 3, 6, _, // 73
+ 3, 1, 3, 6, _, // 74
+ X, _, _, _, _, // 75
+ X, _, _, _, _, // 76
+ X, _, _, _, _, // 77
+ X, _, _, _, _, // 78
+ X, _, _, _, _, // 79
+ 2, 4, 6, _, _, // 80
+ 3, 0, 4, 6, _, // 81
+ 3, 1, 4, 6, _, // 82
+ X, _, _, _, _, // 83
+ 3, 2, 4, 6, _, // 84
+ 4, 0, 2, 4, 6, // 85
+ X, _, _, _, _, // 86
+ X, _, _, _, _, // 87
+ X, _, _, _, _, // 88
+ X, _, _, _, _, // 89
+ X, _, _, _, _, // 90
+ X, _, _, _, _, // 91
+ X, _, _, _, _, // 92
+ X, _, _, _, _, // 93
+ X, _, _, _, _, // 94
+ X, _, _, _, _, // 95
+ X, _, _, _, _, // 96
+ X, _, _, _, _, // 97
+ X, _, _, _, _, // 98
+ X, _, _, _, _, // 99
+ X, _, _, _, _, // 100
+ X, _, _, _, _, // 101
+ X, _, _, _, _, // 102
+ X, _, _, _, _, // 103
+ X, _, _, _, _, // 104
+ X, _, _, _, _, // 105
+ X, _, _, _, _, // 106
+ X, _, _, _, _, // 107
+ X, _, _, _, _, // 108
+ X, _, _, _, _, // 109
+ X, _, _, _, _, // 110
+ X, _, _, _, _, // 111
+ X, _, _, _, _, // 112
+ X, _, _, _, _, // 113
+ X, _, _, _, _, // 114
+ X, _, _, _, _, // 115
+ X, _, _, _, _, // 116
+ X, _, _, _, _, // 117
+ X, _, _, _, _, // 118
+ X, _, _, _, _, // 119
+ X, _, _, _, _, // 120
+ X, _, _, _, _, // 121
+ X, _, _, _, _, // 122
+ X, _, _, _, _, // 123
+ X, _, _, _, _, // 124
+ X, _, _, _, _, // 125
+ X, _, _, _, _, // 126
+ X, _, _, _, _, // 127
+ 1, 7, _, _, _, // 128
+ 2, 0, 7, _, _, // 129
+ 2, 1, 7, _, _, // 130
+ X, _, _, _, _, // 131
+ 2, 2, 7, _, _, // 132
+ 3, 0, 2, 7, _, // 133
+ X, _, _, _, _, // 134
+ X, _, _, _, _, // 135
+ 2, 3, 7, _, _, // 136
+ 3, 0, 3, 7, _, // 137
+ 3, 1, 3, 7, _, // 138
+ X, _, _, _, _, // 139
+ X, _, _, _, _, // 140
+ X, _, _, _, _, // 141
+ X, _, _, _, _, // 142
+ X, _, _, _, _, // 143
+ 2, 4, 7, _, _, // 144
+ 3, 0, 4, 7, _, // 145
+ 3, 1, 4, 7, _, // 146
+ X, _, _, _, _, // 147
+ 3, 2, 4, 7, _, // 148
+ 4, 0, 2, 4, 7, // 149
+ X, _, _, _, _, // 150
+ X, _, _, _, _, // 151
+ X, _, _, _, _, // 152
+ X, _, _, _, _, // 153
+ X, _, _, _, _, // 154
+ X, _, _, _, _, // 155
+ X, _, _, _, _, // 156
+ X, _, _, _, _, // 157
+ X, _, _, _, _, // 158
+ X, _, _, _, _, // 159
+ 2, 5, 7, _, _, // 160
+ 3, 0, 5, 7, _, // 161
+ 3, 1, 5, 7, _, // 162
+ X, _, _, _, _, // 163
+ 3, 2, 5, 7, _, // 164
+ 4, 0, 2, 5, 7, // 165
+ X, _, _, _, _, // 166
+ X, _, _, _, _, // 167
+ 3, 3, 5, 7, _, // 168
+ 4, 0, 3, 5, 7, // 169
+ 4, 1, 3, 5, 7 // 170
};
#undef _
#undef X
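// How the table above is read (an illustrative walk-through): each row holds
// an object count followed by up to four word offsets. For a mark-bit cell
// whose low byte is 0b00010101 (decimal 21), row 21 is {3, 0, 2, 4, _}: three
// live objects start at word offsets 0, 2 and 4 from the cell base. Rows
// marked X cover patterns with two adjacent 1 bits, which the comment before
// the table rules out because objects are at least two words long.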
// Force instantiation of templatized SweepConservatively method for
// SWEEP_ON_MAIN_THREAD mode.
-template int MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
- PagedSpace*, FreeList*, Page*);
+template int MarkCompactCollector::SweepConservatively<
+ MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
// Force instantiation of templatized SweepConservatively method for
// SWEEP_IN_PARALLEL mode.
-template int MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
- PagedSpace*, FreeList*, Page*);
+template int MarkCompactCollector::SweepConservatively<
+ MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
// Sweeps a space conservatively. After this has been done the larger free
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning. Dead objects other than free
// spaces will not contain the free space map.
-template<MarkCompactCollector::SweepingParallelism mode>
+template <MarkCompactCollector::SweepingParallelism mode>
int MarkCompactCollector::SweepConservatively(PagedSpace* space,
- FreeList* free_list,
- Page* p) {
+ FreeList* free_list, Page* p) {
DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
- DCHECK((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
- free_list != NULL) ||
- (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
- free_list == NULL));
+ DCHECK(
+ (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
+ (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
+ free_list == NULL));
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
if (it.Done()) {
size = p->area_end() - p->area_start();
- freed_bytes = Free<mode>(space, free_list, p->area_start(),
- static_cast<int>(size));
+ freed_bytes =
+ Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
DCHECK_EQ(0, p->LiveBytes());
if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
Address free_end = StartOfLiveObject(cell_base, *cell);
// Free the first free space.
size = free_end - p->area_start();
- freed_bytes = Free<mode>(space, free_list, p->area_start(),
- static_cast<int>(size));
+ freed_bytes =
+ Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
// The start of the current free area is represented in undigested form by
PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
reinterpret_cast<intptr_t>(p));
}
- SweepPrecisely<SWEEP_ONLY,
- SWEEP_ON_MAIN_THREAD,
- IGNORE_SKIP_LIST,
+ SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
pages_swept++;
parallel_sweeping_active = true;
reinterpret_cast<intptr_t>(p));
}
if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
- SweepPrecisely<SWEEP_ONLY,
- SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST,
+ SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, NULL, p, NULL);
} else if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY,
- SWEEP_ON_MAIN_THREAD,
- REBUILD_SKIP_LIST,
+ SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
} else {
- SweepPrecisely<SWEEP_ONLY,
- SWEEP_ON_MAIN_THREAD,
- IGNORE_SKIP_LIST,
+ SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
pages_swept++;
break;
}
- default: {
- UNREACHABLE();
- }
+ default: { UNREACHABLE(); }
}
}
if (FLAG_gc_verbose) {
PrintF("SweepSpace: %s (%d pages swept)\n",
- AllocationSpaceName(space->identity()),
- pages_swept);
+ AllocationSpaceName(space->identity()), pages_swept);
}
// Give pages that are queued to be freed back to the OS.
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- { GCTracer::Scope sweep_scope(heap()->tracer(),
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_OLDSPACE);
- { SequentialSweepingScope scope(this);
+ {
+ SequentialSweepingScope scope(this);
SweepSpace(heap()->old_pointer_space(), how_to_sweep);
SweepSpace(heap()->old_data_space(), how_to_sweep);
}
}
RemoveDeadInvalidatedCode();
- { GCTracer::Scope sweep_scope(heap()->tracer(),
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
SweepSpace(heap()->code_space(), PRECISE);
}
- { GCTracer::Scope sweep_scope(heap()->tracer(),
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CELL);
SweepSpace(heap()->cell_space(), PRECISE);
SweepSpace(heap()->property_cell_space(), PRECISE);
// ClearNonLiveTransitions depends on precise sweeping of map space to
// detect whether unmarked map became dead in this collection or in one
// of the previous ones.
- { GCTracer::Scope sweep_scope(heap()->tracer(),
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_MAP);
SweepSpace(heap()->map_space(), PRECISE);
}
}
-Isolate* MarkCompactCollector::isolate() const {
- return heap_->isolate();
-}
+Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
void MarkCompactCollector::Initialize() {
bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- SlotType type,
- Address addr,
- AdditionMode mode) {
+ SlotsBuffer** buffer_address, SlotType type,
+ Address addr, AdditionMode mode) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
// This doesn't need to be typed since it is just a normal heap pointer.
Object** target_pointer =
reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
- success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- target_pointer,
- SlotsBuffer::FAIL_ON_OVERFLOW);
+ success = SlotsBuffer::AddTo(
+ &slots_buffer_allocator_, target_page->slots_buffer_address(),
+ target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
} else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
- success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotsBuffer::CODE_ENTRY_SLOT,
- rinfo->constant_pool_entry_address(),
- SlotsBuffer::FAIL_ON_OVERFLOW);
+ success = SlotsBuffer::AddTo(
+ &slots_buffer_allocator_, target_page->slots_buffer_address(),
+ SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
+ SlotsBuffer::FAIL_ON_OVERFLOW);
} else {
- success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotTypeForRMode(rmode),
- rinfo->pc(),
- SlotsBuffer::FAIL_ON_OVERFLOW);
+ success = SlotsBuffer::AddTo(
+ &slots_buffer_allocator_, target_page->slots_buffer_address(),
+ SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
}
if (!success) {
EvictEvacuationCandidate(target_page);
!ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
target_page->slots_buffer_address(),
- SlotsBuffer::CODE_ENTRY_SLOT,
- slot,
+ SlotsBuffer::CODE_ENTRY_SLOT, slot,
SlotsBuffer::FAIL_ON_OVERFLOW)) {
EvictEvacuationCandidate(target_page);
}
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
if (is_compacting()) {
- Code* host = isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc);
+ Code* host =
+ isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
+ pc);
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
} else {
++slot_idx;
DCHECK(slot_idx < idx_);
- UpdateSlot(heap->isolate(),
- &v,
- DecodeSlotType(slot),
+ UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
}
DCHECK(slot_idx < idx_);
Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(heap->isolate(),
- &v,
- DecodeSlotType(slot),
+ UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
}
}
*buffer_address = NULL;
}
-
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MARK_COMPACT_H_
-#define V8_MARK_COMPACT_H_
+#ifndef V8_HEAP_MARK_COMPACT_H_
+#define V8_HEAP_MARK_COMPACT_H_
#include "src/compiler-intrinsics.h"
-#include "src/spaces.h"
+#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
class Marking {
public:
- explicit Marking(Heap* heap)
- : heap_(heap) {
- }
+ explicit Marking(Heap* heap) : heap_(heap) {}
INLINE(static MarkBit MarkBitFrom(Address addr));
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
- INLINE(static bool IsWhite(MarkBit mark_bit)) {
- return !mark_bit.Get();
- }
+ INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
// Grey markbits: 11
static const char* kGreyBitPattern;
mark_bit.Next().Clear();
}
- INLINE(static void BlackToGrey(MarkBit markbit)) {
- markbit.Next().Set();
- }
+ INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }
INLINE(static void WhiteToGrey(MarkBit markbit)) {
markbit.Set();
markbit.Next().Set();
}
- INLINE(static void GreyToBlack(MarkBit markbit)) {
- markbit.Next().Clear();
- }
+ INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
INLINE(static void BlackToGrey(HeapObject* obj)) {
BlackToGrey(MarkBitFrom(obj));
static const char* ColorName(ObjectColor color) {
switch (color) {
- case BLACK_OBJECT: return "black";
- case WHITE_OBJECT: return "white";
- case GREY_OBJECT: return "grey";
- case IMPOSSIBLE_COLOR: return "impossible";
+ case BLACK_OBJECT:
+ return "black";
+ case WHITE_OBJECT:
+ return "white";
+ case GREY_OBJECT:
+ return "grey";
+ case IMPOSSIBLE_COLOR:
+ return "impossible";
}
return "error";
}
#endif
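  // A minimal standalone sketch of the two-bit colour encoding used by this
  // class (plain bools instead of V8's MarkBit; purely illustrative and not
  // part of the API):
  //
  //   struct ToyMarkBits {
  //     bool bit;   // the object's mark bit
  //     bool next;  // the following bit
  //   };
  //   // white = 00, grey = 11, black = 10
  //   bool IsWhite(ToyMarkBits m) { return !m.bit; }
  //   bool IsGrey(ToyMarkBits m)  { return m.bit && m.next; }
  //   bool IsBlack(ToyMarkBits m) { return m.bit && !m.next; }
  //   void WhiteToGrey(ToyMarkBits* m) { m->bit = true; m->next = true; }
  //   void GreyToBlack(ToyMarkBits* m) { m->next = false; }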
// Returns true if the transferred color is black.
- INLINE(static bool TransferColor(HeapObject* from,
- HeapObject* to)) {
+ INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
MarkBit from_mark_bit = MarkBitFrom(from);
MarkBit to_mark_bit = MarkBitFrom(to);
bool is_black = false;
class MarkingDeque {
public:
MarkingDeque()
- : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
+ : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
void Initialize(Address low, Address high) {
HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
}
}
- ~SlotsBuffer() {
- }
+ ~SlotsBuffer() {}
void Add(ObjectSlot slot) {
DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
(buffer->chain_length_ - 1) * kNumberOfElements);
}
- inline bool IsFull() {
- return idx_ == kNumberOfElements;
- }
+ inline bool IsFull() { return idx_ == kNumberOfElements; }
- inline bool HasSpaceForTypedSlot() {
- return idx_ < kNumberOfElements - 1;
- }
+ inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
- static void UpdateSlotsRecordedIn(Heap* heap,
- SlotsBuffer* buffer,
+ static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
bool code_slots_filtering_required) {
while (buffer != NULL) {
if (code_slots_filtering_required) {
}
}
- enum AdditionMode {
- FAIL_ON_OVERFLOW,
- IGNORE_OVERFLOW
- };
+ enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
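  // For example (compare the call sites earlier in this patch): slots of
  // objects migrated during evacuation are recorded into
  // migration_slots_buffer_ with IGNORE_OVERFLOW, so the buffer chain simply
  // keeps growing, whereas slots recorded against an evacuation candidate's
  // own buffer use FAIL_ON_OVERFLOW and the page is evicted as a candidate if
  // the chain gets too long (see ChainLengthThresholdReached below).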
static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- ObjectSlot slot,
+ SlotsBuffer** buffer_address, ObjectSlot slot,
AdditionMode mode)) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || buffer->IsFull()) {
static bool IsTypedSlot(ObjectSlot slot);
static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- SlotType type,
- Address addr,
+ SlotsBuffer** buffer_address, SlotType type, Address addr,
AdditionMode mode);
static const int kNumberOfElements = 1021;
// Performs a global garbage collection.
void CollectGarbage();
- enum CompactionMode {
- INCREMENTAL_COMPACTION,
- NON_INCREMENTAL_COMPACTION
- };
+ enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
bool StartCompaction(CompactionMode mode);
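  // As StartCompaction earlier in this patch shows, the mode mainly gates
  // code-space compaction: code pages are only collected as evacuation
  // candidates when FLAG_compact_code_space is set and either the compaction
  // is NON_INCREMENTAL_COMPACTION or FLAG_incremental_code_compaction is
  // enabled.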
PRECISE
};
- enum SweepingParallelism {
- SWEEP_ON_MAIN_THREAD,
- SWEEP_IN_PARALLEL
- };
+ enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
// Sweep a single page from the given space conservatively.
  // Returns the size of the biggest contiguous freed memory chunk in bytes.
- template<SweepingParallelism type>
- static int SweepConservatively(PagedSpace* space,
- FreeList* free_list,
- Page* p);
+ template <SweepingParallelism type>
+ static int SweepConservatively(PagedSpace* space, FreeList* free_list,
+ Page* p);
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
- return Page::FromAddress(reinterpret_cast<Address>(anchor))->
- ShouldSkipEvacuationSlotRecording();
+ return Page::FromAddress(reinterpret_cast<Address>(anchor))
+ ->ShouldSkipEvacuationSlotRecording();
}
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
- return Page::FromAddress(reinterpret_cast<Address>(host))->
- ShouldSkipEvacuationSlotRecording();
+ return Page::FromAddress(reinterpret_cast<Address>(host))
+ ->ShouldSkipEvacuationSlotRecording();
}
INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
- return Page::FromAddress(reinterpret_cast<Address>(obj))->
- IsEvacuationCandidate();
+ return Page::FromAddress(reinterpret_cast<Address>(obj))
+ ->IsEvacuationCandidate();
}
INLINE(void EvictEvacuationCandidate(Page* page)) {
void RecordCodeEntrySlot(Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
- INLINE(void RecordSlot(Object** anchor_slot,
- Object** slot,
- Object* object,
- SlotsBuffer::AdditionMode mode =
- SlotsBuffer::FAIL_ON_OVERFLOW));
+ INLINE(void RecordSlot(
+ Object** anchor_slot, Object** slot, Object* object,
+ SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
- void MigrateObject(HeapObject* dst,
- HeapObject* src,
- int size,
+ void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace to_old_space);
bool TryPromoteObject(HeapObject* object, int object_size);
sequential_sweeping_ = sequential_sweeping;
}
- bool sequential_sweeping() const {
- return sequential_sweeping_;
- }
+ bool sequential_sweeping() const { return sequential_sweeping_; }
// Mark the global table which maps weak objects to dependent code without
// marking its contents.
void SweepSpaces();
int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
- NewSpacePage* p);
+ NewSpacePage* p);
void EvacuateNewSpace();
class MarkBitCellIterator BASE_EMBEDDED {
public:
- explicit MarkBitCellIterator(MemoryChunk* chunk)
- : chunk_(chunk) {
- last_cell_index_ = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+ explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
+ last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(chunk_->area_end())));
cell_base_ = chunk_->area_start();
cell_index_ = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_)));
+ Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
cells_ = chunk_->markbits()->cells();
}
inline MarkBit::CellType* CurrentCell() {
DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
+ chunk_->AddressToMarkbitIndex(cell_base_))));
return &cells_[cell_index_];
}
inline Address CurrentCellBase() {
DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
+ chunk_->AddressToMarkbitIndex(cell_base_))));
return cell_base_;
}
class SequentialSweepingScope BASE_EMBEDDED {
public:
- explicit SequentialSweepingScope(MarkCompactCollector *collector) :
- collector_(collector) {
+ explicit SequentialSweepingScope(MarkCompactCollector* collector)
+ : collector_(collector) {
collector_->set_sequential_sweeping(true);
}
- ~SequentialSweepingScope() {
- collector_->set_sequential_sweeping(false);
- }
+ ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }
private:
MarkCompactCollector* collector_;
const char* AllocationSpaceName(AllocationSpace space);
-} }  // namespace v8::internal
-
-#endif  // V8_MARK_COMPACT_H_
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_MARK_COMPACT_H_
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SPACES_INL_H_
-#define V8_SPACES_INL_H_
+#ifndef V8_HEAP_SPACES_INL_H_
+#define V8_HEAP_SPACES_INL_H_
+#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
-#include "src/spaces.h"
#include "src/v8memory.h"
namespace v8 {
PageIterator::PageIterator(PagedSpace* space)
: space_(space),
prev_page_(&space->anchor_),
- next_page_(prev_page_->next_page()) { }
+ next_page_(prev_page_->next_page()) {}
-bool PageIterator::has_next() {
- return next_page_ != &space_->anchor_;
-}
+bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
Page* PageIterator::next() {
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
: prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
- last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
+ last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
: prev_page_(space->anchor()),
next_page_(prev_page_->next_page()),
- last_page_(prev_page_->prev_page()) { }
+ last_page_(prev_page_->prev_page()) {}
NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
: prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
}
-bool NewSpacePageIterator::has_next() {
- return prev_page_ != last_page_;
-}
+bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
NewSpacePage* NewSpacePageIterator::next() {
}
-void MemoryAllocator::Unprotect(Address start,
- size_t size,
+void MemoryAllocator::Unprotect(Address start, size_t size,
Executability executable) {
base::OS::Unprotect(start, size, executable);
}
// --------------------------------------------------------------------------
// PagedSpace
-Page* Page::Initialize(Heap* heap,
- MemoryChunk* chunk,
- Executability executable,
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
: state_(kOldPointerState),
old_pointer_iterator_(heap->old_pointer_space()),
map_iterator_(heap->map_space()),
- lo_iterator_(heap->lo_space()) { }
+ lo_iterator_(heap->lo_space()) {}
Page* Page::next_page() {
bool FreeListNode::IsFreeListNode(HeapObject* object) {
Map* map = object->map();
Heap* heap = object->GetHeap();
- return map == heap->raw_unchecked_free_space_map()
- || map == heap->raw_unchecked_one_pointer_filler_map()
- || map == heap->raw_unchecked_two_pointer_filler_map();
+ return map == heap->raw_unchecked_free_space_map() ||
+ map == heap->raw_unchecked_one_pointer_filler_map() ||
+ map == heap->raw_unchecked_two_pointer_filler_map();
}
-} }  // namespace v8::internal
-
-#endif  // V8_SPACES_INL_H_
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_SPACES_INL_H_
#include "src/base/platform/platform.h"
#include "src/full-codegen.h"
+#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
-#include "src/mark-compact.h"
#include "src/msan.h"
namespace v8 {
// just an anchor for the double linked page list. Initialize as if we have
// reached the end of the anchor page, then the first iteration will move on
// to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- NULL);
+ Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
}
// just an anchor for the double linked page list. Initialize the current
// address and end as NULL, then the first iteration will move on
// to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- size_func);
+ Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
}
owner == page->heap()->cell_space() ||
owner == page->heap()->property_cell_space() ||
owner == page->heap()->code_space());
- Initialize(reinterpret_cast<PagedSpace*>(owner),
- page->area_start(),
- page->area_end(),
- kOnePageOnly,
- size_func);
+ Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
+ page->area_end(), kOnePageOnly, size_func);
DCHECK(page->WasSweptPrecisely() || page->SweepingCompleted());
}
-void HeapObjectIterator::Initialize(PagedSpace* space,
- Address cur, Address end,
+void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
HeapObjectIterator::PageMode mode,
HeapObjectCallback size_f) {
// Check that we actually can iterate this space.
code_range_(NULL),
free_list_(0),
allocation_list_(0),
- current_allocation_block_index_(0) {
-}
+ current_allocation_block_index_(0) {}
bool CodeRange::SetUp(size_t requested) {
// We are sure that we have mapped a block of requested addresses.
DCHECK(code_range_->size() == requested);
- LOG(isolate_,
- NewEvent("CodeRange", code_range_->address(), requested));
+ LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
Address base = reinterpret_cast<Address>(code_range_->address());
Address aligned_base =
RoundUp(reinterpret_cast<Address>(code_range_->address()),
}
DCHECK(*allocated <= current.size);
DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
- current.start,
- commit_size,
- *allocated)) {
+ if (!isolate_->memory_allocator()->CommitExecutableMemory(
+ code_range_, current.start, commit_size, *allocated)) {
*allocated = 0;
return NULL;
}
void CodeRange::TearDown() {
- delete code_range_; // Frees all memory in the virtual memory range.
- code_range_ = NULL;
- free_list_.Free();
- allocation_list_.Free();
+ delete code_range_; // Frees all memory in the virtual memory range.
+ code_range_ = NULL;
+ free_list_.Free();
+ allocation_list_.Free();
}
size_(0),
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
- highest_ever_allocated_(reinterpret_cast<void*>(0)) {
-}
+ highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
}
-bool MemoryAllocator::CommitMemory(Address base,
- size_t size,
+bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
if (!base::VirtualMemory::CommitRegion(base, size,
executable == EXECUTABLE)) {
DCHECK(isolate_->code_range() == NULL ||
!isolate_->code_range()->contains(
static_cast<Address>(reservation->address())));
- DCHECK(executable == NOT_EXECUTABLE ||
- isolate_->code_range() == NULL ||
+ DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
!isolate_->code_range()->valid());
reservation->Release();
}
-void MemoryAllocator::FreeMemory(Address base,
- size_t size,
+void MemoryAllocator::FreeMemory(Address base, size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
DCHECK(size_ >= size);
DCHECK(executable == EXECUTABLE);
isolate_->code_range()->FreeRawMemory(base, size);
} else {
- DCHECK(executable == NOT_EXECUTABLE ||
- isolate_->code_range() == NULL ||
+ DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
!isolate_->code_range()->valid());
bool result = base::VirtualMemory::ReleaseRegion(base, size);
USE(result);
}
-Address MemoryAllocator::ReserveAlignedMemory(size_t size,
- size_t alignment,
+Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
base::VirtualMemory* controller) {
base::VirtualMemory reservation(size, alignment);
if (!reservation.IsReserved()) return NULL;
size_ += reservation.size();
- Address base = RoundUp(static_cast<Address>(reservation.address()),
- alignment);
+ Address base =
+ RoundUp(static_cast<Address>(reservation.address()), alignment);
controller->TakeControl(&reservation);
return base;
}
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation,
- base,
- commit_size,
+ if (!CommitExecutableMemory(&reservation, base, commit_size,
reserve_size)) {
base = NULL;
}
}
-NewSpacePage* NewSpacePage::Initialize(Heap* heap,
- Address start,
+NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
SemiSpace* semi_space) {
Address area_start = start + NewSpacePage::kObjectStartOffset;
Address area_end = start + Page::kPageSize;
- MemoryChunk* chunk = MemoryChunk::Initialize(heap,
- start,
- Page::kPageSize,
- area_start,
- area_end,
- NOT_EXECUTABLE,
- semi_space);
+ MemoryChunk* chunk =
+ MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
+ area_end, NOT_EXECUTABLE, semi_space);
chunk->set_next_chunk(NULL);
chunk->set_prev_chunk(NULL);
chunk->initialize_scan_on_scavenge(true);
}
-MemoryChunk* MemoryChunk::Initialize(Heap* heap,
- Address base,
- size_t size,
- Address area_start,
- Address area_end,
- Executability executable,
- Space* owner) {
+MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ Executability executable, Space* owner) {
MemoryChunk* chunk = FromAddress(base);
DCHECK(base == chunk->address());
// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
- size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
- MemoryAllocator::CodePageGuardSize() : 0;
+ size_t guard_size =
+ IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
size_t header_size = area_start() - address() - guard_size;
size_t commit_size =
RoundUp(header_size + requested, base::OS::CommitPageSize());
Address start = address() + committed_size + guard_size;
size_t length = commit_size - committed_size;
if (reservation_.IsReserved()) {
- Executability executable = IsFlagSet(IS_EXECUTABLE)
- ? EXECUTABLE : NOT_EXECUTABLE;
- if (!heap()->isolate()->memory_allocator()->CommitMemory(
- start, length, executable)) {
+ Executability executable =
+ IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
+ executable)) {
return false;
}
} else {
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- base::OS::CommitPageSize()) + CodePageGuardSize();
+ base::OS::CommitPageSize()) +
+ CodePageGuardSize();
// Check executable memory limit.
if (size_executable_ + chunk_size > capacity_executable_) {
- LOG(isolate_,
- StringEvent("MemoryAllocator::AllocateRawMemory",
- "V8 Executable Allocation capacity exceeded"));
+ LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
+ "V8 Executable Allocation capacity exceeded"));
return NULL;
}
// Allocate executable memory either from code range or from the
// OS.
if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size,
- commit_size,
+ base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
&chunk_size);
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(base),
- MemoryChunk::kAlignment));
+ DCHECK(
+ IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
if (base == NULL) return NULL;
size_ += chunk_size;
// Update executable memory size.
size_executable_ += chunk_size;
} else {
- base = AllocateAlignedMemory(chunk_size,
- commit_size,
- MemoryChunk::kAlignment,
- executable,
+ base = AllocateAlignedMemory(chunk_size, commit_size,
+ MemoryChunk::kAlignment, executable,
&reservation);
if (base == NULL) return NULL;
// Update executable memory size.
} else {
chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
base::OS::CommitPageSize());
- size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
- commit_area_size, base::OS::CommitPageSize());
- base = AllocateAlignedMemory(chunk_size,
- commit_size,
- MemoryChunk::kAlignment,
- executable,
- &reservation);
+ size_t commit_size =
+ RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
+ base::OS::CommitPageSize());
+ base =
+ AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+ executable, &reservation);
if (base == NULL) return NULL;
// Use chunk_size for statistics and callbacks because we assume that they
// treat reserved but not-yet committed memory regions of chunks as allocated.
- isolate_->counters()->memory_allocated()->
- Increment(static_cast<int>(chunk_size));
+ isolate_->counters()->memory_allocated()->Increment(
+ static_cast<int>(chunk_size));
LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
if (owner != NULL) {
PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
}
- MemoryChunk* result = MemoryChunk::Initialize(heap,
- base,
- chunk_size,
- area_start,
- area_end,
- executable,
- owner);
+ MemoryChunk* result = MemoryChunk::Initialize(
+ heap, base, chunk_size, area_start, area_end, executable, owner);
result->set_reserved_memory(&reservation);
MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
return result;
}
-Page* MemoryAllocator::AllocatePage(intptr_t size,
- PagedSpace* owner,
+Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Space* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(object_size,
- object_size,
- executable,
- owner);
+ MemoryChunk* chunk =
+ AllocateChunk(object_size, object_size, executable, owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
}
PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
}
- isolate_->heap()->RememberUnmappedPage(
- reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
+ isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
+ chunk->IsEvacuationCandidate());
delete chunk->slots_buffer();
delete chunk->skip_list();
if (reservation->IsReserved()) {
FreeMemory(reservation, chunk->executable());
} else {
- FreeMemory(chunk->address(),
- chunk->size(),
- chunk->executable());
+ FreeMemory(chunk->address(), chunk->size(), chunk->executable());
}
}
-bool MemoryAllocator::CommitBlock(Address start,
- size_t size,
+bool MemoryAllocator::CommitBlock(Address start, size_t size,
Executability executable) {
if (!CommitMemory(start, size, executable)) return false;
size_t size) {
for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
MemoryAllocationCallbackRegistration registration =
- memory_allocation_callbacks_[i];
+ memory_allocation_callbacks_[i];
if ((registration.space & space) == space &&
(registration.action & action) == action)
registration.callback(space, action, static_cast<int>(size));
void MemoryAllocator::AddMemoryAllocationCallback(
- MemoryAllocationCallback callback,
- ObjectSpace space,
+ MemoryAllocationCallback callback, ObjectSpace space,
AllocationAction action) {
DCHECK(callback != NULL);
MemoryAllocationCallbackRegistration registration(callback, space, action);
void MemoryAllocator::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
+ MemoryAllocationCallback callback) {
DCHECK(callback != NULL);
for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
if (memory_allocation_callbacks_[i].callback == callback) {
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
float pct = static_cast<float>(capacity_ - size_) / capacity_;
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", used: %" V8_PTR_PREFIX "d"
- ", available: %%%d\n\n",
- capacity_, size_, static_cast<int>(pct*100));
+ PrintF(" capacity: %" V8_PTR_PREFIX
+ "d"
+ ", used: %" V8_PTR_PREFIX
+ "d"
+ ", available: %%%d\n\n",
+ capacity_, size_, static_cast<int>(pct * 100));
}
#endif
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
- Address start,
- size_t commit_size,
+ Address start, size_t commit_size,
size_t reserved_size) {
// Commit page header (not executable).
- if (!vm->Commit(start,
- CodePageGuardStartOffset(),
- false)) {
+ if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
return false;
}
// Commit page body (executable).
if (!vm->Commit(start + CodePageAreaStartOffset(),
- commit_size - CodePageGuardStartOffset(),
- true)) {
+ commit_size - CodePageGuardStartOffset(), true)) {
return false;
}
return false;
}
- UpdateAllocatedSpaceLimits(start,
- start + CodePageAreaStartOffset() +
- commit_size - CodePageGuardStartOffset());
+ UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
+ commit_size -
+ CodePageGuardStartOffset());
return true;
}
end_of_unswept_pages_(NULL),
emergency_memory_(NULL) {
if (id == CODE_SPACE) {
- area_size_ = heap->isolate()->memory_allocator()->
- CodePageAreaSize();
+ area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize();
} else {
area_size_ = Page::kPageSize - Page::kObjectStartOffset;
}
- max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
- * AreaSize();
+ max_capacity_ =
+ (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
accounting_stats_.Clear();
allocation_info_.set_top(NULL);
}
-bool PagedSpace::SetUp() {
- return true;
-}
+bool PagedSpace::SetUp() { return true; }
-bool PagedSpace::HasBeenSetUp() {
- return true;
-}
+bool PagedSpace::HasBeenSetUp() { return true; }
void PagedSpace::TearDown() {
size = SizeOfFirstPage();
}
- Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
- size, this, executable());
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
+ executable());
if (p == NULL) return false;
DCHECK(Capacity() <= max_capacity_);
// upgraded to handle small pages.
size = AreaSize();
} else {
- size = RoundUp(
- 480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
- kPointerSize);
+ size =
+ RoundUp(480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+ kPointerSize);
}
break;
}
#ifdef DEBUG
-void PagedSpace::Print() { }
+void PagedSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
size_t size = 2 * reserved_semispace_capacity;
- Address base =
- heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
- size, size, &reservation_);
+ Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+ size, size, &reservation_);
if (base == NULL) return false;
chunk_base_ = base;
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
- promoted_histogram_[name].set_name(#name);
+#define SET_NAME(name) \
+ allocated_histogram_[name].set_name(#name); \
+ promoted_histogram_[name].set_name(#name);
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
2 * heap()->ReservedSemiSpaceSize());
DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
- to_space_.SetUp(chunk_base_,
- initial_semispace_capacity,
+ to_space_.SetUp(chunk_base_, initial_semispace_capacity,
maximum_semispace_capacity);
from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
- initial_semispace_capacity,
- maximum_semispace_capacity);
+ initial_semispace_capacity, maximum_semispace_capacity);
if (!to_space_.Commit()) {
return false;
}
}
-void NewSpace::Flip() {
- SemiSpace::Swap(&from_space_, &to_space_);
-}
+void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
if (rounded_new_capacity < Capacity() &&
- to_space_.ShrinkTo(rounded_new_capacity)) {
+ to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from-space if we managed to shrink to-space.
from_space_.Reset();
if (!from_space_.ShrinkTo(rounded_new_capacity)) {
// the new limit accordingly.
Address new_top = old_top + size_in_bytes;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(
- bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+ heap()->incremental_marking()->Step(bytes_allocated,
+ IncrementalMarking::GC_VIA_STACK_GUARD);
UpdateInlineAllocationLimit(size_in_bytes);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(
- bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+ heap()->incremental_marking()->Step(bytes_allocated,
+ IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
return AllocateRaw(size_in_bytes);
} else {
// -----------------------------------------------------------------------------
// SemiSpace implementation
-void SemiSpace::SetUp(Address start,
- int initial_capacity,
+void SemiSpace::SetUp(Address start, int initial_capacity,
int maximum_capacity) {
// Creates a space in the young generation. The constructor does not
// allocate memory from the OS. A SemiSpace is given a contiguous chunk of
bool SemiSpace::Commit() {
DCHECK(!is_committed());
int pages = capacity_ / Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
- capacity_,
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_,
executable())) {
return false;
}
NewSpacePage* current = anchor();
for (int i = 0; i < pages; i++) {
NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+ NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
new_page->InsertAfter(current);
current = new_page;
}
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_ + capacity_, delta, executable())) {
+ start_ + capacity_, delta, executable())) {
return false;
}
SetCapacity(new_capacity);
DCHECK(last_page != anchor());
for (int i = pages_before; i < pages_after; i++) {
Address page_address = start_ + i * Page::kPageSize;
- NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
- page_address,
- this);
+ NewSpacePage* new_page =
+ NewSpacePage::Initialize(heap(), page_address, this);
new_page->InsertAfter(last_page);
Bitmap::Clear(new_page);
// Duplicate the flags that was set on the old page.
#ifdef DEBUG
-void SemiSpace::Print() { }
+void SemiSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
if (page->heap()->incremental_marking()->IsMarking()) {
CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
} else {
- CHECK(!page->IsFlagSet(
- MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+ CHECK(
+ !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
}
// TODO(gc): Check that the live_bytes_count_ field matches the
// black marking on the page (if we make it match in new-space).
}
-void SemiSpaceIterator::Initialize(Address start,
- Address end,
+void SemiSpaceIterator::Initialize(Address start, Address end,
HeapObjectCallback size_func) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
- // We reset the name each time, though it hasn't changed.
+// We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME
isolate->heap_histograms()[type].increment_bytes(obj->Size());
if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
- JSObject::cast(obj)->IncrementSpillStatistics(
- isolate->js_spill_information());
+ JSObject::cast(obj)
+ ->IncrementSpillStatistics(isolate->js_spill_information());
}
return obj->Size();
// Summarize string types.
int string_number = 0;
int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += isolate->heap_histograms()[type].number(); \
- string_bytes += isolate->heap_histograms()[type].bytes();
+#define INCREMENT(type, size, name, camel_name) \
+ string_number += isolate->heap_histograms()[type].number(); \
+ string_bytes += isolate->heap_histograms()[type].bytes();
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
}
-static void DoReportStatistics(Isolate* isolate,
- HistogramInfo* info, const char* description) {
+static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
+ const char* description) {
LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
// Lump all the string types together.
int string_number = 0;
int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
+#define INCREMENT(type, size, name, camel_name) \
+ string_number += info[type].number(); \
+ string_bytes += info[type].bytes();
STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
if (string_number > 0) {
// Then do the other types.
for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
if (info[i].number() > 0) {
- LOG(isolate,
- HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
+ LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
+ info[i].bytes()));
}
}
LOG(isolate, HeapSampleEndEvent("NewSpace", description));
#ifdef DEBUG
if (FLAG_heap_stats) {
float pct = static_cast<float>(Available()) / Capacity();
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
- Capacity(), Available(), static_cast<int>(pct*100));
+ PrintF(" capacity: %" V8_PTR_PREFIX
+ "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+ Capacity(), Available(), static_cast<int>(pct * 100));
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (allocated_histogram_[i].number() > 0) {
- PrintF(" %-34s%10d (%10d bytes)\n",
- allocated_histogram_[i].name(),
+ PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
allocated_histogram_[i].number(),
allocated_histogram_[i].bytes());
}
}
-FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
+FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
FreeListNode* node = top();
if (node == NULL) return NULL;
FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
- int *node_size) {
+ int* node_size) {
FreeListNode* node = PickNodeFromList(node_size);
if (node != NULL && *node_size < size_in_bytes) {
Free(node, *node_size);
}
-FreeList::FreeList(PagedSpace* owner)
- : owner_(owner), heap_(owner->heap()) {
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
Reset();
}
int huge_list_available = huge_list_.available();
FreeListNode* top_node = huge_list_.top();
- for (FreeListNode** cur = &top_node;
- *cur != NULL;
+ for (FreeListNode** cur = &top_node; *cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
while (cur_node != NULL &&
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
- owner_->heap()->incremental_marking()->OldSpaceStep(
- size_in_bytes - old_linear_size);
+ owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
+ old_linear_size);
int new_node_size = 0;
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (sum < p->area_size()) {
sum += small_list_.EvictFreeListItemsInList(p) +
- medium_list_.EvictFreeListItemsInList(p) +
- large_list_.EvictFreeListItemsInList(p);
+ medium_list_.EvictFreeListItemsInList(p) +
+ large_list_.EvictFreeListItemsInList(p);
p->set_available_in_small_free_list(0);
p->set_available_in_medium_free_list(0);
p->set_available_in_large_free_list(0);
bool FreeList::IsVeryLong() {
- if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
return false;
}
intptr_t PagedSpace::SizeOfObjects() {
DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
- (unswept_free_bytes_ == 0));
+ (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
-void PagedSpace::RepairFreeListsAfterBoot() {
- free_list_.RepairLists(heap());
-}
+void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top())->
- IsEvacuationCandidate()) {
+ if (Page::FromAllocationTop(allocation_info_.top())
+ ->IsEvacuationCandidate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
static_cast<int>(allocation_info_.limit() - allocation_info_.top());
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
- int free_chunk =
- collector->SweepInParallel(this, size_in_bytes);
+ int free_chunk = collector->SweepInParallel(this, size_in_bytes);
collector->RefillFreeList(this);
if (free_chunk >= size_in_bytes) {
HeapObject* object = free_list_.Allocate(size_in_bytes);
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!heap()->always_allocate()
- && heap()->OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements form their free-lists.
HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
ReportCodeKindStatistics(isolate->code_kind_statistics());
- PrintF("Code comment statistics (\" [ comment-txt : size/ "
- "count (average)\"):\n");
+ PrintF(
+ "Code comment statistics (\" [ comment-txt : size/ "
+ "count (average)\"):\n");
for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
const CommentStatistic& cs = comments_statistics[i];
if (cs.size > 0) {
PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
- cs.size/cs.count);
+ cs.size / cs.count);
}
}
PrintF("\n");
void PagedSpace::ReportStatistics() {
int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
- ", waste: %" V8_PTR_PREFIX "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+ PrintF(" capacity: %" V8_PTR_PREFIX
+ "d"
+ ", waste: %" V8_PTR_PREFIX
+ "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
if (!swept_precisely_) return;
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.
-void MapSpace::VerifyObject(HeapObject* object) {
- CHECK(object->IsMap());
-}
+void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
// -----------------------------------------------------------------------------
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.
-void CellSpace::VerifyObject(HeapObject* object) {
- CHECK(object->IsCell());
-}
+void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
void PropertyCellSpace::VerifyObject(HeapObject* object) {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) {
- return key1 == key2;
-}
+static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
-LargeObjectSpace::LargeObjectSpace(Heap* heap,
- intptr_t max_capacity,
+LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
max_capacity_(max_capacity),
return AllocationResult::Retry(identity());
}
- LargePage* page = heap()->isolate()->memory_allocator()->
- AllocateLargePage(object_size, this, executable);
+ LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+ object_size, this, executable);
if (page == NULL) return AllocationResult::Retry(identity());
DCHECK(page->area_size() >= object_size);
uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
for (uintptr_t key = base; key <= limit; key++) {
HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key),
- true);
+ static_cast<uint32_t>(key), true);
DCHECK(entry != NULL);
entry->value = page;
}
LargePage* LargeObjectSpace::FindPage(Address a) {
uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key),
- false);
+ static_cast<uint32_t>(key), false);
if (e != NULL) {
DCHECK(e->value != NULL);
LargePage* page = reinterpret_cast<LargePage*>(e->value);
}
// Free the chunk.
- heap()->mark_compact_collector()->ReportDeleteIfNeeded(
- object, heap()->isolate());
+ heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
+ heap()->isolate());
size_ -= static_cast<int>(page->size());
objects_size_ -= object->Size();
page_count_--;
// Use variable alignment to help pass length check (<= 80 characters)
// of single line in tools/presubmit.py.
const intptr_t alignment = MemoryChunk::kAlignment;
- uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
- uintptr_t limit = base + (page->size()-1)/alignment;
+ uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
+ uintptr_t limit = base + (page->size() - 1) / alignment;
for (uintptr_t key = base; key <= limit; key++) {
chunk_map_.Remove(reinterpret_cast<void*>(key),
static_cast<uint32_t>(key));
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
- for (LargePage* chunk = first_page_;
- chunk != NULL;
+ for (LargePage* chunk = first_page_; chunk != NULL;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
// Byte arrays and strings don't have interior pointers.
if (object->IsCode()) {
VerifyPointersVisitor code_visitor;
- object->IterateBody(map->instance_type(),
- object->Size(),
- &code_visitor);
+ object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
CollectHistogramInfo(obj);
}
- PrintF(" number of objects %d, "
- "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
+ PrintF(
+ " number of objects %d, "
+ "size of objects %" V8_PTR_PREFIX "d\n",
+ num_objects, objects_size_);
if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
void Page::Print() {
// Make a best-effort to print the objects in the page.
- PrintF("Page@%p in %s\n",
- this->address(),
+ PrintF("Page@%p in %s\n", this->address(),
AllocationSpaceName(this->owner()->identity()));
printf(" --------------------------------------\n");
HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
unsigned mark_size = 0;
- for (HeapObject* object = objects.Next();
- object != NULL;
+ for (HeapObject* object = objects.Next(); object != NULL;
object = objects.Next()) {
bool is_marked = Marking::MarkBitFrom(object).Get();
PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
}
#endif // DEBUG
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SPACES_H_
-#define V8_SPACES_H_
+#ifndef V8_HEAP_SPACES_H_
+#define V8_HEAP_SPACES_H_
#include "src/allocation.h"
#include "src/base/atomicops.h"
// Some assertion macros used in the debugging mode.
-#define DCHECK_PAGE_ALIGNED(address) \
+#define DCHECK_PAGE_ALIGNED(address) \
DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
-#define DCHECK_OBJECT_ALIGNED(address) \
+#define DCHECK_OBJECT_ALIGNED(address) \
DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-#define DCHECK_OBJECT_SIZE(size) \
+#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
-#define DCHECK_PAGE_OFFSET(offset) \
- DCHECK((Page::kObjectStartOffset <= offset) \
- && (offset <= Page::kPageSize))
+#define DCHECK_PAGE_OFFSET(offset) \
+ DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
-#define DCHECK_MAP_PAGE_INDEX(index) \
+#define DCHECK_MAP_PAGE_INDEX(index) \
DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
typedef uint32_t CellType;
inline MarkBit(CellType* cell, CellType mask, bool data_only)
- : cell_(cell), mask_(mask), data_only_(data_only) { }
+ : cell_(cell), mask_(mask), data_only_(data_only) {}
inline CellType* cell() { return cell_; }
inline CellType mask() { return mask_; }
static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
- static const size_t kLength =
- (1 << kPageSizeBits) >> (kPointerSizeLog2);
+ static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
static const size_t kSize =
- (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+ (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
static int CellsForLength(int length) {
return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
}
- int CellsCount() {
- return CellsForLength(kLength);
- }
+ int CellsCount() { return CellsForLength(kLength); }
static int SizeFor(int cells_count) {
return sizeof(MarkBit::CellType) * cells_count;
return reinterpret_cast<MarkBit::CellType*>(this);
}
- INLINE(Address address()) {
- return reinterpret_cast<Address>(this);
- }
+ INLINE(Address address()) { return reinterpret_cast<Address>(this); }
INLINE(static Bitmap* FromAddress(Address addr)) {
return reinterpret_cast<Bitmap*>(addr);
class CellPrinter {
public:
- CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
+ CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
void Print(uint32_t pos, uint32_t cell) {
if (cell == seq_type) {
void Flush() {
if (seq_length > 0) {
- PrintF("%d: %dx%d\n",
- seq_start,
- seq_type == 0 ? 0 : 1,
+ PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
seq_length * kBitsPerCell);
seq_length = 0;
}
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
static const MemoryChunk* FromAddress(const byte* a) {
- return reinterpret_cast<const MemoryChunk*>(
- OffsetFrom(a) & ~kAlignmentMask);
+ return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
+ ~kAlignmentMask);
}
// Only works for addresses in pointer spaces, not data or code spaces.
kPageHeaderTag);
}
- base::VirtualMemory* reserved_memory() {
- return &reservation_;
- }
+ base::VirtualMemory* reserved_memory() { return &reservation_; }
- void InitializeReservedMemory() {
- reservation_.Reset();
- }
+ void InitializeReservedMemory() { reservation_.Reset(); }
void set_reserved_memory(base::VirtualMemory* reservation) {
DCHECK_NOT_NULL(reservation);
static const int kPointersFromHereAreInterestingMask =
1 << POINTERS_FROM_HERE_ARE_INTERESTING;
- static const int kEvacuationCandidateMask =
- 1 << EVACUATION_CANDIDATE;
+ static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) |
- (1 << RESCAN_ON_EVACUATION) |
- (1 << IN_FROM_SPACE) |
- (1 << IN_TO_SPACE);
+ (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
+ (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
- void SetFlag(int flag) {
- flags_ |= static_cast<uintptr_t>(1) << flag;
- }
+ void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
- void ClearFlag(int flag) {
- flags_ &= ~(static_cast<uintptr_t>(1) << flag);
- }
+ void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
void SetFlagTo(int flag, bool value) {
if (value) {
}
bool TryParallelSweeping() {
- return base::Acquire_CompareAndSwap(
- ¶llel_sweeping_, SWEEPING_PENDING, SWEEPING_IN_PROGRESS) ==
- SWEEPING_PENDING;
+ return base::Acquire_CompareAndSwap(¶llel_sweeping_, SWEEPING_PENDING,
+ SWEEPING_IN_PROGRESS) ==
+ SWEEPING_PENDING;
}
bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
// because they are marked black).
void ResetLiveBytes() {
if (FLAG_gc_verbose) {
- PrintF("ResetLiveBytes:%p:%x->0\n",
- static_cast<void*>(this), live_byte_count_);
+ PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
+ live_byte_count_);
}
live_byte_count_ = 0;
}
void IncrementLiveBytes(int by) {
if (FLAG_gc_verbose) {
- printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
- static_cast<void*>(this), live_byte_count_,
- ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+ printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
+ live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
live_byte_count_ + by);
}
live_byte_count_ += by;
static const intptr_t kSizeOffset = 0;
static const intptr_t kLiveBytesOffset =
- kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
- kPointerSize + kPointerSize +
- kPointerSize + kPointerSize + kPointerSize + kIntSize;
+ kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
+ kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
- static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
- kIntSize + kIntSize + kPointerSize +
- 5 * kPointerSize +
- kPointerSize + kPointerSize;
+ static const size_t kHeaderSize =
+ kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
+ kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
// code alignment to be suitable for both. Also aligned to 32 words because
// the marking bitmap is arranged in 32 bit chunks.
static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset = kBodyOffset - 1 +
+ static const int kObjectStartOffset =
+ kBodyOffset - 1 +
(kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
size_t size() const { return size_; }
- void set_size(size_t size) {
- size_ = size;
- }
+ void set_size(size_t size) { size_ = size; }
void SetArea(Address area_start, Address area_end) {
area_start_ = area_start;
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
- bool ContainsOnlyData() {
- return IsFlagSet(CONTAINS_ONLY_DATA);
- }
+ bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
bool InNewSpace() {
return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
}
- bool InToSpace() {
- return IsFlagSet(IN_TO_SPACE);
- }
+ bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
- bool InFromSpace() {
- return IsFlagSet(IN_FROM_SPACE);
- }
+ bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
// ---------------------------------------------------------------------
// Markbits support
}
inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset =
- reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+ const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
}
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}
- inline SkipList* skip_list() {
- return skip_list_;
- }
+ inline SkipList* skip_list() { return skip_list_; }
- inline void set_skip_list(SkipList* skip_list) {
- skip_list_ = skip_list;
- }
+ inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
- inline SlotsBuffer* slots_buffer() {
- return slots_buffer_;
- }
+ inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
- inline SlotsBuffer** slots_buffer_address() {
- return &slots_buffer_;
- }
+ inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
void MarkEvacuationCandidate() {
DCHECK(slots_buffer_ == NULL);
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
- int area_size() {
- return static_cast<int>(area_end() - area_start());
- }
+ int area_size() { return static_cast<int>(area_end() - area_start()); }
bool CommitArea(size_t requested);
// Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() {
- return high_water_mark_;
- }
+ size_t CommittedPhysicalMemory() { return high_water_mark_; }
static inline void UpdateHighWaterMark(Address mark);
intptr_t available_in_huge_free_list_;
intptr_t non_available_small_blocks_;
- static MemoryChunk* Initialize(Heap* heap,
- Address base,
- size_t size,
- Address area_start,
- Address area_end,
- Executability executable,
- Space* owner);
+ static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ Executability executable, Space* owner);
private:
// next_chunk_ holds a pointer of type MemoryChunk
inline void ClearGCFields();
- static inline Page* Initialize(Heap* heap,
- MemoryChunk* chunk,
- Executability executable,
- PagedSpace* owner);
+ static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, PagedSpace* owner);
void InitializeAsAnchor(PagedSpace* owner);
class LargePage : public MemoryChunk {
public:
- HeapObject* GetObject() {
- return HeapObject::FromAddress(area_start());
- }
+ HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
inline LargePage* next_page() const {
return static_cast<LargePage*>(next_chunk());
}
- inline void set_next_page(LargePage* page) {
- set_next_chunk(page);
- }
+ inline void set_next_page(LargePage* page) { set_next_chunk(page); }
+
private:
static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
class SkipList {
public:
- SkipList() {
- Clear();
- }
+ SkipList() { Clear(); }
void Clear() {
for (int idx = 0; idx < kSize; idx++) {
}
}
- Address StartFor(Address addr) {
- return starts_[RegionNumber(addr)];
- }
+ Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
void AddObject(Address addr, int size) {
int start_region = RegionNumber(addr);
void TearDown();
- Page* AllocatePage(
- intptr_t size, PagedSpace* owner, Executability executable);
+ Page* AllocatePage(intptr_t size, PagedSpace* owner,
+ Executability executable);
- LargePage* AllocateLargePage(
- intptr_t object_size, Space* owner, Executability executable);
+ LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
+ Executability executable);
void Free(MemoryChunk* chunk);
// been allocated by this MemoryAllocator.
V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
return address < lowest_ever_allocated_ ||
- address >= highest_ever_allocated_;
+ address >= highest_ever_allocated_;
}
#ifdef DEBUG
// could be committed later by calling MemoryChunk::CommitArea.
MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
intptr_t commit_area_size,
- Executability executable,
- Space* space);
+ Executability executable, Space* space);
- Address ReserveAlignedMemory(size_t requested,
- size_t alignment,
+ Address ReserveAlignedMemory(size_t requested, size_t alignment,
base::VirtualMemory* controller);
- Address AllocateAlignedMemory(size_t reserve_size,
- size_t commit_size,
- size_t alignment,
- Executability executable,
+ Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+ size_t alignment, Executability executable,
base::VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
- void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
+ void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
size_t size);
void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
+ ObjectSpace space, AllocationAction action);
- void RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback);
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
- bool MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback);
+ bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
static int CodePageGuardStartOffset();
}
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
- Address start,
- size_t commit_size,
+ Address start, size_t commit_size,
size_t reserved_size);
private:
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action)
- : callback(callback), space(space), action(action) {
- }
+ : callback(callback), space(space), action(action) {}
MemoryAllocationCallback callback;
ObjectSpace space;
AllocationAction action;
};
// A List of callback that are triggered when memory is allocated or free'd
- List<MemoryAllocationCallbackRegistration>
- memory_allocation_callbacks_;
+ List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
class ObjectIterator : public Malloced {
public:
- virtual ~ObjectIterator() { }
+ virtual ~ObjectIterator() {}
virtual HeapObject* next_object() = 0;
};
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
-class HeapObjectIterator: public ObjectIterator {
+class HeapObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
// If the size function is not given, the iterator calls the default
return NULL;
}
- virtual HeapObject* next_object() {
- return Next();
- }
+ virtual HeapObject* next_object() { return Next(); }
private:
enum PageMode { kOnePageOnly, kAllPagesInSpace };
- Address cur_addr_; // Current iteration point.
- Address cur_end_; // End iteration point.
+ Address cur_addr_; // Current iteration point.
+ Address cur_end_; // End iteration point.
HeapObjectCallback size_func_; // Size function or NULL.
PagedSpace* space_;
PageMode page_mode_;
bool AdvanceToNextPage();
// Initializes fields.
- inline void Initialize(PagedSpace* owner,
- Address start,
- Address end,
- PageMode mode,
- HeapObjectCallback size_func);
+ inline void Initialize(PagedSpace* owner, Address start, Address end,
+ PageMode mode, HeapObjectCallback size_func);
};
// space.
class AllocationInfo {
public:
- AllocationInfo() : top_(NULL), limit_(NULL) {
- }
+ AllocationInfo() : top_(NULL), limit_(NULL) {}
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
- (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
top_ = top;
}
INLINE(Address top()) const {
SLOW_DCHECK(top_ == NULL ||
- (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
return top_;
}
- Address* top_address() {
- return &top_;
- }
+ Address* top_address() { return &top_; }
INLINE(void set_limit(Address limit)) {
SLOW_DCHECK(limit == NULL ||
- (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
limit_ = limit;
}
INLINE(Address limit()) const {
SLOW_DCHECK(limit_ == NULL ||
- (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
+ 0);
return limit_;
}
- Address* limit_address() {
- return &limit_;
- }
+ Address* limit_address() { return &limit_; }
#ifdef DEBUG
bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
- && (top_ <= limit_);
+ return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+ (top_ <= limit_);
}
#endif
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
+class FreeListNode : public HeapObject {
public:
// Obtain a free-list node from a raw address. This is not a cast because
// it does not check nor require that the first word at the address is a map
// the end element of the linked list of free memory blocks.
class FreeListCategory {
public:
- FreeListCategory() :
- top_(0),
- end_(NULL),
- available_(0) {}
+ FreeListCategory() : top_(0), end_(NULL), available_(0) {}
intptr_t Concatenate(FreeListCategory* category);
void Free(FreeListNode* node, int size_in_bytes);
- FreeListNode* PickNodeFromList(int *node_size);
- FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
+ FreeListNode* PickNodeFromList(int* node_size);
+ FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
intptr_t EvictFreeListItemsInList(Page* p);
bool ContainsPageFreeListItemsInList(Page* p);
base::Mutex* mutex() { return &mutex_; }
- bool IsEmpty() {
- return top() == 0;
- }
+ bool IsEmpty() { return top() == 0; }
#ifdef DEBUG
intptr_t SumFreeList();
class AllocationResult {
public:
// Implicit constructor from Object*.
- AllocationResult(Object* object) : object_(object), // NOLINT
- retry_space_(INVALID_SPACE) { }
+ AllocationResult(Object* object) // NOLINT
+ : object_(object),
+ retry_space_(INVALID_SPACE) {}
- AllocationResult() : object_(NULL),
- retry_space_(INVALID_SPACE) { }
+ AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {}
static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
return AllocationResult(space);
}
private:
- explicit AllocationResult(AllocationSpace space) : object_(NULL),
- retry_space_(space) { }
+ explicit AllocationResult(AllocationSpace space)
+ : object_(NULL), retry_space_(space) {}
Object* object_;
AllocationSpace retry_space_;
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
+ PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
Executability executable);
virtual ~PagedSpace() {}
Address limit() { return allocation_info_.limit(); }
// The allocation top address.
- Address* allocation_top_address() {
- return allocation_info_.top_address();
- }
+ Address* allocation_top_address() { return allocation_info_.top_address(); }
// The allocation limit address.
Address* allocation_limit_address() {
return size_in_bytes - wasted;
}
- void ResetFreeList() {
- free_list_.Reset();
- }
+ void ResetFreeList() { free_list_.Reset(); }
// Set space allocation info.
void SetTopAndLimit(Address top, Address limit) {
SetTopAndLimit(NULL, NULL);
}
- void Allocate(int bytes) {
- accounting_stats_.AllocateBytes(bytes);
- }
+ void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
void IncreaseCapacity(int size);
// result before _and_ after evacuation has finished.
static bool ShouldBeSweptBySweeperThreads(Page* p) {
return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
- !p->WasSweptPrecisely();
+ !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely();
}
- void IncrementUnsweptFreeBytes(intptr_t by) {
- unswept_free_bytes_ += by;
- }
+ void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
void IncreaseUnsweptFreeBytes(Page* p) {
DCHECK(ShouldBeSweptBySweeperThreads(p));
unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}
- void DecrementUnsweptFreeBytes(intptr_t by) {
- unswept_free_bytes_ -= by;
- }
+ void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
void DecreaseUnsweptFreeBytes(Page* p) {
DCHECK(ShouldBeSweptBySweeperThreads(p));
unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}
- void ResetUnsweptFreeBytes() {
- unswept_free_bytes_ = 0;
- }
+ void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
// This function tries to steal size_in_bytes memory from the sweeper threads'
// free-lists. If it does not succeed in stealing enough memory, it will wait
// It returns true when sweeping is completed and false otherwise.
bool EnsureSweeperProgress(intptr_t size_in_bytes);
- void set_end_of_unswept_pages(Page* page) {
- end_of_unswept_pages_ = page;
- }
+ void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
- Page* end_of_unswept_pages() {
- return end_of_unswept_pages_;
- }
+ Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
int CountTotalPages();
// Return size of allocatable area on a page in this space.
- inline int AreaSize() {
- return area_size_;
- }
+ inline int AreaSize() { return area_size_; }
void CreateEmergencyMemory();
void FreeEmergencyMemory();
// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
-class HistogramInfo: public NumberAndSizeInfo {
+class HistogramInfo : public NumberAndSizeInfo {
public:
HistogramInfo() : NumberAndSizeInfo() {}
};
-enum SemiSpaceId {
- kFromSpace = 0,
- kToSpace = 1
-};
+enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
class SemiSpace;
// GC related flags copied from from-space to to-space when
// flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
- (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+ (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+ (1 << MemoryChunk::SCAN_ON_SCAVENGE);
static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
return static_cast<NewSpacePage*>(next_chunk());
}
- inline void set_next_page(NewSpacePage* page) {
- set_next_chunk(page);
- }
+ inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
inline NewSpacePage* prev_page() const {
return static_cast<NewSpacePage*>(prev_chunk());
}
- inline void set_prev_page(NewSpacePage* page) {
- set_prev_chunk(page);
- }
+ inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
- SemiSpace* semi_space() {
- return reinterpret_cast<SemiSpace*>(owner());
- }
+ SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
bool is_anchor() { return !this->InNewSpace(); }
static bool IsAtStart(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
- == kObjectStartOffset;
+ return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
+ kObjectStartOffset;
}
static bool IsAtEnd(Address addr) {
return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
}
- Address address() {
- return reinterpret_cast<Address>(this);
- }
+ Address address() { return reinterpret_cast<Address>(this); }
// Finds the NewSpacePage containing the given address.
static inline NewSpacePage* FromAddress(Address address_in_page) {
private:
// Create a NewSpacePage object that is only used as anchor
// for the doubly-linked list of real pages.
- explicit NewSpacePage(SemiSpace* owner) {
- InitializeAsAnchor(owner);
- }
+ explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
- static NewSpacePage* Initialize(Heap* heap,
- Address start,
+ static NewSpacePage* Initialize(Heap* heap, Address start,
SemiSpace* semi_space);
// Initialize a fake NewSpacePage used as sentinel at the ends
public:
// Constructor.
SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- start_(NULL),
- age_mark_(NULL),
- id_(semispace),
- anchor_(this),
- current_page_(NULL) { }
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ start_(NULL),
+ age_mark_(NULL),
+ id_(semispace),
+ anchor_(this),
+ current_page_(NULL) {}
// Sets up the semispace using the given chunk.
void SetUp(Address start, int initial_capacity, int maximum_capacity);
}
// Returns the start address of the current page of the space.
- Address page_low() {
- return current_page_->area_start();
- }
+ Address page_low() { return current_page_->area_start(); }
// Returns one past the end address of the space.
- Address space_end() {
- return anchor_.prev_page()->area_end();
- }
+ Address space_end() { return anchor_.prev_page()->area_end(); }
// Returns one past the end address of the current page of the space.
- Address page_high() {
- return current_page_->area_end();
- }
+ Address page_high() { return current_page_->area_end(); }
bool AdvancePage() {
NewSpacePage* next_page = current_page_->next_page();
// True if the address is in the address range of this semispace (not
// necessarily below the allocation pointer).
bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_)
- == reinterpret_cast<uintptr_t>(start_);
+ return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+ reinterpret_cast<uintptr_t>(start_);
}
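The Contains check just above works because each semispace is a power-of-two-sized block aligned to its own size, so masking any address with address_mask_ recovers the block's start exactly when the address lies inside the space. A small self-contained sketch of that arithmetic, using illustrative sizes and addresses rather than V8's actual layout:

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative only: a 1 MB semispace aligned to a 1 MB boundary.
  const uintptr_t kSize = uintptr_t{1} << 20;
  const uintptr_t kStart = uintptr_t{16} << 20;  // 16 MB, hence 1 MB-aligned
  const uintptr_t kAddressMask = ~(kSize - 1);   // clears the in-space offset bits

  auto contains = [&](uintptr_t addr) { return (addr & kAddressMask) == kStart; };

  assert(contains(kStart));              // first byte of the space
  assert(contains(kStart + kSize - 1));  // last byte of the space
  assert(!contains(kStart + kSize));     // one past the end
  assert(!contains(kStart - 1));         // just below the start
  return 0;
}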
// True if the object is a heap object in the address range of this
friend class SemiSpaceIterator;
friend class NewSpacePageIterator;
+
public:
TRACK_MEMORY("SemiSpace")
};
virtual HeapObject* next_object() { return Next(); }
private:
- void Initialize(Address start,
- Address end,
- HeapObjectCallback size_func);
+ void Initialize(Address start, Address end, HeapObjectCallback size_func);
// The current iteration point.
Address current_;
public:
// Constructor.
explicit NewSpace(Heap* heap)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace),
- reservation_(),
- inline_allocation_limit_step_(0) {}
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ to_space_(heap, kToSpace),
+ from_space_(heap, kFromSpace),
+ reservation_(),
+ inline_allocation_limit_step_(0) {}
// Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
// True if the address or object lies in the address range of either
// semispace (not necessarily below the allocation pointer).
bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_)
- == reinterpret_cast<uintptr_t>(start_);
+ return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+ reinterpret_cast<uintptr_t>(start_);
}
bool Contains(Object* o) {
// Return the allocated bytes in the active semispace.
virtual intptr_t Size() {
return pages_used_ * NewSpacePage::kAreaSize +
- static_cast<int>(top() - to_space_.page_low());
+ static_cast<int>(top() - to_space_.page_low());
}
// The same, but returning an int. We have to have the one that returns
// Return the total amount of memory committed for new space.
intptr_t MaximumCommittedMemory() {
return to_space_.MaximumCommittedMemory() +
- from_space_.MaximumCommittedMemory();
+ from_space_.MaximumCommittedMemory();
}
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
// Return the available bytes without growing.
- intptr_t Available() {
- return Capacity() - Size();
- }
+ intptr_t Available() { return Capacity() - Size(); }
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
return to_space_.MaximumCapacity();
}
- bool IsAtMaximumCapacity() {
- return Capacity() == MaximumCapacity();
- }
+ bool IsAtMaximumCapacity() { return Capacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
int InitialCapacity() {
}
// The allocation top and limit address.
- Address* allocation_top_address() {
- return allocation_info_.top_address();
- }
+ Address* allocation_top_address() { return allocation_info_.top_address(); }
// The allocation limit address.
Address* allocation_limit_address() {
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- OldSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
+ OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
Executability executable)
- : PagedSpace(heap, max_capacity, id, executable) {
- }
+ : PagedSpace(heap, max_capacity, id, executable) {}
public:
TRACK_MEMORY("OldSpace")
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_DCHECK((space).page_low() <= (info).top() \
- && (info).top() <= (space).page_high() \
- && (info).limit() <= (space).page_high())
+ SLOW_DCHECK((space).page_low() <= (info).top() && \
+ (info).top() <= (space).page_high() && \
+ (info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
// Creates a map space object with a maximum capacity.
MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- max_map_space_pages_(kMaxMapPageIndex - 1) {
- }
+ max_map_space_pages_(kMaxMapPageIndex - 1) {}
// Given an index, returns the page address.
// TODO(1600): this limit is artificial just to keep code compilable
public:
// Creates a cell space object with a maximum capacity.
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
- }
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
virtual int RoundSizeDownToObjectAlignment(int size) {
if (IsPowerOf2(Cell::kSize)) {
class PropertyCellSpace : public PagedSpace {
public:
// Creates a property cell space object with a maximum capacity.
- PropertyCellSpace(Heap* heap, intptr_t max_capacity,
- AllocationSpace id)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
- }
+ PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
virtual int RoundSizeDownToObjectAlignment(int size) {
if (IsPowerOf2(PropertyCell::kSize)) {
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
- MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
- Executability executable);
+ MUST_USE_RESULT AllocationResult
+ AllocateRaw(int object_size, Executability executable);
// Available bytes for objects in this space.
inline intptr_t Available();
- virtual intptr_t Size() {
- return size_;
- }
+ virtual intptr_t Size() { return size_; }
- virtual intptr_t SizeOfObjects() {
- return objects_size_;
- }
+ virtual intptr_t SizeOfObjects() { return objects_size_; }
- intptr_t MaximumCommittedMemory() {
- return maximum_committed_;
- }
+ intptr_t MaximumCommittedMemory() { return maximum_committed_; }
- intptr_t CommittedMemory() {
- return Size();
- }
+ intptr_t CommittedMemory() { return Size(); }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
- int PageCount() {
- return page_count_;
- }
+ int PageCount() { return page_count_; }
// Finds an object for a given address; returns a Smi if it is not found.
// The function iterates through all objects in this space and may be slow.
intptr_t maximum_committed_;
// The head of the linked list of large object chunks.
LargePage* first_page_;
- intptr_t size_; // allocated bytes
- int page_count_; // number of chunks
+ intptr_t size_; // allocated bytes
+ int page_count_; // number of chunks
intptr_t objects_size_; // size of objects
// Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
HashMap chunk_map_;
};
-class LargeObjectIterator: public ObjectIterator {
+class LargeObjectIterator : public ObjectIterator {
public:
explicit LargeObjectIterator(LargeObjectSpace* space);
LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
private:
- enum State {
- kOldPointerState,
- kMapState,
- kLargeObjectState,
- kFinishedState
- };
+ enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
State state_;
PageIterator old_pointer_iterator_;
PageIterator map_iterator_;
static const int kMaxComments = 64;
};
#endif
-
-} } // namespace v8::internal
-
-#endif // V8_SPACES_H_
+}
+} // namespace v8::internal
+#endif // V8_HEAP_SPACES_H_
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/sweeper-thread.h"
+#include "src/heap/sweeper-thread.h"
#include "src/v8.h"
static const int kSweeperThreadStackSize = 64 * KB;
SweeperThread::SweeperThread(Isolate* isolate)
- : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
- isolate_(isolate),
- heap_(isolate->heap()),
- collector_(heap_->mark_compact_collector()),
- start_sweeping_semaphore_(0),
- end_sweeping_semaphore_(0),
- stop_semaphore_(0) {
+ : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
+ isolate_(isolate),
+ heap_(isolate->heap()),
+ collector_(heap_->mark_compact_collector()),
+ start_sweeping_semaphore_(0),
+ end_sweeping_semaphore_(0),
+ stop_semaphore_(0) {
DCHECK(!FLAG_job_based_sweeping);
base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false));
}
}
-void SweeperThread::StartSweeping() {
- start_sweeping_semaphore_.Signal();
-}
+void SweeperThread::StartSweeping() { start_sweeping_semaphore_.Signal(); }
-void SweeperThread::WaitForSweeperThread() {
- end_sweeping_semaphore_.Wait();
-}
+void SweeperThread::WaitForSweeperThread() { end_sweeping_semaphore_.Wait(); }
bool SweeperThread::SweepingCompleted() {
DCHECK(FLAG_parallel_sweeping);
return max_available;
}
-
-} } // namespace v8::internal
+}
+} // namespace v8::internal
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SWEEPER_THREAD_H_
-#define V8_SWEEPER_THREAD_H_
+#ifndef V8_HEAP_SWEEPER_THREAD_H_
+#define V8_HEAP_SWEEPER_THREAD_H_
#include "src/base/atomicops.h"
#include "src/base/platform/platform.h"
#include "src/flags.h"
#include "src/utils.h"
-#include "src/spaces.h"
+#include "src/heap/spaces.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
namespace v8 {
namespace internal {
base::Semaphore stop_semaphore_;
volatile base::AtomicWord stop_thread_;
};
-} } // namespace v8::internal
-
-#endif // V8_SWEEPER_THREAD_H_
+}
+} // namespace v8::internal
+#endif // V8_HEAP_SWEEPER_THREAD_H_
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/macro-assembler.h"
namespace v8 {
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
+#include "src/heap/spaces.h"
+#include "src/heap/sweeper-thread.h"
#include "src/heap-profiler.h"
#include "src/hydrogen.h"
#include "src/isolate-inl.h"
#include "src/scopeinfo.h"
#include "src/serialize.h"
#include "src/simulator.h"
-#include "src/spaces.h"
#include "src/stub-cache.h"
-#include "src/sweeper-thread.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
#include "src/global-handles.h"
#include "src/handles.h"
#include "src/hashmap.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/optimizing-compiler-thread.h"
#include "src/regexp-stack.h"
#include "src/runtime.h"
#include "src/char-predicates-inl.h"
#include "src/conversions.h"
+#include "src/heap/spaces-inl.h"
#include "src/messages.h"
-#include "src/spaces-inl.h"
#include "src/token.h"
namespace v8 {
#include "src/allocation.h"
#include "src/handles.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/jsregexp.h"
#include "src/objects.h"
#include "src/api.h"
#include "src/execution.h"
+#include "src/heap/spaces-inl.h"
#include "src/messages.h"
-#include "src/spaces-inl.h"
namespace v8 {
namespace internal {
#include "src/elements.h"
#include "src/factory.h"
#include "src/field-index-inl.h"
-#include "src/heap-inl.h"
-#include "src/heap.h"
-#include "src/incremental-marking.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/objects.h"
#include "src/objects-visiting.h"
#include "src/property.h"
#include "src/prototype.h"
-#include "src/spaces.h"
#include "src/store-buffer.h"
#include "src/transitions-inl.h"
#include "src/v8memory.h"
#include "src/field-index-inl.h"
#include "src/field-index.h"
#include "src/full-codegen.h"
+#include "src/heap/mark-compact.h"
#include "src/hydrogen.h"
#include "src/isolate-inl.h"
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
-#include "src/mark-compact.h"
#include "src/objects-inl.h"
#include "src/objects-visiting-inl.h"
#include "src/prototype.h"
#include "src/execution.h"
#include "src/full-codegen.h"
#include "src/global-handles.h"
+#include "src/heap/mark-compact.h"
#include "src/isolate-inl.h"
-#include "src/mark-compact.h"
#include "src/scopeinfo.h"
namespace v8 {
#define V8_SAFEPOINT_TABLE_H_
#include "src/allocation.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/v8memory.h"
#include "src/zone.h"
#include "src/checks.h"
#include "src/elements-kind.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/objects.h"
// Objects & heap
#include "src/objects-inl.h" // NOLINT
-#include "src/spaces-inl.h" // NOLINT
-#include "src/heap-inl.h" // NOLINT
-#include "src/incremental-marking-inl.h" // NOLINT
-#include "src/mark-compact-inl.h" // NOLINT
+#include "src/heap/spaces-inl.h" // NOLINT
+#include "src/heap/heap-inl.h" // NOLINT
+#include "src/heap/incremental-marking-inl.h" // NOLINT
+#include "src/heap/mark-compact-inl.h" // NOLINT
#include "src/log-inl.h" // NOLINT
#include "src/handles-inl.h" // NOLINT
#include "src/types-inl.h" // NOLINT
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/serialize.h"
#include "src/x64/assembler-x64.h"
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/macro-assembler.h"
namespace v8 {
#include "src/v8.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "test/cctest/cctest.h"
using namespace v8;
#include "src/bootstrapper.h"
#include "src/compilation-cache.h"
#include "src/debug.h"
+#include "src/heap/spaces.h"
#include "src/ic-inl.h"
#include "src/natives.h"
#include "src/objects.h"
#include "src/scopeinfo.h"
#include "src/serialize.h"
#include "src/snapshot.h"
-#include "src/spaces.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
#include "test/cctest/cctest.h"
#include "src/api.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
#include "src/objects.h"
using namespace v8::internal;
'../../src/full-codegen.h',
'../../src/func-name-inferrer.cc',
'../../src/func-name-inferrer.h',
- '../../src/gc-tracer.cc',
- '../../src/gc-tracer.h',
'../../src/gdb-jit.cc',
'../../src/gdb-jit.h',
'../../src/global-handles.cc',
'../../src/handles.cc',
'../../src/handles.h',
'../../src/hashmap.h',
- '../../src/heap-inl.h',
'../../src/heap-profiler.cc',
'../../src/heap-profiler.h',
'../../src/heap-snapshot-generator-inl.h',
'../../src/heap-snapshot-generator.cc',
'../../src/heap-snapshot-generator.h',
- '../../src/heap.cc',
- '../../src/heap.h',
+ '../../src/heap/gc-tracer.cc',
+ '../../src/heap/gc-tracer.h',
+ '../../src/heap/heap-inl.h',
+ '../../src/heap/heap.cc',
+ '../../src/heap/heap.h',
+ '../../src/heap/incremental-marking-inl.h',
+ '../../src/heap/incremental-marking.cc',
+ '../../src/heap/incremental-marking.h',
+ '../../src/heap/mark-compact-inl.h',
+ '../../src/heap/mark-compact.cc',
+ '../../src/heap/mark-compact.h',
+ '../../src/heap/spaces-inl.h',
+ '../../src/heap/spaces.cc',
+ '../../src/heap/spaces.h',
+ '../../src/heap/sweeper-thread.h',
+ '../../src/heap/sweeper-thread.cc',
'../../src/hydrogen-alias-analysis.h',
'../../src/hydrogen-bce.cc',
'../../src/hydrogen-bce.h',
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
- '../../src/incremental-marking.cc',
- '../../src/incremental-marking.h',
'../../src/interface.cc',
'../../src/interface.h',
'../../src/interpreter-irregexp.cc',
'../../src/lookup.cc',
'../../src/lookup.h',
'../../src/macro-assembler.h',
- '../../src/mark-compact.cc',
- '../../src/mark-compact.h',
'../../src/messages.cc',
'../../src/messages.h',
'../../src/msan.h',
'../../src/snapshot.h',
'../../src/snapshot-source-sink.cc',
'../../src/snapshot-source-sink.h',
- '../../src/spaces-inl.h',
- '../../src/spaces.cc',
- '../../src/spaces.h',
'../../src/store-buffer-inl.h',
'../../src/store-buffer.cc',
'../../src/store-buffer.h',
'../../src/strtod.h',
'../../src/stub-cache.cc',
'../../src/stub-cache.h',
- '../../src/sweeper-thread.h',
- '../../src/sweeper-thread.cc',
'../../src/token.cc',
'../../src/token.h',
'../../src/transitions-inl.h',
runtime/nonconf
runtime/printf
runtime/printf_format
-runtime/references
runtime/rtti
runtime/sizeof
runtime/string