#include "src/natives.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
+#include "src/serialize.h"
#include "src/snapshot.h"
#include "src/utils.h"
#include "src/v8threads.h"
reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
+ target_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
configured_(false),
external_string_table_(this),
chunks_queued_for_free_(NULL),
- gc_callbacks_depth_(0) {
+ gc_callbacks_depth_(0),
+ deserialization_complete_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
set_array_buffers_list(Smi::FromInt(0));
set_allocation_sites_list(Smi::FromInt(0));
set_encountered_weak_collections(Smi::FromInt(0));
+ set_encountered_weak_cells(Smi::FromInt(0));
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
}
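+// Reserves space for the given reservations, performing garbage collections
+// and retrying up to kThreshold times on failure. Returns false if space
+// could not be reserved, leaving out-of-memory handling to the caller
+// (previously this function aborted via V8::FatalProcessOutOfMemory itself).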
-void Heap::ReserveSpace(int* sizes, Address* locations_out) {
+bool Heap::ReserveSpace(Reservation* reservations) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
- DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
- for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
- if (sizes[space] != 0) {
- AllocationResult allocation;
- if (space == NEW_SPACE) {
- allocation = new_space()->AllocateRaw(sizes[space]);
- } else {
- allocation = paged_space(space)->AllocateRaw(sizes[space]);
- }
- FreeListNode* node;
- if (!allocation.To(&node)) {
+ for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
+ Reservation* reservation = &reservations[space];
+ DCHECK_LE(1, reservation->length());
+ if (reservation->at(0).size == 0) continue;
+ bool perform_gc = false;
+ if (space == LO_SPACE) {
+ DCHECK_EQ(1, reservation->length());
+ perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
+ } else {
+ for (auto& chunk : *reservation) {
+ AllocationResult allocation;
+ int size = chunk.size;
+ DCHECK_LE(size, MemoryAllocator::PageAreaSize(
+ static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
- Heap::CollectGarbage(NEW_SPACE,
- "failed to reserve space in the new space");
+ allocation = new_space()->AllocateRaw(size);
} else {
- AbortIncrementalMarkingAndCollectGarbage(
- this, static_cast<AllocationSpace>(space),
- "failed to reserve space in paged space");
+ allocation = paged_space(space)->AllocateRaw(size);
}
- gc_performed = true;
- break;
+ FreeListNode* node;
+ if (allocation.To(&node)) {
+ // Mark with a free list node, in case we have a GC before
+ // deserializing.
+ node->set_size(this, size);
+ DCHECK_LT(space, Serializer::kNumberOfPreallocatedSpaces);
+ chunk.start = node->address();
+ chunk.end = node->address() + size;
+ } else {
+ perform_gc = true;
+ break;
+ }
+ }
+ }
+ if (perform_gc) {
+ if (space == NEW_SPACE) {
+ Heap::CollectGarbage(NEW_SPACE,
+ "failed to reserve space in the new space");
} else {
- // Mark with a free list node, in case we have a GC before
- // deserializing.
- node->set_size(this, sizes[space]);
- locations_out[space] = node->address();
+ AbortIncrementalMarkingAndCollectGarbage(
+ this, static_cast<AllocationSpace>(space),
+ "failed to reserve space in paged or large object space");
}
+ gc_performed = true;
+ break; // Abort for-loop over spaces and retry.
}
}
}
- if (gc_performed) {
- // Failed to reserve the space after several attempts.
- V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
- }
+ return !gc_performed;
}
while (head_start != head_end) {
int size = static_cast<int>(*(head_start++));
HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+ // New space allocation in SemiSpaceCopyObject marked the region
+ // overlapping with the promotion queue as uninitialized.
+ MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
+ MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
emergency_stack_->Add(Entry(obj, size));
}
rear_ = head_end;
// Copy objects reachable from the encountered weak collections list.
scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+ // Copy objects reachable from the encountered weak cells.
+ scavenge_visitor.VisitPointer(&encountered_weak_cells_);
// Copy objects reachable from the code flushing candidates list.
MarkCompactCollector* collector = mark_compact_collector();
roots_[entry.index] = map;
}
+ { // Create a separate external one-byte string map for native sources.
+ AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
+ ExternalOneByteString::kSize);
+ if (!allocation.To(&obj)) return false;
+ set_native_source_string_map(Map::cast(obj));
+ }
+
ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
undetectable_string_map()->set_is_undetectable();
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
+ ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
}
-#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
- V(Float32x4, float32x4) \
- V(Float64x2, float64x2) \
- V(Int32x4, int32x4)
-
-
-#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
-AllocationResult Heap::Allocate##TYPE(type##_value_t value, \
- PretenureFlag pretenure) { \
- STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize); \
- \
- AllocationSpace space = \
- SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure); \
- \
- HeapObject* result; \
- { AllocationResult allocation = \
- AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE); \
- if (!allocation.To(&result)) return allocation; \
- } \
- \
- result->set_map_no_write_barrier( \
- isolate()->native_context()->type##_function()->initial_map()); \
- JSObject::cast(result)->set_properties(empty_fixed_array()); \
- JSObject::cast(result)->set_elements(empty_fixed_array()); \
- \
- HeapObject* storage; \
- int storage_size = \
- FixedTypedArrayBase::kDataOffset + k##TYPE##Size; \
- space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure); \
- { AllocationResult allocation = \
- AllocateRaw(storage_size, space, OLD_DATA_SPACE); \
- if (!allocation.To(&storage)) return allocation; \
- } \
- \
- storage->set_map( \
- *isolate()->factory()->fixed_##type##_array_map()); \
- FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
- elements->set_length(static_cast<int>(1)); \
- memset(elements->DataPtr(), 0, elements->DataSize()); \
- Fixed##TYPE##Array::cast(storage)->set(0, value); \
- TYPE::cast(result)->set_value(storage); \
- return result; \
-}
-
-
-SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
-
-
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
}
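+// Allocates a WeakCell in old pointer space (its value field is a tagged
+// pointer) with the next field initialized to the undefined sentinel.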
+AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
+ int size = WeakCell::kSize;
+ STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
+ result->set_map_no_write_barrier(weak_cell_map());
+ WeakCell::cast(result)->initialize(value);
+ WeakCell::cast(result)->set_next(undefined_value(), SKIP_WRITE_BARRIER);
+ return result;
+}
+
+
void Heap::CreateApiObjects() {
HandleScope scope(isolate());
Factory* factory = isolate()->factory();
set_instanceof_cache_map(Smi::FromInt(0));
set_instanceof_cache_answer(Smi::FromInt(0));
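+ // Create a private symbol for each entry in PRIVATE_SYMBOL_LIST and
+ // register it as a root.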
+ {
+ HandleScope scope(isolate());
+#define SYMBOL_INIT(name) \
+ Handle<Symbol> name = factory->NewPrivateOwnSymbol(); \
+ roots_[k##name##RootIndex] = *name;
+ PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+ }
+
CreateFixedStubs();
// Allocate the dictionary of intrinsic function names.
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
// The symbol registry is initialized lazily.
- set_symbol_registry(undefined_value());
+ set_symbol_registry(Smi::FromInt(0));
// Allocate object to hold object observation state.
set_observation_state(*factory->NewJSObjectFromMap(
// Number of queued microtasks stored in Isolate::pending_microtask_count().
set_microtask_queue(empty_fixed_array());
- set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
- set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
- set_frozen_symbol(*factory->NewPrivateOwnSymbol());
- set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
- set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
- set_generic_symbol(*factory->NewPrivateOwnSymbol());
- set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
- set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
- set_observed_symbol(*factory->NewPrivateOwnSymbol());
- set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
- set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
- set_home_object_symbol(*factory->NewPrivateOwnSymbol());
-
Handle<SeededNumberDictionary> slow_element_dictionary =
SeededNumberDictionary::New(isolate(), 0, TENURED);
slow_element_dictionary->set_requires_slow_elements();
AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- SLOW_DCHECK(!source->IsJSFunction());
-
// Make the clone.
Map* map = source->map();
+
+ // We can only clone normal objects or arrays. Copying anything else
+ // will break invariants.
+ CHECK(map->instance_type() == JS_OBJECT_TYPE ||
+ map->instance_type() == JS_ARRAY_TYPE);
+
int object_size = map->instance_size();
HeapObject* clone;
}
-void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- incremental_marking()->Step(step_size,
- IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
-
- if (incremental_marking()->IsComplete()) {
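+// Finalizes incremental marking with a mark-compact GC if marking is already
+// complete, or if the marking deque is drained and the idle time handler
+// estimates that a full mark-compact fits into the given idle time.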
+void Heap::TryFinalizeIdleIncrementalMarking(
+ size_t idle_time_in_ms, size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms) {
+ if (incremental_marking()->IsComplete() ||
+ (mark_compact_collector()->IsMarkingDequeEmpty() &&
+ gc_idle_time_handler_.ShouldDoMarkCompact(
+ idle_time_in_ms, size_of_objects,
+ mark_compact_speed_in_bytes_per_ms))) {
IdleMarkCompact("idle notification: finalize incremental");
}
}
tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
heap_state.scavenge_speed_in_bytes_per_ms =
static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
- heap_state.available_new_space_memory = new_space_.Available();
+ heap_state.used_new_space_size = new_space_.Size();
heap_state.new_space_capacity = new_space_.Capacity();
heap_state.new_space_allocation_throughput_in_bytes_per_ms =
static_cast<size_t>(
gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
bool result = false;
+ int actual_time_in_ms = 0;
switch (action.type) {
case DONE:
result = true;
break;
- case DO_INCREMENTAL_MARKING:
+ case DO_INCREMENTAL_MARKING: {
if (incremental_marking()->IsStopped()) {
incremental_marking()->Start();
}
- AdvanceIdleIncrementalMarking(action.parameter);
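+ // Force a marking step but not completion: finalization is attempted
+ // separately below if idle time remains.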
+ incremental_marking()->Step(action.parameter,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_MARKING,
+ IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+ actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+ int remaining_idle_time_in_ms = idle_time_in_ms - actual_time_in_ms;
+ if (remaining_idle_time_in_ms > 0) {
+ TryFinalizeIdleIncrementalMarking(
+ remaining_idle_time_in_ms, heap_state.size_of_objects,
+ heap_state.mark_compact_speed_in_bytes_per_ms);
+ }
break;
+ }
case DO_FULL_GC: {
HistogramTimerScope scope(isolate_->counters()->gc_context());
if (contexts_disposed_) {
break;
}
- int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
- if (actual_time_ms <= idle_time_in_ms) {
- isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
- idle_time_in_ms - actual_time_ms);
+ actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+ if (actual_time_in_ms <= idle_time_in_ms) {
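+ // Only sample the undershot histogram for actions that did real work;
+ // DONE and DO_NOTHING trivially finish within the idle time.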
+ if (action.type != DONE && action.type != DO_NOTHING) {
+ isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
+ idle_time_in_ms - actual_time_in_ms);
+ }
} else {
isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
- actual_time_ms - idle_time_in_ms);
+ actual_time_in_ms - idle_time_in_ms);
}
if (FLAG_trace_idle_notification) {
PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
- idle_time_in_ms, actual_time_ms);
+ idle_time_in_ms, actual_time_in_ms);
action.Print();
PrintF("]\n");
}
return property_cell_space_->Contains(addr);
case LO_SPACE:
return lo_space_->SlowContains(addr);
- case INVALID_SPACE:
- break;
}
UNREACHABLE();
return false;
}
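+// Returns true if the root at the given index is in
+// IMMORTAL_IMMOVABLE_ROOT_LIST, i.e. it is never moved or freed by the GC.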
+bool Heap::RootIsImmortalImmovable(int root_index) {
+ switch (root_index) {
+#define CASE(name) \
+ case Heap::k##name##RootIndex: \
+ return true;
+ IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
+#undef CASE
+ default:
+ return false;
+ }
+}
+
+
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
initial_semispace_size_ = max_semi_space_size_;
if (FLAG_trace_gc) {
PrintPID(
- "Min semi-space size cannot be more than the maximum"
+ "Min semi-space size cannot be more than the maximum "
"semi-space size of %d MB\n",
- max_semi_space_size_);
+ max_semi_space_size_ / MB);
}
} else {
initial_semispace_size_ = initial_semispace_size;
initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+ if (FLAG_target_semi_space_size > 0) {
+ int target_semispace_size = FLAG_target_semi_space_size * MB;
+ if (target_semispace_size < initial_semispace_size_) {
+ target_semispace_size_ = initial_semispace_size_;
+ if (FLAG_trace_gc) {
+ PrintPID(
+ "Target semi-space size cannot be less than the minimum "
+ "semi-space size of %d MB\n",
+ initial_semispace_size_ / MB);
+ }
+ } else if (target_semispace_size > max_semi_space_size_) {
+ target_semispace_size_ = max_semi_space_size_;
+ if (FLAG_trace_gc) {
+ PrintPID(
+ "Target semi-space size cannot be less than the maximum "
+ "semi-space size of %d MB\n",
+ max_semi_space_size_ / MB);
+ }
+ } else {
+ target_semispace_size_ = target_semispace_size;
+ }
+ }
+
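+ // Even when the flag is unset, keep the target at or above the initial
+ // semi-space size.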
+ target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
+
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
max_old_generation_size_ =
}
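+// Called once snapshot deserialization has finished (see
+// deserialization_complete_).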
+void Heap::NotifyDeserializationComplete() { deserialization_complete_ = true; }
+
+
void Heap::TearDown() {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {