// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/splay-tree-inl.h"

namespace v8 {
namespace internal {

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, free_space_map, FreeSpaceMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Smi, store_buffer_top, StoreBufferTop) \
  V(Oddball, undefined_value, UndefinedValue) \
  V(Oddball, the_hole_value, TheHoleValue) \
  V(Oddball, null_value, NullValue) \
  V(Oddball, true_value, TrueValue) \
  V(Oddball, false_value, FalseValue) \
  V(Oddball, uninitialized_value, UninitializedValue) \
  V(Oddball, exception, Exception) \
  V(Map, cell_map, CellMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, meta_map, MetaMap) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
  V(Map, native_context_map, NativeContextMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Map, code_map, CodeMap) \
  V(Map, scope_info_map, ScopeInfoMap) \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
  V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
  V(Map, hash_table_map, HashTableMap) \
  V(Map, ordered_hash_table_map, OrderedHashTableMap) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(ByteArray, empty_byte_array, EmptyByteArray) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
  V(Oddball, arguments_marker, ArgumentsMarker) \
  /* The roots above this line should be boring from a GC point of view. */ \
  /* This means they are never in new space and never on a page that is */ \
  /* being compacted. */ \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, string_split_cache, StringSplitCache) \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
  V(Oddball, termination_exception, TerminationException) \
  V(Smi, hash_seed, HashSeed) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, string_map, StringMap) \
  V(Map, ascii_string_map, AsciiStringMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
  V(Map, sliced_string_map, SlicedStringMap) \
  V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_one_byte_data_map, \
    ExternalStringWithOneByteDataMap) \
  V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
  V(Map, short_external_string_map, ShortExternalStringMap) \
  V(Map, short_external_string_with_one_byte_data_map, \
    ShortExternalStringWithOneByteDataMap) \
  V(Map, internalized_string_map, InternalizedStringMap) \
  V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
  V(Map, external_internalized_string_with_one_byte_data_map, \
    ExternalInternalizedStringWithOneByteDataMap) \
  V(Map, external_ascii_internalized_string_map, \
    ExternalAsciiInternalizedStringMap) \
  V(Map, short_external_internalized_string_map, \
    ShortExternalInternalizedStringMap) \
  V(Map, short_external_internalized_string_with_one_byte_data_map, \
    ShortExternalInternalizedStringWithOneByteDataMap) \
  V(Map, short_external_ascii_internalized_string_map, \
    ShortExternalAsciiInternalizedStringMap) \
  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
  V(Map, undetectable_string_map, UndetectableStringMap) \
  V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
  V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
  V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
  V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
  V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
  V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
  V(Map, external_int32x4_array_map, ExternalInt32x4ArrayMap) \
  V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
  V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
  V(Map, external_float32x4_array_map, ExternalFloat32x4ArrayMap) \
  V(Map, external_float64x2_array_map, ExternalFloat64x2ArrayMap) \
  V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array) \
  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array) \
  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array) \
  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
  V(ExternalArray, empty_external_int32x4_array, EmptyExternalInt32x4Array) \
  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array) \
  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
  V(ExternalArray, empty_external_float32x4_array, \
    EmptyExternalFloat32x4Array) \
  V(ExternalArray, empty_external_float64x2_array, \
    EmptyExternalFloat64x2Array) \
  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
  V(ExternalArray, empty_external_uint8_clamped_array, \
    EmptyExternalUint8ClampedArray) \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
  V(Map, fixed_int32x4_array_map, FixedInt32x4ArrayMap) \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
  V(Map, fixed_float32x4_array_map, FixedFloat32x4ArrayMap) \
  V(Map, fixed_float64x2_array_map, FixedFloat64x2ArrayMap) \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
  V(FixedTypedArrayBase, empty_fixed_float32x4_array, \
    EmptyFixedFloat32x4Array) \
  V(FixedTypedArrayBase, empty_fixed_float64x2_array, \
    EmptyFixedFloat64x2Array) \
  V(FixedTypedArrayBase, empty_fixed_int32x4_array, EmptyFixedInt32x4Array) \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
    EmptyFixedUint8ClampedArray) \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
  V(Map, function_context_map, FunctionContextMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, with_context_map, WithContextMap) \
  V(Map, block_context_map, BlockContextMap) \
  V(Map, module_context_map, ModuleContextMap) \
  V(Map, global_context_map, GlobalContextMap) \
  V(Map, undefined_map, UndefinedMap) \
  V(Map, the_hole_map, TheHoleMap) \
  V(Map, null_map, NullMap) \
  V(Map, boolean_map, BooleanMap) \
  V(Map, uninitialized_map, UninitializedMap) \
  V(Map, arguments_marker_map, ArgumentsMarkerMap) \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
  V(Map, exception_map, ExceptionMap) \
  V(Map, termination_exception_map, TerminationExceptionMap) \
  V(Map, message_object_map, JSMessageObjectMap) \
  V(Map, foreign_map, ForeignMap) \
  V(HeapNumber, nan_value, NanValue) \
  V(HeapNumber, infinity_value, InfinityValue) \
  V(HeapNumber, minus_zero_value, MinusZeroValue) \
  V(Map, neander_map, NeanderMap) \
  V(JSObject, message_listeners, MessageListeners) \
  V(UnseededNumberDictionary, code_stubs, CodeStubs) \
  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode) \
  V(FixedArray, natives_source_cache, NativesSourceCache) \
  V(Script, empty_script, EmptyScript) \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
  V(Cell, undefined_cell, UndefineCell) \
  V(JSObject, observation_state, ObservationState) \
  V(Map, external_map, ExternalMap) \
  V(Object, symbol_registry, SymbolRegistry) \
  V(Symbol, frozen_symbol, FrozenSymbol) \
  V(Symbol, nonexistent_symbol, NonExistentSymbol) \
  V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
  V(SeededNumberDictionary, empty_slow_element_dictionary, \
    EmptySlowElementDictionary) \
  V(Symbol, observed_symbol, ObservedSymbol) \
  V(Symbol, uninitialized_symbol, UninitializedSymbol) \
  V(Symbol, megamorphic_symbol, MegamorphicSymbol) \
  V(Symbol, stack_trace_symbol, StackTraceSymbol) \
  V(Symbol, detailed_stack_trace_symbol, DetailedStackTraceSymbol) \
  V(Symbol, normal_ic_symbol, NormalICSymbol) \
  V(FixedArray, materialized_objects, MaterializedObjects) \
  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
  V(FixedArray, microtask_queue, MicrotaskQueue)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
  V(Smi, stack_limit, StackLimit) \
  V(Smi, real_stack_limit, RealStackLimit) \
  V(Smi, last_script_id, LastScriptId) \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)

#define ROOT_LIST(V) \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V) \
  V(StringTable, string_table, StringTable)
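
// A minimal sketch of how these list macros are consumed: a client supplies
// its own V(type, name, camel_name) expansion and applies the list to it.
// This mirrors the ROOT_ACCESSOR and ROOT_INDEX_DECLARATION uses further
// down; the COUNT_ROOT name below is illustrative only.
//
//   #define COUNT_ROOT(type, name, camel_name) +1
//   static const int kNumberOfRoots = 0 ROOT_LIST(COUNT_ROOT);
//   #undef COUNT_ROOT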
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(one_pointer_filler_map) \
  V(two_pointer_filler_map) \
  V(uninitialized_value) \
  V(global_property_cell_map) \
  V(shared_function_info_map) \
  V(mutable_heap_number_map) \
  V(native_context_map) \
  V(fixed_cow_array_map) \
  V(fixed_double_array_map) \
  V(constant_pool_array_map) \
  V(no_interceptor_result_sentinel) \
  V(ordered_hash_table_map) \
  V(empty_fixed_array) \
  V(empty_byte_array) \
  V(empty_descriptor_array) \
  V(empty_constant_pool_array) \
  V(arguments_marker) \
  V(sloppy_arguments_elements_map) \
  V(function_context_map) \
  V(catch_context_map) \
  V(with_context_map) \
  V(block_context_map) \
  V(module_context_map) \
  V(global_context_map) \
  V(uninitialized_map) \
  V(message_object_map) \

#define INTERNALIZED_STRING_LIST(V) \
  V(Array_string, "Array") \
  V(Object_string, "Object") \
  V(proto_string, "__proto__") \
  V(arguments_string, "arguments") \
  V(Arguments_string, "Arguments") \
  V(call_string, "call") \
  V(apply_string, "apply") \
  V(caller_string, "caller") \
  V(boolean_string, "boolean") \
  V(Boolean_string, "Boolean") \
  V(callee_string, "callee") \
  V(constructor_string, "constructor") \
  V(dot_result_string, ".result") \
  V(dot_for_string, ".for.") \
  V(eval_string, "eval") \
  V(empty_string, "") \
  V(function_string, "function") \
  V(length_string, "length") \
  V(name_string, "name") \
  V(null_string, "null") \
  V(number_string, "number") \
  V(Number_string, "Number") \
  V(float32x4_string, "float32x4") \
  V(float64x2_string, "float64x2") \
  V(int32x4_string, "int32x4") \
  V(nan_string, "NaN") \
  V(RegExp_string, "RegExp") \
  V(source_string, "source") \
  V(source_url_string, "source_url") \
  V(source_mapping_url_string, "source_mapping_url") \
  V(global_string, "global") \
  V(ignore_case_string, "ignoreCase") \
  V(multiline_string, "multiline") \
  V(input_string, "input") \
  V(index_string, "index") \
  V(last_index_string, "lastIndex") \
  V(object_string, "object") \
  V(literals_string, "literals") \
  V(prototype_string, "prototype") \
  V(string_string, "string") \
  V(String_string, "String") \
  V(symbol_string, "symbol") \
  V(Symbol_string, "Symbol") \
  V(for_string, "for") \
  V(for_api_string, "for_api") \
  V(for_intern_string, "for_intern") \
  V(private_api_string, "private_api") \
  V(private_intern_string, "private_intern") \
  V(Date_string, "Date") \
  V(to_string_string, "toString") \
  V(char_at_string, "CharAt") \
  V(undefined_string, "undefined") \
  V(value_of_string, "valueOf") \
  V(stack_string, "stack") \
  V(toJSON_string, "toJSON") \
  V(InitializeVarGlobal_string, "InitializeVarGlobal") \
  V(InitializeConstGlobal_string, "InitializeConstGlobal") \
  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
  V(stack_overflow_string, "kStackOverflowBoilerplate") \
  V(illegal_access_string, "illegal access") \
  V(get_string, "get") \
  V(set_string, "set") \
  V(map_field_string, "%map") \
  V(elements_field_string, "%elements") \
  V(length_field_string, "%length") \
  V(cell_value_string, "%cell_value") \
  V(function_class_string, "Function") \
  V(illegal_argument_string, "illegal argument") \
  V(space_string, " ") \
  V(exec_string, "exec") \
  V(zero_string, "0") \
  V(global_eval_string, "GlobalEval") \
  V(identity_hash_string, "v8::IdentityHash") \
  V(closure_string, "(closure)") \
  V(compare_ic_string, "==") \
  V(strict_compare_ic_string, "===") \
  V(infinity_string, "Infinity") \
  V(minus_infinity_string, "-Infinity") \
  V(query_colon_string, "(?:)") \
  V(Generator_string, "Generator") \
  V(throw_string, "throw") \
  V(done_string, "done") \
  V(value_string, "value") \
  V(signMask, "signMask") \
  V(next_string, "next") \
  V(byte_length_string, "byteLength") \
  V(byte_offset_string, "byteOffset") \
  V(buffer_string, "buffer") \
  V(intl_initialized_marker_string, "v8::intl_initialized_marker") \
  V(intl_impl_object_string, "v8::intl_object")
// Forward declarations.
class WeakObjectRetainer;

typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                      Object** pointer);

class StoreBufferRebuilder {
 public:
  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {}

  void Callback(MemoryChunk* page, StoreBufferEvent event);

 private:
  StoreBuffer* store_buffer_;

  // We record in this variable how full the store buffer was when we started
  // iterating over the current page, finding pointers to new space. If the
  // store buffer overflows again we can exempt the page from the store buffer
  // by rewinding to this point instead of having to search the store buffer.
  Object*** start_of_current_page_;
  // The current page we are scanning in the store buffer iterator.
  MemoryChunk* current_page_;
};

// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(NULL),
        heap_(heap) {}

  void Initialize();

  void Destroy() {
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  inline void ActivateGuardIfOnTheSamePage();

  Page* GetHeadPage() {
    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    if (!guard_) {
      return;
    }

    DCHECK(GetHeadPage() == Page::FromAllocationTop(limit));
    limit_ = reinterpret_cast<intptr_t*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool IsBelowPromotionQueue(Address to_space_top) {
    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is less than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int size);

  void remove(HeapObject** target, int* size) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      return;
    }

    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
      NewSpacePage* front_page =
          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
      DCHECK(!front_page->prev_page()->is_anchor());
      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
    }
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
  // The front of the queue is higher in the memory page chain than the rear.
  intptr_t* front_;
  intptr_t* rear_;
  intptr_t* limit_;

  bool guard_;

  static const int kEntrySizeInWords = 2;

  struct Entry {
    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}

    HeapObject* obj_;
    int size_;
  };

  List<Entry>* emergency_stack_;

  Heap* heap_;

  void RelocateQueueHead();

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
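
// Illustrative sketch (not part of the original header) of how the scavenger
// pairs insert() and remove(): each queue entry is kEntrySizeInWords words,
// an object pointer plus its size, so remove() can skip a map-pointer
// dereference when rescanning promoted objects.
//
//   PromotionQueue* queue = heap->promotion_queue();
//   queue->insert(promoted_object, promoted_object_size);
//   ...
//   HeapObject* target;
//   int size;
//   while (!queue->is_empty()) queue->remove(&target, &size);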

typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
                                   HeapObject* object);

// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable {
 public:
  // Registers an external string.
  inline void AddString(String* string);

  inline void Iterate(ObjectVisitor* v);

  // Restores internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  inline void CleanUp();

  // Destroys all allocated memory.
  void TearDown();

 private:
  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

  friend class Heap;

  inline void Verify();

  inline void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline void ShrinkNewStrings(int position);

  // To speed up scavenge collections, new space strings are kept
  // separate from old space strings.
  List<Object*> new_space_strings_;
  List<Object*> old_space_strings_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
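
// A hedged usage sketch: when a string is externalized it is registered here
// so that its external payload can be finalized later, and Iterate() visits
// the recorded strings (e.g. from the GC's root iteration).
//
//   heap->external_string_table()->AddString(external_string);
//   ...
//   heap->external_string_table()->Iterate(&visitor);  // during GC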

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};


class Heap {
 public:
  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                     int max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();
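
  // Illustrative call (assumption, mirroring the command-line flags: a zero
  // argument leaves the corresponding limit at its default):
  //
  //   heap->ConfigureHeap(0 /* semi-space MB */, 512 /* old space MB */,
  //                       256 /* executable MB */, 0 /* code range */);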

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // Returns the maximum amount of memory reserved for the heap. For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space. The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  intptr_t MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
  int MaxSemiSpaceSize() { return max_semi_space_size_; }
  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  intptr_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  intptr_t Available();

  // Returns the size of all objects residing in the heap.
  intptr_t SizeOfObjects();

  // Return the starting address and a mask for the new space. And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  Address NewSpaceStart() { return new_space_.start(); }
  uintptr_t NewSpaceMask() { return new_space_.mask(); }
  Address NewSpaceTop() { return new_space_.top(); }
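
  // A sketch of the containment check implied by the comment above
  // (illustrative only; `addr` is any address in either semispace):
  //
  //   bool in_new_space =
  //       (reinterpret_cast<uintptr_t>(addr) & heap->NewSpaceMask()) ==
  //       reinterpret_cast<uintptr_t>(heap->NewSpaceStart());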

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_pointer_space() { return old_pointer_space_; }
  OldSpace* old_data_space() { return old_data_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  CellSpace* cell_space() { return cell_space_; }
  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }
  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_POINTER_SPACE:
        return old_pointer_space();
      case OLD_DATA_SPACE:
        return old_data_space();
      case MAP_SPACE:
        return map_space();
      case CELL_SPACE:
        return cell_space();
      case PROPERTY_CELL_SPACE:
        return property_cell_space();
      case CODE_SPACE:
        return code_space();
      case NEW_SPACE:
      case LO_SPACE:
        UNREACHABLE();
    }
    return NULL;
  }

  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  Address* OldPointerSpaceAllocationTopAddress() {
    return old_pointer_space_->allocation_top_address();
  }
  Address* OldPointerSpaceAllocationLimitAddress() {
    return old_pointer_space_->allocation_limit_address();
  }

  Address* OldDataSpaceAllocationTopAddress() {
    return old_data_space_->allocation_top_address();
  }
  Address* OldDataSpaceAllocationLimitAddress() {
    return old_data_space_->allocation_limit_address();
  }

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult
      CopyJSObject(JSObject* source, AllocationSite* site = NULL);

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // Iterates the whole code space to clear all ICs of the given kind.
  void ClearAllICsByKind(Code::Kind kind);

  // For use during bootup.
  void RepairFreeListsAfterBoot();

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Sloppy mode arguments object size.
  static const int kSloppyArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  // Strict mode arguments has no callee so it is smaller.
  static const int kStrictArgumentsObjectSize =
      JSObject::kHeaderSize + 1 * kPointerSize;
  // Indices for direct access into argument objects.
  static const int kArgumentsLengthIndex = 0;
  // callee is only valid in sloppy mode.
  static const int kArgumentsCalleeIndex = 1;

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages.
  void CreateFillerObjectAt(Address addr, int size);

  bool CanMoveObjectStart(HeapObject* object);

  // Indicates whether live bytes adjustment is triggered from within the GC
  // code or from mutator code.
  enum InvocationMode { FROM_GC, FROM_MUTATOR };

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(Address address, int by, InvocationMode mode);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  template<Heap::InvocationMode mode>
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
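
  // Illustrative use of the trimming helpers above (a sketch, not from the
  // original header). Left-trimming relocates the object start, so it is only
  // safe while `array` is the single reference to the object:
  //
  //   array = heap->LeftTrimFixedArray(array, 1);
  //   heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(array, 2);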

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Object* ToBoolean(bool condition);

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  static const int kNoGCFlags = 0;
  static const int kSweepPreciselyMask = 1;
  static const int kReduceMemoryFootprintMask = 2;
  static const int kAbortIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to sweep precisely and abort any
  // incremental marking as well.
  static const int kMakeHeapIterableMask =
      kSweepPreciselyMask | kAbortIncrementalMarkingMask;

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
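
  // A hedged example of the flag composition described above:
  //
  //   // Full GC that leaves the heap iterable (precise sweep plus aborting
  //   // any in-progress incremental marking):
  //   heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "heap iteration");
  //   // Ordinary full GC:
  //   heap->CollectAllGarbage(Heap::kNoGCFlags);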

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed();

  inline void increment_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_++;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  inline void decrement_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_--;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
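
  // Registration sketch (the callback signature follows
  // v8::Isolate::GCPrologueCallback; `OnMarkSweep` is a hypothetical name):
  //
  //   static void OnMarkSweep(v8::Isolate* isolate, GCType type,
  //                           GCCallbackFlags flags) { /* ... */ }
  //   heap->AddGCPrologueCallback(OnMarkSweep, kGCTypeMarkSweepCompact);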

  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
  // not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
  type* raw_unchecked_##name() { \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
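
  // For one list entry, e.g. V(Oddball, undefined_value, UndefinedValue),
  // the macro above expands to (mechanical expansion, shown as a sketch):
  //
  //   Oddball* undefined_value() {
  //     return Oddball::cast(roots_[kUndefinedValueRootIndex]);
  //   }
  //   Oddball* raw_unchecked_undefined_value() {
  //     return reinterpret_cast<Oddball*>(roots_[kUndefinedValueRootIndex]);
  //   }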

#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) \
  String* name() { return String::cast(roots_[k##name##RootIndex]); }
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

  // The hidden_string is special because it is the empty string, but does
  // not match the empty string.
  String* hidden_string() { return hidden_string_; }

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
  Object* array_buffers_list() const { return array_buffers_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }

  // Number of mark-sweeps.
  unsigned int ms_count() { return ms_count_; }

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates over pointers into the from-semispace of new space found in the
  // memory interval from start to end.
  void IterateAndMarkPointersToFromSpace(Address start, Address end,
                                         ObjectSlotCallback callback);

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InNewSpace(Address address);
  inline bool InNewSpacePage(Address address);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old pointer space.
  inline bool InOldPointerSpace(Address address);
  inline bool InOldPointerSpace(Object* object);

  // Returns whether the object resides in old data space.
  inline bool InOldDataSpace(Address address);
  inline bool InOldDataSpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(Address addr);
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(Address addr, AllocationSpace space);
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  inline OldSpace* TargetSpace(HeapObject* object);
  static inline AllocationSpace TargetSpaceId(InstanceType type);

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void public_set_code_stubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Support for computing object sizes for old objects during GCs. Returns
  // a function that is guaranteed to be safe for computing object sizes in
  // the current GC phase.
  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
    return gc_safe_size_of_old_object_;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  void public_set_store_buffer_top(Address* top) {
    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
  }

  void public_set_materialized_objects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  Address* store_buffer_top_address() {
    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
  }

  // Verify the heap is in its normal state before or after a GC.
  void Verify();

  bool weak_embedded_objects_verification_enabled() {
    return no_weak_object_verification_scope_depth_ == 0;
  }

  void OldPointerSpaceCheckStoreBuffer();
  void MapSpaceCheckStoreBuffer();
  void LargeObjectSpaceCheckStoreBuffer();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Returns a deterministic "time" value in ms. Works only with
  // FLAG_verify_predictable.
  double synthetic_time() { return allocations_count_ / 100.0; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  // Write barrier support for address[offset] = o.
  INLINE(void RecordWrite(Address address, int offset));

  // Write barrier support for address[start : start + len[ = o.
  INLINE(void RecordWrites(Address address, int start, int len));
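
  // Write-barrier sketch for a raw field store (illustrative only; real
  // callers normally go through WRITE_FIELD/WRITE_BARRIER-style macros
  // rather than calling this directly):
  //
  //   *reinterpret_cast<Object**>(obj_address + offset) = new_value;
  //   heap->RecordWrite(obj_address, offset);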

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }

  void TracePathToObjectFrom(Object* target, Object* root);
  void TracePathToObject(Object* target);
  void TracePathToGlobal();

  // Callback function passed to Heap::Iterate etc. Copies an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static inline void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return NULL.
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // An object may have an AllocationSite associated with it through a trailing
  // AllocationMemento. Its feedback should be updated when objects are found
  // in the heap.
  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
                                                  ScratchpadSlotMode mode);

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  void ReserveSpace(int* sizes, Address* addresses);

  // Support for the API.
  void CreateApiObjects();

  inline intptr_t PromotedTotalSize() {
    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
    if (total < 0) return 0;
    return static_cast<intptr_t>(total);
  }

  inline intptr_t OldGenerationSpaceAvailable() {
    return old_generation_allocation_limit_ - PromotedTotalSize();
  }

  inline intptr_t OldGenerationCapacityAvailable() {
    return max_old_generation_size_ - PromotedTotalSize();
  }

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kPointerMultiplier = i::kPointerSize / 4;

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
                                        int freed_global_handles);

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // Implements the corresponding V8 API function.
  bool IdleNotification(int hint);

  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP

    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  Map* MapForExternalArrayType(ExternalArrayType array_type);
  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  ExternalArray* EmptyExternalArrayForMap(Map* map);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Optimized version of memmove for blocks with pointer size aligned sizes
  // and pointer size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);
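
  // Usage sketch: both helpers require pointer-size-aligned sizes (and, for
  // MoveBlock, pointer-size-aligned addresses); FixedArray::SizeFor is used
  // here only as an example of such an aligned size.
  //
  //   Heap::CopyBlock(dst_addr, src_addr, FixedArray::SizeFor(length));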

  // Check new space expansion criteria and expand semispaces if it was hit.
  void CheckNewSpaceExpansionCriteria();

  inline void IncrementPromotedObjectsSize(int object_size) {
    DCHECK(object_size > 0);
    promoted_objects_size_ += object_size;
  }

  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
    DCHECK(object_size > 0);
    semi_space_copied_object_size_ += object_size;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(int survived) {
    DCHECK(survived >= 0);
    survived_since_last_expansion_ += survived;
  }

  inline bool NextGCIsLikelyToBeFull() {
    if (FLAG_gc_global) return true;

    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit =
        old_generation_allocation_limit_ - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    return false;
  }

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessWeakReferences(WeakObjectRetainer* retainer);

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearJSFunctionResultCaches();

  void ClearNormalizedMapCaches();

  GCTracer* tracer() { return &tracer_; }

  // Returns the size of objects residing in non-new spaces.
  intptr_t PromotedSpaceSizeOfObjects();

  double total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
    if (is_crankshafted) {
      crankshaft_codegen_bytes_generated_ += size;
    } else {
      full_codegen_bytes_generated_ += size;
    }
  }

  // Update GC statistics that are tracked on the Heap.
  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
                                    double marking_time);

  // Returns maximum GC pause.
  double get_max_gc_pause() { return max_gc_pause_; }

  // Returns maximum size of objects alive after GC.
  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns minimal interval between two subsequent collections.
  double get_min_in_mutator() { return min_in_mutator_; }

  MarkCompactCollector* mark_compact_collector() {
    return &mark_compact_collector_;
  }

  StoreBuffer* store_buffer() { return &store_buffer_; }

  Marking* marking() { return &marking_; }

  IncrementalMarking* incremental_marking() { return &incremental_marking_; }

  ExternalStringTable* external_string_table() {
    return &external_string_table_;
  }

  // Returns the current sweep generation.
  int sweep_generation() { return sweep_generation_; }

  inline Isolate* isolate();

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

  inline bool OldGenerationAllocationLimitReached();

  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
  }

  void QueueMemoryChunkForFree(MemoryChunk* chunk);
  void FreeQueuedChunks();

  int gc_count() const { return gc_count_; }

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a prototype change).
  inline void CompletelyClearInstanceofCache();

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  uint32_t HashSeed() {
    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
    DCHECK(FLAG_randomize_hashes || seed == 0);
    return seed;
  }

  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetConstructStubDeoptPCOffset(int pc_offset) {
    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetGetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetSetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }

  int64_t amount_of_external_allocated_memory() {
    return amount_of_external_allocated_memory_;
  }

  void DeoptMarkedAllocationSites();

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  bool DeoptMaybeTenuredAllocationSites() {
    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
  }

  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
  // stored in a contiguous linear buffer. Stats groups are stored one after
  // another.
  enum {
    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
    FIRST_FIXED_ARRAY_SUB_TYPE =
        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
    FIRST_CODE_AGE_SUB_TYPE =
        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
  };
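
  // Layout sketch of the stats buffers implied by the enum above: instance
  // types come first, then code kinds, then fixed-array sub types, then
  // code ages.
  //
  //   [0 .. LAST_TYPE]                                  instance types
  //   [FIRST_CODE_KIND_SUB_TYPE .. +NUMBER_OF_KINDS)    code kinds
  //   [FIRST_FIXED_ARRAY_SUB_TYPE .. +sub types]        fixed array sub types
  //   [FIRST_CODE_AGE_SUB_TYPE .. OBJECT_STATS_COUNT)   code ages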

  void RecordObjectStats(InstanceType type, size_t size) {
    DCHECK(type <= LAST_TYPE);
    object_counts_[type]++;
    object_sizes_[type] += size;
  }

  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
    int code_age_index =
        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
    DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
    DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
           code_age_index < OBJECT_STATS_COUNT);
    object_counts_[code_sub_type_index]++;
    object_sizes_[code_sub_type_index] += size;
    object_counts_[code_age_index]++;
    object_sizes_[code_age_index] += size;
  }

  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
    DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
  }

  void CheckpointObjectStats();

  // We don't use a LockGuard here since we want to lock the heap
  // only when FLAG_concurrent_recompilation is true.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };
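
  // Scoped usage sketch, mirroring the comment above:
  //
  //   {
  //     Heap::RelocationLock relocation_lock(heap);
  //     // ... read object addresses that relocation must not invalidate ...
  //   }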

  void AddWeakObjectToCodeDependency(Handle<Object> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);

  void InitializeWeakObjectToCodeTable() {
    set_weak_object_to_code_table(undefined_value());
  }

  void EnsureWeakObjectToCodeTable();

  static void FatalProcessOutOfMemory(const char* location,
                                      bool take_snapshot = false);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

 protected:
  // Methods made available to tests.

  // Allocates a JS Map in the heap.
  MUST_USE_RESULT AllocationResult
      AllocateMap(InstanceType instance_type, int instance_size,
                  ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // If allocation_site is non-null, then a memento is emitted after the object
  // that points to the site.
  MUST_USE_RESULT AllocationResult
      AllocateJSObject(JSFunction* constructor,
                       PretenureFlag pretenure = NOT_TENURED,
                       AllocationSite* allocation_site = NULL);

  // Allocates and initializes a new JavaScript object based on a map.
  // Passing an allocation site means that a memento will be created that
  // points to the site.
  MUST_USE_RESULT AllocationResult
      AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
                              bool alloc_props = true,
                              AllocationSite* allocation_site = NULL);

  // Allocates a HeapNumber from value.
  MUST_USE_RESULT AllocationResult
      AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
                         PretenureFlag pretenure = NOT_TENURED);
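
  // Allocation-result sketch (assumes AllocationResult's To()/retry protocol
  // from this code base; illustrative only):
  //
  //   HeapObject* obj = NULL;
  //   AllocationResult allocation = heap->AllocateHeapNumber(1.5);
  //   if (!allocation.To(&obj)) {
  //     // Allocation failed: retry after GC or bail out.
  //   }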

  // Allocates a Float32x4 from value.
  MUST_USE_RESULT AllocationResult AllocateFloat32x4(
      float32x4_value_t value,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocates a Float64x2 from value.
  MUST_USE_RESULT AllocationResult AllocateFloat64x2(
      float64x2_value_t value,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocates an Int32x4 from value.
  MUST_USE_RESULT AllocationResult AllocateInt32x4(
      int32x4_value_t value,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocates a byte array of the specified length.
  MUST_USE_RESULT AllocationResult
      AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Copies the code and scope info part of the code object, but inserts
  // the provided data as the relocation information.
  MUST_USE_RESULT AllocationResult
      CopyCode(Code* code, Vector<byte> reloc_info);

  MUST_USE_RESULT AllocationResult CopyCode(Code* code);

  // Allocates a fixed array initialized with undefined values.
  MUST_USE_RESULT AllocationResult
      AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);

 private:
  // The amount of external memory registered through the API kept alive
  // by global handles.
  int64_t amount_of_external_allocated_memory_;

  // Caches the amount of external memory registered at the last global gc.
  int64_t amount_of_external_allocated_memory_at_last_global_gc_;

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  Object* roots_[kRootListLength];

  size_t code_range_size_;
  int reserved_semispace_size_;
  int max_semi_space_size_;
  int initial_semispace_size_;
  intptr_t max_old_generation_size_;
  intptr_t max_executable_size_;
  intptr_t maximum_committed_;

  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
  int survived_since_last_expansion_;

  // For keeping track of when to flush RegExp code.
  int sweep_generation_;

  int always_allocate_scope_depth_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  bool flush_monomorphic_ics_;

  int scan_on_scavenge_pages_;

  NewSpace new_space_;
  OldSpace* old_pointer_space_;
  OldSpace* old_data_space_;
  OldSpace* code_space_;
  MapSpace* map_space_;
  CellSpace* cell_space_;
  PropertyCellSpace* property_cell_space_;
  LargeObjectSpace* lo_space_;
  HeapState gc_state_;
  int gc_post_processing_depth_;
  Address new_space_top_after_last_gc_;

  // Returns the amount of external memory registered since last global gc.
  int64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_;

  // Countdown counter, dumps allocation hash when 0.
  uint32_t dump_allocations_hash_countdown_;

  // How many mark-sweep collections happened.
  unsigned int ms_count_;

  // How many GCs happened.
  unsigned int gc_count_;

  // For post mortem debugging.
  static const int kRememberedUnmappedPages = 128;
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

  // Total length of the strings we failed to flatten since the last GC.
  int unflattened_strings_length_;

#define ROOT_ACCESSOR(type, name, camel_name) \
  inline void set_##name(type* value) { \
    /* The deserializer makes use of the fact that these common roots are */ \
    /* never in new space and never on a page that is being compacted. */ \
    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value; \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // that remain until the next failure and garbage collection.
  int allocation_timeout_;

  // Limit that triggers a global GC on the next (normally caused) GC. This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  intptr_t old_generation_allocation_limit_;

  // Indicates that an allocation has failed in the old generation since the
  // last GC.
  bool old_gen_exhausted_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_;
1585 // Weak list heads, threaded through the objects.
1586 // List heads are initilized lazily and contain the undefined_value at start.
1587 Object* native_contexts_list_;
1588 Object* array_buffers_list_;
1589 Object* allocation_sites_list_;
1591 // WeakHashTable that maps objects embedded in optimized code to dependent
1592 // code list. It is initilized lazily and contains the undefined_value at
1594 Object* weak_object_to_code_table_;
1596 // List of encountered weak collections (JSWeakMap and JSWeakSet) during
1597 // marking. It is initialized during marking, destroyed after marking and
1598 // contains Smi(0) while marking is not active.
1599 Object* encountered_weak_collections_;
  StoreBufferRebuilder store_buffer_rebuilder_;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantStringTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];
  // The special hidden string which is an empty string, but does not match
  // any string when looked up in properties.
  String* hidden_string_;
  // GC callback function, called before and after mark-compact GC.
  // Allocations in the callback function are disallowed.
  struct GCPrologueCallbackPair {
    GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
                           GCType gc_type, bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
    bool operator==(const GCPrologueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    v8::Isolate::GCPrologueCallback callback;
    GCType gc_type;
    // TODO(dcarney): remove variable
    bool pass_isolate_;
  };
  List<GCPrologueCallbackPair> gc_prologue_callbacks_;

  struct GCEpilogueCallbackPair {
    GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
                           GCType gc_type, bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
    bool operator==(const GCEpilogueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    v8::Isolate::GCPrologueCallback callback;
    GCType gc_type;
    // TODO(dcarney): remove variable
    bool pass_isolate_;
  };
  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
  // Support for computing object sizes during GC.
  HeapObjectCallback gc_safe_size_of_old_object_;
  static int GcSafeSizeOfOldObject(HeapObject* object);

  // Update the GC state. Called from the mark-compact collector.
  void MarkMapPointersAsEncoded(bool encoded) {
    DCHECK(!encoded);
    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
  }
  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Pretenuring decisions are made based on feedback collected during new
  // space evacuation. Note that between feedback collection and calling this
  // method objects in old space must not move.
  // Right now we only process pretenuring feedback in high promotion mode.
  void ProcessPretenuringFeedback();

  // Checks whether a global GC is necessary.
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top.
  void EnsureFillerObjectAtTop();
  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects. May cause a GC.
  void MakeHeapIterable();

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool CollectGarbage(
      GarbageCollector collector, const char* gc_reason,
      const char* collector_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs garbage collection.
  // Returns whether there is a chance another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();
  // Selects the proper allocation space depending on the given object
  // size, pretenuring decision, and preferred old-space.
  static AllocationSpace SelectSpace(int object_size,
                                     AllocationSpace preferred_old_space,
                                     PretenureFlag pretenure) {
    DCHECK(preferred_old_space == OLD_POINTER_SPACE ||
           preferred_old_space == OLD_DATA_SPACE);
    if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
    return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
  }
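  // Usage sketch (illustrative, not taken from callers): a tenured,
  // regular-sized object lands in the preferred old space, while an
  // oversized one is redirected to the large object space.
  //
  //   AllocationSpace space =
  //       SelectSpace(object_size, OLD_DATA_SPACE, TENURED);
  //   // object_size <= Page::kMaxRegularHeapObjectSize -> OLD_DATA_SPACE
  //   // object_size >  Page::kMaxRegularHeapObjectSize -> LO_SPACE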
  // Allocate an uninitialized object. The memory is non-executable if the
  // hardware and OS allow. This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);
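  // Hedged sketch of the common caller pattern around AllocateRaw (see the
  // Allocate* helpers in heap.cc for the canonical form): failures are
  // propagated so the caller can retry after a GC.
  //
  //   HeapObject* result;
  //   AllocationResult allocation = AllocateRaw(size, space, retry_space);
  //   if (!allocation.To(&result)) return allocation;  // retry after GC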
  // Allocates a heap object based on the map.
  MUST_USE_RESULT AllocationResult
      Allocate(Map* map, AllocationSpace space,
               AllocationSite* allocation_site = NULL);

  // Allocates a partial map for bootstrapping.
  MUST_USE_RESULT AllocationResult
      AllocatePartialMap(InstanceType instance_type, int instance_size);

  // Initializes a JSObject based on its map.
  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                 Map* map);
  void InitializeAllocationMemento(AllocationMemento* memento,
                                   AllocationSite* allocation_site);

  // Allocate a block of memory in the given space (filled with a filler).
  // Used as a fall-back for generated code when the space is full.
  MUST_USE_RESULT AllocationResult
      AllocateFillerObject(int size, bool double_align, AllocationSpace space);

  // Allocate an uninitialized fixed array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedArray(int length, PretenureFlag pretenure);

  // Allocate an uninitialized fixed double array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);

  // Allocate an initialized fixed array with the given filler value.
  MUST_USE_RESULT AllocationResult
      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
                                   Object* filler);

  // Allocates and partially initializes a String. There are two String
  // encodings: ASCII and two byte. These functions allocate a string of the
  // given length and set its map and length fields. The characters of the
  // string are uninitialized.
  MUST_USE_RESULT AllocationResult
      AllocateRawOneByteString(int length, PretenureFlag pretenure);
  MUST_USE_RESULT AllocationResult
      AllocateRawTwoByteString(int length, PretenureFlag pretenure);
  bool CreateInitialMaps();
  void CreateInitialObjects();

  // Allocates an internalized string in old space based on the character
  // stream.
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
      Vector<const char> str, int chars, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
      Vector<const uint8_t> str, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
      Vector<const uc16> str, uint32_t hash_field);

  template <bool is_one_byte, typename T>
  MUST_USE_RESULT AllocationResult
      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);

  template <typename T>
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
      T t, int chars, uint32_t hash_field);
  // Allocates an uninitialized fixed array. It must be filled by the caller.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT AllocationResult
      CopyFixedArrayWithMap(FixedArray* src, Map* map);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
      FixedDoubleArray* src);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
      ConstantPoolArray* src);

  // Computes a single character string where the character has the given
  // code. A cache is used for ASCII codes.
  MUST_USE_RESULT AllocationResult
      LookupSingleCharacterStringFromCode(uint16_t code);
  // Allocate a symbol in old space.
  MUST_USE_RESULT AllocationResult AllocateSymbol();

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);

  MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
      const ConstantPoolArray::NumberOfEntries& small);

  MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
      const ConstantPoolArray::NumberOfEntries& small,
      const ConstantPoolArray::NumberOfEntries& extended);

  // Allocates an external array of the specified length and type.
  MUST_USE_RESULT AllocationResult
      AllocateExternalArray(int length, ExternalArrayType array_type,
                            void* external_pointer, PretenureFlag pretenure);

  // Allocates a fixed typed array of the specified length and type.
  MUST_USE_RESULT AllocationResult
      AllocateFixedTypedArray(int length, ExternalArrayType array_type,
                              PretenureFlag pretenure);

  // Make a copy of src and return it.
  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);

  // Allocates a fixed double array with uninitialized values.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
      int length, PretenureFlag pretenure = NOT_TENURED);
  // These two Create*EntryStub functions are here and forced to not be
  // inlined because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(void CreateJSEntryStub());
  NO_INLINE(void CreateJSConstructEntryStub());

  void CreateFixedStubs();
  // Allocate empty fixed array.
  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();

  // Allocate empty external array of given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyExternalArray(ExternalArrayType array_type);

  // Allocate empty fixed typed array of given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  // Allocate empty constant pool array.
  MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();

  // Allocate a tenured simple cell.
  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);

  // Allocate a tenured JS global property cell initialized with the hole.
  MUST_USE_RESULT AllocationResult AllocatePropertyCell();

  // Allocates a new utility object in the old generation.
  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);

  // Allocates a new foreign object.
  MUST_USE_RESULT AllocationResult
      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);

  MUST_USE_RESULT AllocationResult
      AllocateCode(int object_size, bool immovable);

  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);

  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
  // Performs a minor collection in new generation.
  void Scavenge();

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }

  // Fill in bogus values in from space.
  void ZapFromSpace();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap, Object** pointer);

  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
                                          StoreBufferEvent event);
  // Performs a major collection in the whole heap.
  void MarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();

  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessArrayBuffers(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);

  // Deopts all code that contains allocation instructions which are tenured
  // or not tenured. Moreover it clears the pretenuring allocation site
  // statistics.
  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

  // Called on heap tear-down.
  void TearDownArrayBuffers();

  // Record statistics before and after garbage collection.
  void ReportStatisticsBeforeGC();
  void ReportStatisticsAfterGC();

  // Slow part of scavenge object.
  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
  // Total RegExp code ever generated.
  double total_regexp_code_generated_;

  // Creates and installs the full-sized number string cache.
  int FullSizeNumberStringCacheLength();
  // Flush the number to string cache.
  void FlushNumberStringCache();

  // Sets used allocation site entries to undefined.
  void FlushAllocationSitesScratchpad();

  // Initializes the allocation sites scratchpad with undefined values.
  void InitializeAllocationSitesScratchpad();

  // Adds an allocation site to the scratchpad if there is space left.
  void AddAllocationSiteToScratchpad(AllocationSite* site,
                                     ScratchpadSlotMode mode);
  void UpdateSurvivalStatistics(int start_new_space_size);

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;

  static const int kOldSurvivalRateLowThreshold = 10;
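  // How the thresholds above are meant to be read (informal sketch; the
  // actual bookkeeping lives in UpdateSurvivalStatistics and may differ in
  // detail):
  //
  //   double survival_rate =
  //       (promoted_objects_size_ + semi_space_copied_object_size_) * 100.0 /
  //       start_new_space_size;
  //   // > kYoungSurvivalRateHighThreshold -> a high-survival scavenge
  //   // < kOldSurvivalRateLowThreshold    -> a low-survival old generation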
  int high_survival_rate_period_length_;
  intptr_t promoted_objects_size_;
  double promotion_rate_;
  intptr_t semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_;

  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
  // Re-visit incremental marking heuristics.
  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
  void SelectScavengingVisitorsTable();

  void StartIdleRound() { mark_sweeps_since_idle_round_started_ = 0; }

  void FinishIdleRound() {
    mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
    scavenges_since_last_idle_round_ = 0;
  }

  bool EnoughGarbageSinceLastIdleRound() {
    return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
  }
  // Estimates how many milliseconds a Mark-Sweep would take to complete.
  // In the idle notification handler we assume that this function will
  // return:
  // - a number less than 10 for small heaps, which are less than 8 MB.
  // - a number greater than 10 for large heaps, which are greater than 32 MB.
  int TimeMarkSweepWouldTakeInMs() {
    // Rough estimate of how many megabytes of heap can be processed in 1 ms.
    static const int kMbPerMs = 2;

    int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
    return heap_size_mb / kMbPerMs;
  }
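  // Worked example for the estimate above: a 16 MB heap yields
  // 16 / kMbPerMs = 8 ms (below the 10 ms idle threshold), while a
  // 64 MB heap yields 32 ms.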
  void AdvanceIdleIncrementalMarking(intptr_t step_size);

  void ClearObjectStats(bool clear_last_time_stats = false);

  void set_weak_object_to_code_table(Object* value) {
    DCHECK(!InNewSpace(value));
    weak_object_to_code_table_ = value;
  }

  Object** weak_object_to_code_table_address() {
    return &weak_object_to_code_table_;
  }
  inline void UpdateAllocationsHash(HeapObject* object);
  inline void UpdateAllocationsHash(uint32_t value);
  inline void PrintAlloctionsHash();

  static const int kInitialStringTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  // Object counts and used memory by InstanceType.
  size_t object_counts_[OBJECT_STATS_COUNT];
  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
  size_t object_sizes_[OBJECT_STATS_COUNT];
  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
  // Maximum GC pause.
  double max_gc_pause_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Maximum size of objects alive after GC.
  intptr_t max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  double min_in_mutator_;

  // Cumulative GC time spent in marking.
  double marking_time_;

  // Cumulative GC time spent in sweeping.
  double sweeping_time_;

  MarkCompactCollector mark_compact_collector_;

  StoreBuffer store_buffer_;

  IncrementalMarking incremental_marking_;
  int number_idle_notifications_;
  unsigned int last_idle_notification_gc_count_;
  bool last_idle_notification_gc_count_init_;

  int mark_sweeps_since_idle_round_started_;
  unsigned int gc_count_at_last_idle_gc_;
  int scavenges_since_last_idle_round_;

  // These two counters are monotonically increasing and never reset.
  size_t full_codegen_bytes_generated_;
  size_t crankshaft_codegen_bytes_generated_;

  // If the --deopt_every_n_garbage_collections flag is set to a positive
  // value, this variable holds the number of garbage collections since the
  // last deoptimization triggered by garbage collection.
  int gcs_since_last_deopt_;
#ifdef VERIFY_HEAP
  int no_weak_object_verification_scope_depth_;
#endif
  static const int kAllocationSiteScratchpadSize = 256;
  int allocation_sites_scratchpad_length_;

  static const int kMaxMarkSweepsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;

  // Flag is set when the heap has been configured. The heap can be
  // repeatedly configured through the API until it is set up.
  bool configured_;

  ExternalStringTable external_string_table_;

  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;

  MemoryChunk* chunks_queued_for_free_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;
  friend class AlwaysAllocateScope;
  friend class Factory;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapIterator;
  friend class Isolate;
  friend class MarkCompactCollector;
  friend class MarkCompactMarkingVisitor;
  friend class MapCompact;
#ifdef VERIFY_HEAP
  friend class NoWeakObjectVerificationScope;
#endif

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  int* start_marker;                       //  0
  int* new_space_size;                     //  1
  int* new_space_capacity;                 //  2
  intptr_t* old_pointer_space_size;        //  3
  intptr_t* old_pointer_space_capacity;    //  4
  intptr_t* old_data_space_size;           //  5
  intptr_t* old_data_space_capacity;       //  6
  intptr_t* code_space_size;               //  7
  intptr_t* code_space_capacity;           //  8
  intptr_t* map_space_size;                //  9
  intptr_t* map_space_capacity;            // 10
  intptr_t* cell_space_size;               // 11
  intptr_t* cell_space_capacity;           // 12
  intptr_t* lo_space_size;                 // 13
  int* global_handle_count;                // 14
  int* weak_global_handle_count;           // 15
  int* pending_global_handle_count;        // 16
  int* near_death_global_handle_count;     // 17
  int* free_global_handle_count;           // 18
  intptr_t* memory_allocator_size;         // 19
  intptr_t* memory_allocator_capacity;     // 20
  int* objects_per_type;                   // 21
  int* size_per_type;                      // 22
  int* os_error;                           // 23
  int* end_marker;                         // 24
  intptr_t* property_cell_space_size;      // 25
  intptr_t* property_cell_space_capacity;  // 26
};
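// The two markers bracket the struct so that a post-mortem reader can
// sanity-check a dump. A hedged sketch of such a check (not part of V8):
//
//   bool LooksLikeHeapStats(const HeapStats& stats) {
//     return *stats.start_marker == HeapStats::kStartMarker &&
//            *stats.end_marker == HeapStats::kEndMarker;
//   }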
class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  // Implicitly disable artificial allocation failures.
  Heap* heap_;
  DisallowAllocationFailure daf_;
};
#ifdef VERIFY_HEAP
class NoWeakObjectVerificationScope {
 public:
  inline NoWeakObjectVerificationScope();
  inline ~NoWeakObjectVerificationScope();
};
#endif
class GCCallbacksScope {
 public:
  explicit inline GCCallbacksScope(Heap* heap);
  inline ~GCCallbacksScope();

  inline bool CheckReenter();

 private:
  Heap* heap_;
};
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have
// to point into the heap to a location that has a map pointer at its first
// word. Caveat: Heap::Contains is an approximation because it can return
// true for objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end);
};
// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end);
};
// Space iterator for iterating over all spaces of the heap. Returns each
// space in turn, and NULL when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
  Space* next();

 private:
  Heap* heap_;
  int counter_;
};
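// Usage sketch (illustrative):
//
//   AllSpaces spaces(heap);
//   while (Space* space = spaces.next()) {
//     // Inspect *space; next() returns NULL when the iteration is done.
//   }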
// Space iterator for iterating over all old spaces of the heap: Old pointer
// space, old data space and code space. Returns each space in turn, and NULL
// when it is done.
class OldSpaces BASE_EMBEDDED {
 public:
  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
  OldSpace* next();

 private:
  Heap* heap_;
  int counter_;
};
// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old pointer space, old data space, code space and cell space.
// Returns each space in turn, and NULL when it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};
// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
  HeapObjectCallback size_func_;
};
// A HeapIterator provides iteration over the whole heap. It
// aggregates the specific iterators for the different spaces as
// these can each iterate over only one space.
//
// HeapIterator ensures there is no allocation during its lifetime
// (using an embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free
// nodes filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
class HeapObjectsFilter;

class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap);
  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
  ~HeapIterator();

  HeapObject* next();
  void reset();
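  // Usage sketch (illustrative): walk every live object in the heap.
  //
  //   HeapIterator it(heap, HeapIterator::kFilterUnreachable);
  //   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
  //     // Visit obj; allocation is forbidden while `it` is alive.
  //   }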
 private:
  struct MakeHeapIterableHelper {
    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
  };

  // Perform the initialization.
  void Init();
  // Perform all necessary shutdown (destruction) work.
  void Shutdown();
  HeapObject* NextObject();

  MakeHeapIterableHelper make_heap_iterable_helper_;
  DisallowHeapAllocation no_heap_allocation_;
  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  int Lookup(Handle<Map> map, Handle<Name> name);

  // Update an element in the cache.
  void Update(Handle<Map> map, Handle<Name> name, int field_offset);
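  // Intended usage sketch (illustrative): probe first, fill on a miss.
  //
  //   int offset = keyed_lookup_cache->Lookup(map, name);
  //   if (offset == KeyedLookupCache::kNotFound) {
  //     offset = ...;  // perform the slow lookup
  //     keyed_lookup_cache->Update(map, name, offset);
  //   }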
  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kEntryLength = 2;
  static const int kMapIndex = 0;
  static const int kKeyIndex = 1;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Handle<Map> map, Handle<Name> name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() { return reinterpret_cast<Address>(&keys_); }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    Name* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index equal to kAbsent means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  int Lookup(Map* source, Name* name) {
    if (!name->IsUniqueName()) return kAbsent;
    int index = Hash(source, name);
    Key& key = keys_[index];
    if ((key.source == source) && (key.name == name)) return results_[index];
    return kAbsent;
  }
  // Update an element in the cache.
  void Update(Map* source, Name* name, int result) {
    DCHECK(result != kAbsent);
    if (name->IsUniqueName()) {
      int index = Hash(source, name);
      Key& key = keys_[index];
      key.source = source;
      key.name = name;
      results_[index] = result;
    }
  }

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }
  static int Hash(Object* source, Name* name) {
    // Uses only lower 32 bits if pointers are larger.
    uint32_t source_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
        kPointerSizeLog2;
    uint32_t name_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
        kPointerSizeLog2;
    return (source_hash ^ name_hash) % kLength;
  }
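  // Worked example (hypothetical addresses, illustrative only): with
  // kPointerSizeLog2 == 2 on a 32-bit build, source == 0x2a04 and
  // name == 0x2a40 give (0xa81 ^ 0xa90) % kLength == 0x11 % 64 == 17.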
  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
class RegExpResultsCache {
 public:
  enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };

  // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
  // On success, the returned result is guaranteed to be a COW-array.
  static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
                        ResultsCacheType type);
  // Attempt to add value_array to the cache specified by type. On success,
  // value_array is turned into a COW-array.
  static void Enter(Isolate* isolate, Handle<String> key_string,
                    Handle<Object> key_pattern, Handle<FixedArray> value_array,
                    ResultsCacheType type);
  static void Clear(FixedArray* cache);
  static const int kRegExpResultsCacheSize = 0x100;
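  // Usage sketch (illustrative): consult the cache before doing the work,
  // publish the result on a miss.
  //
  //   Object* cached = RegExpResultsCache::Lookup(
  //       heap, *subject, *pattern,
  //       RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
  //   if (cached->IsSmi()) {  // Smi(0) signals a cache miss.
  //     // ... compute value_array ...
  //     RegExpResultsCache::Enter(isolate, subject, pattern, value_array,
  //                               RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
  //   }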
 private:
  static const int kArrayEntriesPerCacheEntry = 4;
  static const int kStringOffset = 0;
  static const int kPatternOffset = 1;
  static const int kArrayOffset = 2;
};
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If NULL is returned, the
  // object has no references. Otherwise the address of the retained object
  // should be returned as in some GC situations the object has been moved.
  virtual Object* RetainAs(Object* object) = 0;
};
// Intrusive object marking uses the least significant bit of the
// heap object's map word to mark objects.
// Normally all map words have the least significant bit set
// because they contain a tagged map pointer.
// If the bit is not set, the object is marked.
// All objects should be unmarked before resuming
// JavaScript execution.
class IntrusiveMarking {
 public:
  static bool IsMarked(HeapObject* object) {
    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
  }

  static void ClearMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
    DCHECK(!IsMarked(object));
  }

  static void SetMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
    DCHECK(IsMarked(object));
  }

  static Map* MapOfMarkedObject(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
  }

  static int SizeOfMarkedObject(HeapObject* object) {
    return object->SizeFromMap(MapOfMarkedObject(object));
  }

 private:
  static const uintptr_t kNotMarkedBit = 0x1;
  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
};
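// The trick above relies on heap object tagging: a map word normally holds a
// tagged Map pointer, whose low bit (kHeapObjectTag) is always set. A hedged
// sketch of the invariant:
//
//   uintptr_t raw = object->map_word().ToRawValue();
//   // Unmarked object: (raw & 1) == 1 (tagged pointer).
//   // Marked object:   (raw & 1) == 0 (bit stolen by SetMark above).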
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after first match.
  };

  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  virtual void VisitPointers(Object** start, Object** end);

  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;
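  // Usage sketch (illustrative, debug-only style helper): trace from a known
  // root slot to a target object.
  //
  //   PathTracer tracer(target, PathTracer::FIND_FIRST, VISIT_ALL);
  //   tracer.TracePathFrom(&some_root_slot);  // some_root_slot: hypothetical
  //   bool reached = tracer.found();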
 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;
  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
} }  // namespace v8::internal

#endif  // V8_HEAP_HEAP_H_