// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/splay-tree-inl.h"
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, free_space_map, FreeSpaceMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Smi, store_buffer_top, StoreBufferTop) \
  V(Oddball, undefined_value, UndefinedValue) \
  V(Oddball, the_hole_value, TheHoleValue) \
  V(Oddball, null_value, NullValue) \
  V(Oddball, true_value, TrueValue) \
  V(Oddball, false_value, FalseValue) \
  V(Oddball, uninitialized_value, UninitializedValue) \
  V(Oddball, exception, Exception) \
  V(Map, cell_map, CellMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, meta_map, MetaMap) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
  V(Map, native_context_map, NativeContextMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Map, code_map, CodeMap) \
  V(Map, scope_info_map, ScopeInfoMap) \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
  V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
  V(Map, weak_cell_map, WeakCellMap) \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
  V(Map, hash_table_map, HashTableMap) \
  V(Map, ordered_hash_table_map, OrderedHashTableMap) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(ByteArray, empty_byte_array, EmptyByteArray) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
  V(Oddball, arguments_marker, ArgumentsMarker) \
  /* The roots above this line should be boring from a GC point of view. */ \
  /* This means they are never in new space and never on a page that is */ \
  /* being compacted. */ \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, string_split_cache, StringSplitCache) \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
  V(Oddball, termination_exception, TerminationException) \
  V(Smi, hash_seed, HashSeed) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, string_map, StringMap) \
  V(Map, one_byte_string_map, OneByteStringMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
  V(Map, sliced_string_map, SlicedStringMap) \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_one_byte_data_map, \
    ExternalStringWithOneByteDataMap) \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
  V(Map, native_source_string_map, NativeSourceStringMap) \
  V(Map, short_external_string_map, ShortExternalStringMap) \
  V(Map, short_external_string_with_one_byte_data_map, \
    ShortExternalStringWithOneByteDataMap) \
  V(Map, internalized_string_map, InternalizedStringMap) \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
  V(Map, external_internalized_string_with_one_byte_data_map, \
    ExternalInternalizedStringWithOneByteDataMap) \
  V(Map, external_one_byte_internalized_string_map, \
    ExternalOneByteInternalizedStringMap) \
  V(Map, short_external_internalized_string_map, \
    ShortExternalInternalizedStringMap) \
  V(Map, short_external_internalized_string_with_one_byte_data_map, \
    ShortExternalInternalizedStringWithOneByteDataMap) \
  V(Map, short_external_one_byte_internalized_string_map, \
    ShortExternalOneByteInternalizedStringMap) \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
  V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
  V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
  V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
  V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
  V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
  V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
  V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
  V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array) \
  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array) \
  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array) \
  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array) \
  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
  V(ExternalArray, empty_external_uint8_clamped_array, \
    EmptyExternalUint8ClampedArray) \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
    EmptyFixedUint8ClampedArray) \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
  V(Map, function_context_map, FunctionContextMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, with_context_map, WithContextMap) \
  V(Map, block_context_map, BlockContextMap) \
  V(Map, module_context_map, ModuleContextMap) \
  V(Map, script_context_map, ScriptContextMap) \
  V(Map, script_context_table_map, ScriptContextTableMap) \
  V(Map, undefined_map, UndefinedMap) \
  V(Map, the_hole_map, TheHoleMap) \
  V(Map, null_map, NullMap) \
  V(Map, boolean_map, BooleanMap) \
  V(Map, uninitialized_map, UninitializedMap) \
  V(Map, arguments_marker_map, ArgumentsMarkerMap) \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
  V(Map, exception_map, ExceptionMap) \
  V(Map, termination_exception_map, TerminationExceptionMap) \
  V(Map, message_object_map, JSMessageObjectMap) \
  V(Map, foreign_map, ForeignMap) \
  V(HeapNumber, nan_value, NanValue) \
  V(HeapNumber, infinity_value, InfinityValue) \
  V(HeapNumber, minus_zero_value, MinusZeroValue) \
  V(Map, neander_map, NeanderMap) \
  V(JSObject, message_listeners, MessageListeners) \
  V(UnseededNumberDictionary, code_stubs, CodeStubs) \
  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode) \
  V(FixedArray, natives_source_cache, NativesSourceCache) \
  V(Script, empty_script, EmptyScript) \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
  V(Cell, undefined_cell, UndefineCell) \
  V(JSObject, observation_state, ObservationState) \
  V(Map, external_map, ExternalMap) \
  V(Object, symbol_registry, SymbolRegistry) \
  V(SeededNumberDictionary, empty_slow_element_dictionary, \
    EmptySlowElementDictionary) \
  V(FixedArray, materialized_objects, MaterializedObjects) \
  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
  V(FixedArray, microtask_queue, MicrotaskQueue) \
  V(FixedArray, keyed_load_dummy_vector, KeyedLoadDummyVector) \
  V(FixedArray, detached_contexts, DetachedContexts) \
  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)
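
// The list above is an X-macro: each client supplies its own definition of
// V(...) and expands the list once per root. A minimal sketch of the pattern
// (COUNT_ROOT and kStrongRootCount are illustrative, not part of V8): each
// entry expands to "+1", so the sum counts the strong roots at compile time.
#define COUNT_ROOT(type, name, camel_name) +1
static const int kStrongRootCount = 0 STRONG_ROOT_LIST(COUNT_ROOT);
#undef COUNT_ROOT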

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
  V(Smi, stack_limit, StackLimit) \
  V(Smi, real_stack_limit, RealStackLimit) \
  V(Smi, last_script_id, LastScriptId) \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)

#define ROOT_LIST(V)  \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V)    \
  V(StringTable, string_table, StringTable)

#define INTERNALIZED_STRING_LIST(V) \
  V(Object_string, "Object") \
  V(proto_string, "__proto__") \
  V(arguments_string, "arguments") \
  V(Arguments_string, "Arguments") \
  V(caller_string, "caller") \
  V(boolean_string, "boolean") \
  V(Boolean_string, "Boolean") \
  V(callee_string, "callee") \
  V(constructor_string, "constructor") \
  V(dot_result_string, ".result") \
  V(eval_string, "eval") \
  V(empty_string, "") \
  V(function_string, "function") \
  V(Function_string, "Function") \
  V(length_string, "length") \
  V(name_string, "name") \
  V(null_string, "null") \
  V(number_string, "number") \
  V(Number_string, "Number") \
  V(nan_string, "NaN") \
  V(source_string, "source") \
  V(source_url_string, "source_url") \
  V(source_mapping_url_string, "source_mapping_url") \
  V(global_string, "global") \
  V(ignore_case_string, "ignoreCase") \
  V(multiline_string, "multiline") \
  V(sticky_string, "sticky") \
  V(unicode_string, "unicode") \
  V(harmony_regexps_string, "harmony_regexps") \
  V(harmony_unicode_regexps_string, "harmony_unicode_regexps") \
  V(input_string, "input") \
  V(index_string, "index") \
  V(last_index_string, "lastIndex") \
  V(object_string, "object") \
  V(prototype_string, "prototype") \
  V(string_string, "string") \
  V(String_string, "String") \
  V(symbol_string, "symbol") \
  V(Symbol_string, "Symbol") \
  V(Map_string, "Map") \
  V(Set_string, "Set") \
  V(WeakMap_string, "WeakMap") \
  V(WeakSet_string, "WeakSet") \
  V(for_string, "for") \
  V(for_api_string, "for_api") \
  V(for_intern_string, "for_intern") \
  V(private_api_string, "private_api") \
  V(private_intern_string, "private_intern") \
  V(Date_string, "Date") \
  V(char_at_string, "CharAt") \
  V(undefined_string, "undefined") \
  V(value_of_string, "valueOf") \
  V(stack_string, "stack") \
  V(toJSON_string, "toJSON") \
  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
  V(stack_overflow_string, "kStackOverflowBoilerplate") \
  V(illegal_access_string, "illegal access") \
  V(cell_value_string, "%cell_value") \
  V(illegal_argument_string, "illegal argument") \
  V(identity_hash_string, "v8::IdentityHash") \
  V(closure_string, "(closure)") \
  V(compare_ic_string, "==") \
  V(strict_compare_ic_string, "===") \
  V(infinity_string, "Infinity") \
  V(minus_infinity_string, "-Infinity") \
  V(query_colon_string, "(?:)") \
  V(Generator_string, "Generator") \
  V(throw_string, "throw") \
  V(done_string, "done") \
  V(value_string, "value") \
  V(next_string, "next") \
  V(byte_length_string, "byteLength") \
  V(byte_offset_string, "byteOffset") \
  V(minus_zero_string, "-0") \
  V(Array_string, "Array") \
  V(Error_string, "Error") \
  V(RegExp_string, "RegExp")

#define PRIVATE_SYMBOL_LIST(V) \
  V(nonextensible_symbol) \
  V(nonexistent_symbol) \
  V(elements_transition_symbol) \
  V(prototype_users_symbol) \
  V(uninitialized_symbol) \
  V(megamorphic_symbol) \
  V(premonomorphic_symbol) \
  V(stack_trace_symbol) \
  V(detailed_stack_trace_symbol) \
  V(normal_ic_symbol) \
  V(home_object_symbol) \
  V(intl_initialized_marker_symbol) \
  V(intl_impl_object_symbol) \
  V(promise_debug_marker_symbol) \
  V(promise_has_handler_symbol) \
  V(class_script_symbol) \
  V(class_start_position_symbol) \
  V(class_end_position_symbol) \
  V(error_start_pos_symbol) \
  V(error_end_pos_symbol) \
  V(error_script_symbol)

#define PUBLIC_SYMBOL_LIST(V) \
  V(has_instance_symbol, symbolHasInstance, Symbol.hasInstance) \
  V(is_concat_spreadable_symbol, symbolIsConcatSpreadable, \
    Symbol.isConcatSpreadable) \
  V(is_regexp_symbol, symbolIsRegExp, Symbol.isRegExp) \
  V(iterator_symbol, symbolIterator, Symbol.iterator) \
  V(to_string_tag_symbol, symbolToStringTag, Symbol.toStringTag) \
  V(unscopables_symbol, symbolUnscopables, Symbol.unscopables)

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete and has omissions.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(OnePointerFillerMap) \
  V(TwoPointerFillerMap) \
  V(UninitializedValue) \
  V(GlobalPropertyCellMap) \
  V(SharedFunctionInfoMap) \
  V(MutableHeapNumberMap) \
  V(NativeContextMap) \
  V(FixedCOWArrayMap) \
  V(FixedDoubleArrayMap) \
  V(ConstantPoolArrayMap) \
  V(NoInterceptorResultSentinel) \
  V(OrderedHashTableMap) \
  V(EmptyDescriptorArray) \
  V(EmptyConstantPoolArray) \
  V(SloppyArgumentsElementsMap) \
  V(FunctionContextMap) \
  V(ModuleContextMap) \
  V(ScriptContextMap) \
  V(UninitializedMap) \
  V(ArgumentsMarkerMap) \
  V(JSMessageObjectMap) \
  PRIVATE_SYMBOL_LIST(V)
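
// A plausible sketch (assumed, not verbatim from heap.cc) of how this list is
// consumed: Heap::RootIsImmortalImmovable(), declared further down, can be
// generated by expanding each entry into a switch case that falls through to
// "return true":
//
//   bool Heap::RootIsImmortalImmovable(int root_index) {
//     switch (root_index) {
//   #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
//       IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
//   #undef IMMORTAL_IMMOVABLE_ROOT
//         return true;
//       default:
//         return false;
//     }
//   }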

// Forward declarations.
class WeakObjectRetainer;


typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                      Object** pointer);


class StoreBufferRebuilder {
 public:
  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {}

  void Callback(MemoryChunk* page, StoreBufferEvent event);

 private:
  StoreBuffer* store_buffer_;

  // We record in this variable how full the store buffer was when we started
  // iterating over the current page, finding pointers to new space. If the
  // store buffer overflows again we can exempt the page from the store buffer
  // by rewinding to this point instead of having to search the store buffer.
  Object*** start_of_current_page_;
  // The current page we are scanning in the store buffer iterator.
  MemoryChunk* current_page_;
};

// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
// The last page in to-space is used for the promotion queue. On conflict
// during scavenge, the promotion queue is allocated externally and all
// entries are copied to the external queue.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(NULL),
        heap_(heap) {}

  void Destroy() {
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  Page* GetHeadPage() {
    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    // If we are already using an emergency stack, we can ignore it.
    if (emergency_stack_) return;

    // If the limit is not on the same page, we can ignore it.
    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;

    limit_ = reinterpret_cast<intptr_t*>(limit);

    if (limit_ <= rear_) {
  bool IsBelowPromotionQueue(Address to_space_top) {
    // If an emergency stack is used, the to-space address cannot interfere
    // with the promotion queue.
    if (emergency_stack_) return true;

    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is smaller than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int size);

  void remove(HeapObject** target, int* size) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      return;
    }

    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
  // The front of the queue is higher in the memory page chain than the rear.
  intptr_t* front_;
  intptr_t* rear_;
  intptr_t* limit_;

  static const int kEntrySizeInWords = 2;

  struct Entry {
    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}

    HeapObject* obj_;
    int size_;
  };

  List<Entry>* emergency_stack_;

  Heap* heap_;

  void RelocateQueueHead();

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
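
// A minimal usage sketch (illustrative only, not part of this header): the
// scavenger drains the queue by popping each (object, size) pair in one call,
// so it never has to re-read the map of a promoted object to learn its size.
//
//   while (!promotion_queue()->is_empty()) {
//     HeapObject* target;
//     int size;
//     promotion_queue()->remove(&target, &size);
//     // ... scan `target` for pointers into new space, using `size` ...
//   }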

typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
                                   HeapObject* object);


// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable {
 public:
  // Registers an external string.
  inline void AddString(String* string);

  inline void Iterate(ObjectVisitor* v);

  // Restores internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  void CleanUp();

  // Destroys all allocated memory.
  void TearDown();

 private:
  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

  inline void Verify();

  inline void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline void ShrinkNewStrings(int position);

  // To speed up scavenge collections, new space strings are kept
  // separate from old space strings.
  List<Object*> new_space_strings_;
  List<Object*> old_space_strings_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
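
// A sketch of the expected AddString() routing (an assumption based on the
// comments above; the real inline definition lives in heap-inl.h): strings in
// new space go to new_space_strings_ so every scavenge can visit them, while
// all others go to old_space_strings_, which only full GCs need to scan.
//
//   void ExternalStringTable::AddString(String* string) {
//     if (heap_->InNewSpace(string)) {
//       new_space_strings_.Add(string);
//     } else {
//       old_space_strings_.Add(string);
//     }
//   }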

enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};


class Heap {
 public:
  // Configure heap size in MB before setup. Returns false if the heap has
  // been set up already.
  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                     int max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();
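
  // Usage sketch (assumed, based on the declarations above): passing zeros
  // selects flag-derived defaults for every field, which is presumably what
  // ConfigureHeapDefault() forwards to, e.g.:
  //
  //   bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }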

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // Returns the maximum amount of memory reserved for the heap. For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space. The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  intptr_t MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
  int MaxSemiSpaceSize() { return max_semi_space_size_; }
  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  int TargetSemiSpaceSize() { return target_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  intptr_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  intptr_t Available();

  // Returns the size of all objects residing in the heap.
  intptr_t SizeOfObjects();

  intptr_t old_generation_allocation_limit() const {
    return old_generation_allocation_limit_;
  }

  // Return the starting address and a mask for the new space. And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  Address NewSpaceStart() { return new_space_.start(); }
  uintptr_t NewSpaceMask() { return new_space_.mask(); }
  Address NewSpaceTop() { return new_space_.top(); }

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_pointer_space() { return old_pointer_space_; }
  OldSpace* old_data_space() { return old_data_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  CellSpace* cell_space() { return cell_space_; }
  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }
  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_POINTER_SPACE:
        return old_pointer_space();
      case OLD_DATA_SPACE:
        return old_data_space();
      case PROPERTY_CELL_SPACE:
        return property_cell_space();
      default:
        return NULL;
    }
  }

  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  Address* OldPointerSpaceAllocationTopAddress() {
    return old_pointer_space_->allocation_top_address();
  }
  Address* OldPointerSpaceAllocationLimitAddress() {
    return old_pointer_space_->allocation_limit_address();
  }

  Address* OldDataSpaceAllocationTopAddress() {
    return old_data_space_->allocation_top_address();
  }
  Address* OldDataSpaceAllocationLimitAddress() {
    return old_data_space_->allocation_limit_address();
  }

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult
  CopyJSObject(JSObject* source, AllocationSite* site = NULL);

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // Iterates the whole code space to clear all ICs of the given kind.
  void ClearAllICsByKind(Code::Kind kind);

  // FreeSpace objects have a null map after deserialization. Update the map.
  void RepairFreeListsAfterDeserialization();

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Sloppy mode arguments object size.
  static const int kSloppyArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  // Strict mode arguments has no callee so it is smaller.
  static const int kStrictArgumentsObjectSize =
      JSObject::kHeaderSize + 1 * kPointerSize;
  // Indices for direct access into argument objects.
  static const int kArgumentsLengthIndex = 0;
  // callee is only valid in sloppy mode.
  static const int kArgumentsCalleeIndex = 1;

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages.
  void CreateFillerObjectAt(Address addr, int size);

  bool CanMoveObjectStart(HeapObject* object);

  // Indicates whether live bytes adjustment is triggered from within the GC
  // code or from mutator code.
  enum InvocationMode { FROM_GC, FROM_MUTATOR };

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(Address address, int by, InvocationMode mode);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  template <Heap::InvocationMode mode>
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
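
  // Usage sketch (illustrative): trimming the last `n` elements from mutator
  // code, which keeps live-byte accounting consistent during incremental
  // marking:
  //
  //   heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(array, n);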

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Object* ToBoolean(bool condition);

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // Invoked when a GC was requested via the stack guard.
  void HandleGCRequest();

  // Attempt to over-approximate the weak closure by marking object groups and
  // implicit references from global handles, but don't atomically complete
  // marking. If we continue to mark incrementally, we might have marked
  // objects that die later.
  void OverApproximateWeakClosure(const char* gc_reason);

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
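
  // Usage sketch (illustrative): forcing a precise, iterable-heap collection,
  // e.g. before walking all objects:
  //
  //   heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "heap iteration");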

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  inline void increment_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_++;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  inline void decrement_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_--;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);

  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
  // not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name)                            \
  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); }  \
  type* raw_unchecked_##name() {                                         \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);    \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) \
  String* name() { return String::cast(roots_[k##name##RootIndex]); }
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) \
  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, varname, description) \
  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  // The hidden_string is special because it is the empty string, but it does
  // not match the empty string in lookups.
  String* hidden_string() { return hidden_string_; }

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
  Object* array_buffers_list() const { return array_buffers_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }

  void set_encountered_weak_cells(Object* weak_cell) {
    encountered_weak_cells_ = weak_cell;
  }
  Object* encountered_weak_cells() const { return encountered_weak_cells_; }

  // Number of mark-sweeps.
  unsigned int ms_count() { return ms_count_; }

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates over pointers into the from-semispace of new space found in the
  // memory interval from start to end.
  void IterateAndMarkPointersToFromSpace(Address start, Address end,
                                         ObjectSlotCallback callback);

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InNewSpace(Address address);
  inline bool InNewSpacePage(Address address);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old pointer space.
  inline bool InOldPointerSpace(Address address);
  inline bool InOldPointerSpace(Object* object);

  // Returns whether the object resides in old data space.
  inline bool InOldDataSpace(Address address);
  inline bool InOldDataSpace(Object* object);

  // Checks whether an address/object is in the heap (including auxiliary
  // area and unused area).
  bool Contains(Address addr);
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(Address addr, AllocationSpace space);
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  inline OldSpace* TargetSpace(HeapObject* object);
  static inline AllocationSpace TargetSpaceId(InstanceType type);

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void public_set_code_stubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Support for computing object sizes for old objects during GCs. Returns
  // a function that is guaranteed to be safe for computing object sizes in
  // the current GC phase.
  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
    return gc_safe_size_of_old_object_;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  void public_set_store_buffer_top(Address* top) {
    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
  }

  void public_set_materialized_objects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  Address* store_buffer_top_address() {
    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
  }

  static bool RootIsImmortalImmovable(int root_index);

  // Verify the heap is in its normal state before or after a GC.
  void Verify();

  void OldPointerSpaceCheckStoreBuffer();
  void MapSpaceCheckStoreBuffer();
  void LargeObjectSpaceCheckStoreBuffer();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Returns a deterministic "time" value in ms. Works only with
  // FLAG_verify_predictable.
  double synthetic_time() { return allocations_count_ / 2.0; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  // Write barrier support for address[offset] = o.
  INLINE(void RecordWrite(Address address, int offset));

  // Write barrier support for address[start : start + len[ = o.
  INLINE(void RecordWrites(Address address, int start, int len));

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }

  void TracePathToObjectFrom(Object* target, Object* root);
  void TracePathToObject(Object* target);
  void TracePathToGlobal();

  // Callback function passed to Heap::Iterate etc. Copies an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static inline void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return NULL.
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // An object may have an AllocationSite associated with it through a trailing
  // AllocationMemento. Its feedback should be updated when objects are found
  // in the heap.
  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
                                                  ScratchpadSlotMode mode);

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  typedef List<Chunk> Reservation;

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations);

  // Support for the API.
  void CreateApiObjects();

  inline intptr_t PromotedTotalSize() {
    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
    if (total < 0) return 0;
    return static_cast<intptr_t>(total);
  }

  inline intptr_t OldGenerationSpaceAvailable() {
    return old_generation_allocation_limit_ - PromotedTotalSize();
  }

  inline intptr_t OldGenerationCapacityAvailable() {
    return max_old_generation_size_ - PromotedTotalSize();
  }

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kInitalOldGenerationLimitFactor = 2;

  static const int kPointerMultiplier = i::kPointerSize / 4;

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  // Calculates the allocation limit based on a given growing factor and a
  // given old generation size.
  intptr_t CalculateOldGenerationAllocationLimit(double factor,
                                                 intptr_t old_gen_size);
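
  // A plausible sketch of the computation (assumed; the real body lives in
  // heap.cc): scale the live old-generation size by the factor and clamp it
  // from below, e.g.
  //
  //   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
  //   return Max(limit, kMinimumOldGenerationAllocationLimit);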

  // Sets the allocation limit to trigger the next full garbage collection.
  void SetOldGenerationAllocationLimit(intptr_t old_gen_size,
                                       int freed_global_handles);

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
    PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex,
    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

    // Utility type maps.
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP

    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };

  Object* root(RootListIndex index) { return roots_[index]; }

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  Map* MapForExternalArrayType(ExternalArrayType array_type);
  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  ExternalArray* EmptyExternalArrayForMap(Map* map);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Optimized version of memmove for blocks with pointer size aligned sizes
  // and pointer size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  // Check new space expansion criteria and expand semispaces if they were hit.
  void CheckNewSpaceExpansionCriteria();

  inline void IncrementPromotedObjectsSize(int object_size) {
    DCHECK(object_size > 0);
    promoted_objects_size_ += object_size;
  }

  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
    DCHECK(object_size > 0);
    semi_space_copied_object_size_ += object_size;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(int survived) {
    DCHECK(survived >= 0);
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline bool NextGCIsLikelyToBeFull(intptr_t limit) {
    if (FLAG_gc_global) return true;

    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    return false;
  }

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearJSFunctionResultCaches();

  void ClearNormalizedMapCaches();

  GCTracer* tracer() { return &tracer_; }

  // Returns the size of objects residing in non-new spaces.
  intptr_t PromotedSpaceSizeOfObjects();

  double total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
    if (is_crankshafted) {
      crankshaft_codegen_bytes_generated_ += size;
    } else {
      full_codegen_bytes_generated_ += size;
    }
  }

  // Update GC statistics that are tracked on the Heap.
  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
                                    double marking_time);

  // Returns maximum GC pause.
  double get_max_gc_pause() { return max_gc_pause_; }

  // Returns maximum size of objects alive after GC.
  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns minimal interval between two subsequent collections.
  double get_min_in_mutator() { return min_in_mutator_; }

  MarkCompactCollector* mark_compact_collector() {
    return &mark_compact_collector_;
  }

  StoreBuffer* store_buffer() { return &store_buffer_; }

  Marking* marking() { return &marking_; }

  IncrementalMarking* incremental_marking() { return &incremental_marking_; }

  ExternalStringTable* external_string_table() {
    return &external_string_table_;
  }

  // Returns the current sweep generation.
  int sweep_generation() { return sweep_generation_; }

  inline Isolate* isolate();

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

  inline bool OldGenerationAllocationLimitReached();

  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
  }

  void QueueMemoryChunkForFree(MemoryChunk* chunk);
  void FreeQueuedChunks();

  int gc_count() const { return gc_count_; }

  bool RecentIdleNotificationHappened();

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  uint32_t HashSeed() {
    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
    DCHECK(FLAG_randomize_hashes || seed == 0);
    return seed;
  }

  Smi* NextScriptId() {
    int next_id = last_script_id()->value() + 1;
    if (!Smi::IsValid(next_id) || next_id < 0) next_id = 1;
    Smi* next_id_smi = Smi::FromInt(next_id);
    set_last_script_id(next_id_smi);
    return next_id_smi;
  }

  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetConstructStubDeoptPCOffset(int pc_offset) {
    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetGetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetSetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  int64_t amount_of_external_allocated_memory() {
    return amount_of_external_allocated_memory_;
  }

  void DeoptMarkedAllocationSites();

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  bool DeoptMaybeTenuredAllocationSites() {
    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
  }

  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
  // stored in a contiguous linear buffer. Stats groups are stored one after
  // another.
  enum {
    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
    FIRST_FIXED_ARRAY_SUB_TYPE =
        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
    FIRST_CODE_AGE_SUB_TYPE =
        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
  };

  void RecordObjectStats(InstanceType type, size_t size) {
    DCHECK(type <= LAST_TYPE);
    object_counts_[type]++;
    object_sizes_[type] += size;
  }

  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
    int code_age_index =
        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
    DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
    DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
           code_age_index < OBJECT_STATS_COUNT);
    object_counts_[code_sub_type_index]++;
    object_sizes_[code_sub_type_index] += size;
    object_counts_[code_age_index]++;
    object_sizes_[code_age_index] += size;
  }

  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
    DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
  }

  void CheckpointObjectStats();

  // We don't use a LockGuard here since we want to lock the heap
  // only when FLAG_concurrent_recompilation is true.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };
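
  // Usage sketch (illustrative): scope-based locking around object
  // relocation,
  //
  //   { Heap::RelocationLock relocation_lock(heap); /* move objects */ }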

  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);

  static void FatalProcessOutOfMemory(const char* location,
                                      bool take_snapshot = false);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  bool deserialization_complete() const { return deserialization_complete_; }

 protected:
  // Methods made available to tests.

  // Allocates a JS Map in the heap.
  MUST_USE_RESULT AllocationResult
  AllocateMap(InstanceType instance_type, int instance_size,
              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // If allocation_site is non-null, then a memento is emitted after the object
  // that points to the site.
  MUST_USE_RESULT AllocationResult
  AllocateJSObject(JSFunction* constructor,
                   PretenureFlag pretenure = NOT_TENURED,
                   AllocationSite* allocation_site = NULL);

  // Allocates and initializes a new JavaScript object based on a map.
  // Passing an allocation site means that a memento will be created that
  // points to the site.
  MUST_USE_RESULT AllocationResult
  AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
                          bool alloc_props = true,
                          AllocationSite* allocation_site = NULL);

  // Allocates a HeapNumber from value.
  MUST_USE_RESULT AllocationResult
  AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
                     PretenureFlag pretenure = NOT_TENURED);

  // Allocates a byte array of the specified length.
  MUST_USE_RESULT AllocationResult
  AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Copy the code and scope info part of the code object, but insert
  // the provided data as the relocation information.
  MUST_USE_RESULT AllocationResult
  CopyCode(Code* code, Vector<byte> reloc_info);

  MUST_USE_RESULT AllocationResult CopyCode(Code* code);

  // Allocates a fixed array initialized with undefined values.
  MUST_USE_RESULT AllocationResult
  AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);

  static const int kInitialStringTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

 private:
  // The amount of external memory registered through the API kept alive
  // by global handles.
  int64_t amount_of_external_allocated_memory_;

  // Caches the amount of external memory registered at the last global gc.
  int64_t amount_of_external_allocated_memory_at_last_global_gc_;

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  Object* roots_[kRootListLength];

  size_t code_range_size_;
  int reserved_semispace_size_;
  int max_semi_space_size_;
  int initial_semispace_size_;
  int target_semispace_size_;
  intptr_t max_old_generation_size_;
  intptr_t initial_old_generation_size_;
  bool old_generation_size_configured_;
  intptr_t max_executable_size_;
  intptr_t maximum_committed_;

  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
  int survived_since_last_expansion_;

  // ... and since the last scavenge.
  int survived_last_scavenge_;

  // For keeping track of when to flush RegExp code.
  int sweep_generation_;

  int always_allocate_scope_depth_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  int scan_on_scavenge_pages_;

  NewSpace new_space_;
  OldSpace* old_pointer_space_;
  OldSpace* old_data_space_;
  OldSpace* code_space_;
  MapSpace* map_space_;
  CellSpace* cell_space_;
  PropertyCellSpace* property_cell_space_;
  LargeObjectSpace* lo_space_;
  HeapState gc_state_;
  int gc_post_processing_depth_;
  Address new_space_top_after_last_gc_;

  // Returns the amount of external memory registered since last global gc.
  int64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_;

  // Countdown counter, dumps allocation hash when 0.
  uint32_t dump_allocations_hash_countdown_;

  // How many mark-sweep collections happened.
  unsigned int ms_count_;

  // How many GCs happened.
  unsigned int gc_count_;

  // For post mortem debugging.
  static const int kRememberedUnmappedPages = 128;
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

  // Total length of the strings we failed to flatten since the last GC.
  int unflattened_strings_length_;
#define ROOT_ACCESSOR(type, name, camel_name)                                 \
  inline void set_##name(type* value) {                                       \
    /* The deserializer makes use of the fact that these common roots are */  \
    /* never in new space and never on a page that is being compacted. */     \
    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value;                                 \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
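  // Illustrative expansion (for a hypothetical root list entry
  // V(Map, foo_map, FooMap); not itself part of the root list):
  //
  //   inline void set_foo_map(Map* value) {
  //     DCHECK(kFooMapRootIndex >= kOldSpaceRoots || !InNewSpace(value));
  //     roots_[kFooMapRootIndex] = value;
  //   }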
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // remaining until the next failure and garbage collection.
  int allocation_timeout_;
  // Limit that triggers a global GC on the next (normally caused) GC. This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  intptr_t old_generation_allocation_limit_;

  // The allocation limit when there is > kMinIdleTimeToStartIncrementalMarking
  // idle time in the idle time handler.
  intptr_t idle_old_generation_allocation_limit_;
  // Indicates that an allocation has failed in the old generation since the
  // last GC.
  bool old_gen_exhausted_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_;

  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at start.
  Object* native_contexts_list_;
  Object* array_buffers_list_;
  Object* allocation_sites_list_;

  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
  // marking. It is initialized during marking, destroyed after marking and
  // contains Smi(0) while marking is not active.
  Object* encountered_weak_collections_;

  Object* encountered_weak_cells_;
  StoreBufferRebuilder store_buffer_rebuilder_;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantStringTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];
  // The special hidden string which is an empty string, but does not match
  // any string when looked up in properties.
  String* hidden_string_;
  // GC callback function, called before and after mark-compact GC.
  // Allocations in the callback function are disallowed.
  struct GCPrologueCallbackPair {
    GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
                           GCType gc_type, bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
    bool operator==(const GCPrologueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    v8::Isolate::GCPrologueCallback callback;
    GCType gc_type;
    // TODO(dcarney): remove variable
    bool pass_isolate_;
  };
  List<GCPrologueCallbackPair> gc_prologue_callbacks_;

  struct GCEpilogueCallbackPair {
    GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
                           GCType gc_type, bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
    bool operator==(const GCEpilogueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    v8::Isolate::GCPrologueCallback callback;
    GCType gc_type;
    // TODO(dcarney): remove variable
    bool pass_isolate_;
  };
  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
  // Support for computing object sizes during GC.
  HeapObjectCallback gc_safe_size_of_old_object_;
  static int GcSafeSizeOfOldObject(HeapObject* object);

  // Update the GC state. Called from the mark-compact collector.
  void MarkMapPointersAsEncoded(bool encoded) {
    DCHECK(!encoded);
    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
  }

  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();
  // Pretenuring decisions are made based on feedback collected during new
  // space evacuation. Note that between feedback collection and calling this
  // method, objects in old space must not move.
  // Right now we only process pretenuring feedback in high promotion mode.
  void ProcessPretenuringFeedback();

  // Checks whether a global GC is necessary.
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top.
  void EnsureFillerObjectAtTop();
  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects. May cause a GC.
  void MakeHeapIterable();

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool CollectGarbage(
      GarbageCollector collector, const char* gc_reason,
      const char* collector_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs garbage collection.
  // Returns whether there is a chance another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();
  // Selects the proper allocation space depending on the given object
  // size, pretenuring decision, and preferred old-space.
  static AllocationSpace SelectSpace(int object_size,
                                     AllocationSpace preferred_old_space,
                                     PretenureFlag pretenure) {
    DCHECK(preferred_old_space == OLD_POINTER_SPACE ||
           preferred_old_space == OLD_DATA_SPACE);
    if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
    return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
  }
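  // Behavior sketch (the sizes below are illustrative, not additional API):
  //
  //   SelectSpace(32, OLD_DATA_SPACE, TENURED)      // == OLD_DATA_SPACE
  //   SelectSpace(32, OLD_DATA_SPACE, NOT_TENURED)  // == NEW_SPACE
  //
  // Anything larger than Page::kMaxRegularHeapObjectSize goes to LO_SPACE,
  // regardless of the pretenuring decision.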
  HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
  // Allocate an uninitialized object. The memory is non-executable if the
  // hardware and OS allow. This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);

  // Allocates a heap object based on the map.
  MUST_USE_RESULT AllocationResult
      Allocate(Map* map, AllocationSpace space,
               AllocationSite* allocation_site = NULL);

  // Allocates a partial map for bootstrapping.
  MUST_USE_RESULT AllocationResult
      AllocatePartialMap(InstanceType instance_type, int instance_size);
  // Initializes a JSObject based on its map.
  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                 Map* map);
  void InitializeAllocationMemento(AllocationMemento* memento,
                                   AllocationSite* allocation_site);

  // Allocate a block of memory in the given space (filled with a filler).
  // Used as a fall-back for generated code when the space is full.
  MUST_USE_RESULT AllocationResult
      AllocateFillerObject(int size, bool double_align, AllocationSpace space);

  // Allocate an uninitialized fixed array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedArray(int length, PretenureFlag pretenure);

  // Allocate an uninitialized fixed double array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);

  // Allocate an initialized fixed array with the given filler value.
  MUST_USE_RESULT AllocationResult
      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
                                   Object* filler);
  // Allocates and partially initializes a String. There are two String
  // encodings: one-byte and two-byte. These functions allocate a string of
  // the given length and set its map and length fields. The characters of
  // the string are uninitialized.
  MUST_USE_RESULT AllocationResult
      AllocateRawOneByteString(int length, PretenureFlag pretenure);
  MUST_USE_RESULT AllocationResult
      AllocateRawTwoByteString(int length, PretenureFlag pretenure);

  bool CreateInitialMaps();
  void CreateInitialObjects();
  // Allocates an internalized string in old space based on the character
  // stream.
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
      Vector<const char> str, int chars, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
      Vector<const uint8_t> str, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
      Vector<const uc16> str, uint32_t hash_field);

  template <bool is_one_byte, typename T>
  MUST_USE_RESULT AllocationResult
      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);

  template <typename T>
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
      T t, int chars, uint32_t hash_field);
  // Allocates an uninitialized fixed array. It must be filled by the caller.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT AllocationResult
      CopyFixedArrayWithMap(FixedArray* src, Map* map);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
      FixedDoubleArray* src);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
      ConstantPoolArray* src);
  // Computes a single character string where the character has the given code.
  // A cache is used for one-byte (Latin1) codes.
  MUST_USE_RESULT AllocationResult
      LookupSingleCharacterStringFromCode(uint16_t code);

  // Allocate a symbol in old space.
  MUST_USE_RESULT AllocationResult AllocateSymbol();

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);

  MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
      const ConstantPoolArray::NumberOfEntries& small);

  MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
      const ConstantPoolArray::NumberOfEntries& small,
      const ConstantPoolArray::NumberOfEntries& extended);

  // Allocates an external array of the specified length and type.
  MUST_USE_RESULT AllocationResult
      AllocateExternalArray(int length, ExternalArrayType array_type,
                            void* external_pointer, PretenureFlag pretenure);

  // Allocates a fixed typed array of the specified length and type.
  MUST_USE_RESULT AllocationResult
      AllocateFixedTypedArray(int length, ExternalArrayType array_type,
                              PretenureFlag pretenure);

  // Make a copy of src and return it.
  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);

  // Allocates a fixed double array with uninitialized values.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
      int length, PretenureFlag pretenure = NOT_TENURED);
  // These two Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(void CreateJSEntryStub());
  NO_INLINE(void CreateJSConstructEntryStub());

  void CreateFixedStubs();
  // Allocate empty fixed array.
  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();

  // Allocate empty external array of given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyExternalArray(ExternalArrayType array_type);

  // Allocate empty fixed typed array of given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  // Allocate empty constant pool array.
  MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();

  // Allocate a tenured simple cell.
  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);

  // Allocate a tenured JS global property cell initialized with the hole.
  MUST_USE_RESULT AllocationResult AllocatePropertyCell();

  MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);

  // Allocates a new utility object in the old generation.
  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);

  // Allocates a new foreign object.
  MUST_USE_RESULT AllocationResult
      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);

  MUST_USE_RESULT AllocationResult
      AllocateCode(int object_size, bool immovable);

  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);

  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
  // Performs a minor collection in new generation.
  void Scavenge();

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }

  // Fill in bogus values in from space.
  void ZapFromSpace();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap, Object** pointer);

  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
                                          StoreBufferEvent event);
  // Performs a major collection in the whole heap.
  void MarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();
  void MarkCompactEpilogue();

  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessArrayBuffers(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);

  // Deopts all code that contains allocation instructions which are tenured
  // or not tenured. Moreover, it clears the pretenuring allocation site
  // statistics.
  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
  // Called on heap tear-down.
  void TearDownArrayBuffers();

  // Record statistics before and after garbage collection.
  void ReportStatisticsBeforeGC();
  void ReportStatisticsAfterGC();

  // Slow part of scavenge object.
  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);

  // Total RegExp code ever generated.
  double total_regexp_code_generated_;
  // Creates and installs the full-sized number string cache.
  int FullSizeNumberStringCacheLength();
  // Flush the number to string cache.
  void FlushNumberStringCache();

  // Sets used allocation sites entries to undefined.
  void FlushAllocationSitesScratchpad();

  // Initializes the allocation sites scratchpad with undefined values.
  void InitializeAllocationSitesScratchpad();

  // Adds an allocation site to the scratchpad if there is space left.
  void AddAllocationSiteToScratchpad(AllocationSite* site,
                                     ScratchpadSlotMode mode);

  void UpdateSurvivalStatistics(int start_new_space_size);
  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;

  static const int kOldSurvivalRateLowThreshold = 10;

  int high_survival_rate_period_length_;
  intptr_t promoted_objects_size_;
  double promotion_ratio_;
  double promotion_rate_;
  intptr_t semi_space_copied_object_size_;
  intptr_t previous_semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;
  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. Once we have switched to the maximum new space size, we
  // deoptimize the code that belongs to the allocation site and derive the
  // lifetime of the allocation site.
  unsigned int maximum_size_scavenges_;

  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
  // Re-visit incremental marking heuristics.
  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
  void ConfigureInitialOldGenerationSize();

  void SelectScavengingVisitorsTable();

  void IdleMarkCompact(const char* message);

  bool TryFinalizeIdleIncrementalMarking(
      double idle_time_in_ms, size_t size_of_objects,
      size_t mark_compact_speed_in_bytes_per_ms);

  bool WorthActivatingIncrementalMarking();

  void ClearObjectStats(bool clear_last_time_stats = false);

  inline void UpdateAllocationsHash(HeapObject* object);
  inline void UpdateAllocationsHash(uint32_t value);
  inline void PrintAlloctionsHash();
  // Object counts and used memory by InstanceType.
  size_t object_counts_[OBJECT_STATS_COUNT];
  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
  size_t object_sizes_[OBJECT_STATS_COUNT];
  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];

  // Maximum GC pause.
  double max_gc_pause_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Maximum size of objects alive after GC.
  intptr_t max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  double min_in_mutator_;

  // Cumulative GC time spent in marking.
  double marking_time_;

  // Cumulative GC time spent in sweeping.
  double sweeping_time_;

  // Last time an idle notification happened.
  double last_idle_notification_time_;
  MarkCompactCollector mark_compact_collector_;

  StoreBuffer store_buffer_;

  IncrementalMarking incremental_marking_;

  GCIdleTimeHandler gc_idle_time_handler_;
  unsigned int gc_count_at_last_idle_gc_;

  // These two counters are monotonically increasing and never reset.
  size_t full_codegen_bytes_generated_;
  size_t crankshaft_codegen_bytes_generated_;

  // If the --deopt_every_n_garbage_collections flag is set to a positive
  // value, this variable holds the number of garbage collections since the
  // last deoptimization triggered by garbage collection.
  int gcs_since_last_deopt_;

  static const int kAllocationSiteScratchpadSize = 256;
  int allocation_sites_scratchpad_length_;

  static const int kMaxMarkCompactsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;
  // Flag is set when the heap has been configured. The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  ExternalStringTable external_string_table_;

  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;

  MemoryChunk* chunks_queued_for_free_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;

  bool deserialization_complete_;
  friend class AlwaysAllocateScope;
  friend class Deserializer;
  friend class Factory;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapIterator;
  friend class Isolate;
  friend class MarkCompactCollector;
  friend class MarkCompactMarkingVisitor;
  friend class MapCompact;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};
class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  int* start_marker;                       // 0
  int* new_space_size;                     // 1
  int* new_space_capacity;                 // 2
  intptr_t* old_pointer_space_size;        // 3
  intptr_t* old_pointer_space_capacity;    // 4
  intptr_t* old_data_space_size;           // 5
  intptr_t* old_data_space_capacity;       // 6
  intptr_t* code_space_size;               // 7
  intptr_t* code_space_capacity;           // 8
  intptr_t* map_space_size;                // 9
  intptr_t* map_space_capacity;            // 10
  intptr_t* cell_space_size;               // 11
  intptr_t* cell_space_capacity;           // 12
  intptr_t* lo_space_size;                 // 13
  int* global_handle_count;                // 14
  int* weak_global_handle_count;           // 15
  int* pending_global_handle_count;        // 16
  int* near_death_global_handle_count;     // 17
  int* free_global_handle_count;           // 18
  intptr_t* memory_allocator_size;         // 19
  intptr_t* memory_allocator_capacity;     // 20
  int* objects_per_type;                   // 21
  int* size_per_type;                      // 22
  int* os_error;                           // 23
  int* end_marker;                         // 24
  intptr_t* property_cell_space_size;      // 25
  intptr_t* property_cell_space_capacity;  // 26
};
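// Minimal sketch of how the sentinels can be used (hypothetical helper, not
// part of this header): a post-mortem reader validates a recorded HeapStats
// block by checking the markers before trusting any of the other slots.
//
//   bool LooksLikeValidHeapStats(const HeapStats& stats) {
//     return *stats.start_marker == HeapStats::kStartMarker &&
//            *stats.end_marker == HeapStats::kEndMarker;
//   }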
class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  // Implicitly disable artificial allocation failures.
  Heap* heap_;
  DisallowAllocationFailure daf_;
};
class GCCallbacksScope {
 public:
  explicit inline GCCallbacksScope(Heap* heap);
  inline ~GCCallbacksScope();

  inline bool CheckReenter();

 private:
  Heap* heap_;
};
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end);
};


// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end);
};
// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
  Space* next();

 private:
  Heap* heap_;
  int counter_;
};
// Space iterator for iterating over all old spaces of the heap: Old pointer
// space, old data space and code space. Returns each space in turn, and null
// when it is done.
class OldSpaces BASE_EMBEDDED {
 public:
  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
  OldSpace* next();

 private:
  Heap* heap_;
  int counter_;
};
// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old pointer space, old data space, code space and cell space. Returns
// each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};
// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
  HeapObjectCallback size_func_;
};
// A HeapIterator provides iteration over the whole heap. It
// aggregates the specific iterators for the different spaces as
// these can only iterate over one space only.
//
// HeapIterator ensures there is no allocation during its lifetime
// (using an embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As implementation of free
// nodes filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
class HeapObjectsFilter;

class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap);
  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
  ~HeapIterator();

  HeapObject* next();
  void reset();

 private:
  struct MakeHeapIterableHelper {
    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
  };

  // Perform the initialization.
  void Init();
  // Perform all necessary shutdown (destruction) work.
  void Shutdown();
  HeapObject* NextObject();

  MakeHeapIterableHelper make_heap_iterable_helper_;
  DisallowHeapAllocation no_heap_allocation_;
  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  int Lookup(Handle<Map> map, Handle<Name> name);

  // Update an element in the cache.
  void Update(Handle<Map> map, Handle<Name> name, int field_offset);

  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kEntryLength = 2;
  static const int kMapIndex = 0;
  static const int kKeyIndex = 1;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
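  // Worked example of the constants above (illustrative): Hash() produces an
  // index whose low two bits are zeroed by kHashMask == -4, so every bucket
  // starts at a multiple of kEntriesPerBucket == 4. An unmasked hash of,
  // say, 0x37 is masked down to 0x34, and a lookup probes entries 0x34
  // through 0x37 of keys_ before reporting kNotFound (or evicting an entry
  // on Update()).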
 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Handle<Map> map, Handle<Name> name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() { return reinterpret_cast<Address>(&keys_); }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    Name* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index equal to kAbsent means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  int Lookup(Map* source, Name* name) {
    if (!name->IsUniqueName()) return kAbsent;
    int index = Hash(source, name);
    Key& key = keys_[index];
    if ((key.source == source) && (key.name == name)) return results_[index];
    return kAbsent;
  }
  // Update an element in the cache.
  void Update(Map* source, Name* name, int result) {
    DCHECK(result != kAbsent);
    if (name->IsUniqueName()) {
      int index = Hash(source, name);
      Key& key = keys_[index];
      key.source = source;
      key.name = name;
      results_[index] = result;
    }
  }

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;
 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static int Hash(Object* source, Name* name) {
    // Uses only lower 32 bits if pointers are larger.
    uint32_t source_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
        kPointerSizeLog2;
    uint32_t name_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
        kPointerSizeLog2;
    return (source_hash ^ name_hash) % kLength;
  }
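  // Worked example (illustrative): on a 64-bit build, where kPointerSizeLog2
  // is 3, two 8-byte-aligned pointers such as 0x1008 and 0x2010 hash to
  // ((0x1008 >> 3) ^ (0x2010 >> 3)) % kLength. Shifting first discards the
  // always-zero alignment bits, so aligned pointers do not all collapse into
  // the same few buckets.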
  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
class RegExpResultsCache {
 public:
  enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };

  // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
  // On success, the returned result is guaranteed to be a COW-array.
  static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
                        ResultsCacheType type);
  // Attempt to add value_array to the cache specified by type. On success,
  // value_array is turned into a COW-array.
  static void Enter(Isolate* isolate, Handle<String> key_string,
                    Handle<Object> key_pattern, Handle<FixedArray> value_array,
                    ResultsCacheType type);
  static void Clear(FixedArray* cache);
  static const int kRegExpResultsCacheSize = 0x100;

 private:
  static const int kArrayEntriesPerCacheEntry = 4;
  static const int kStringOffset = 0;
  static const int kPatternOffset = 1;
  static const int kArrayOffset = 2;
};
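// Illustrative lookup-then-populate pattern (sketch only; subject, pattern
// and value_array are assumed handles at some call site, not names defined
// here):
//
//   Object* cached = RegExpResultsCache::Lookup(
//       heap, *subject, *pattern,
//       RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
//   if (cached->IsSmi()) {  // Smi 0 signals a miss.
//     // ... compute value_array the slow way ...
//     RegExpResultsCache::Enter(isolate, subject, pattern, value_array,
//                               RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
//   }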
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If NULL is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned as in some GC situations the object has been moved.
  virtual Object* RetainAs(Object* object) = 0;
};
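// Minimal sketch of a concrete retainer (hypothetical; real retainers decide
// based on liveness): returning the object unchanged keeps it on the weak
// list being processed, while returning NULL drops it.
//
//   class KeepEverythingRetainer : public WeakObjectRetainer {
//    public:
//     virtual Object* RetainAs(Object* object) { return object; }
//   };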
// Intrusive object marking uses the least significant bit of the
// heap object's map word to mark objects.
// Normally all map words have the least significant bit set
// because they contain a tagged map pointer.
// If the bit is not set, the object is marked.
// All objects should be unmarked before resuming
// JavaScript execution.
class IntrusiveMarking {
 public:
  static bool IsMarked(HeapObject* object) {
    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
  }

  static void ClearMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
    DCHECK(!IsMarked(object));
  }

  static void SetMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
    DCHECK(IsMarked(object));
  }

  static Map* MapOfMarkedObject(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
  }

  static int SizeOfMarkedObject(HeapObject* object) {
    return object->SizeFromMap(MapOfMarkedObject(object));
  }
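  // Usage sketch (illustrative): because marking flips the tag bit in place,
  // the map word must be decoded via MapOfMarkedObject() while the object is
  // marked.
  //
  //   IntrusiveMarking::SetMark(object);
  //   DCHECK(IntrusiveMarking::IsMarked(object));
  //   int size = IntrusiveMarking::SizeOfMarkedObject(object);
  //   IntrusiveMarking::ClearMark(object);  // Restore before running JS.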
 private:
  static const uintptr_t kNotMarkedBit = 0x1;
  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
};


#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after the first match.
  };

  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  virtual void VisitPointers(Object** start, Object** end);

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;
 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;
  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
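// Illustrative usage (sketch; the root to trace from is an assumption about
// the call site, not something this class discovers by itself):
//
//   PathTracer tracer(target, PathTracer::FIND_FIRST, VISIT_ALL);
//   tracer.TracePathFrom(&some_root);  // Repeat for each root of interest.
//   if (tracer.found()) {
//     // At least one path from the traced roots reaches the target.
//   }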
#endif  // DEBUG

}
}  // namespace v8::internal

#endif  // V8_HEAP_HEAP_H_