1 // Copyright (c) Microsoft. All rights reserved.
2 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
5 // Copyright (c) Microsoft. All rights reserved.
6 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
13 #pragma optimize( "t", on )
18 #define inline __attribute__((always_inline)) inline
20 #define inline __forceinline
27 #pragma warning(disable:4293)
28 #pragma warning(disable:4477)
31 inline void FATAL_GC_ERROR()
33 #ifndef DACCESS_COMPILE
34 GCToOSInterface::DebugBreak();
35 #endif // DACCESS_COMPILE
36 _ASSERTE(!"Fatal Error in GC.");
37 GCToEEInterface::HandleFatalError((unsigned int)COR_E_EXECUTIONENGINE);
41 // This turns on instrumentation that collects info for heap balancing.
42 // Define it and make sure you have HEAP_BALANCE_LOG/HEAP_BALANCE_TEMP_LOG
43 // level logging enabled *only*.
44 //#define HEAP_BALANCE_INSTRUMENTATION
45 #endif //MULTIPLE_HEAPS
48 #pragma inline_depth(20)
51 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
52 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
53 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much
54 // work to make FEATURE_STRUCTALIGN not apply to LOH so they can be both
56 #define FEATURE_LOH_COMPACTION
58 #ifdef FEATURE_64BIT_ALIGNMENT
59 // We need the following feature as part of keeping 64-bit types aligned in the GC heap.
60 #define RESPECT_LARGE_ALIGNMENT //Preserve double alignment of objects during relocation
61 #endif //FEATURE_64BIT_ALIGNMENT
63 #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
66 #define DESIRED_PLUG_LENGTH (1000)
69 #define FEATURE_PREMORTEM_FINALIZATION
72 #ifndef FEATURE_REDHAWK
74 #define COLLECTIBLE_CLASS
75 #endif // !FEATURE_REDHAWK
78 #define initial_internal_roots (1024*16)
79 #endif // HEAP_ANALYZE
81 #define MARK_LIST //used sorted list to speed up plan phase
83 #define BACKGROUND_GC //concurrent background GC (requires WRITE_WATCH)
86 #define MH_SC_MARK //scalable marking
87 //#define SNOOP_STATS //diagnostic
88 #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
91 //This is used to mark some type volatile only when the scalable marking is used.
92 #if defined (SERVER_GC) && defined (MH_SC_MARK)
93 #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
94 #else //SERVER_GC&&MH_SC_MARK
95 #define SERVER_SC_MARK_VOLATILE(x) x
96 #endif //SERVER_GC&&MH_SC_MARK
98 //#define MULTIPLE_HEAPS //Allow multiple heaps for servers
100 #define CARD_BUNDLE //enable card bundle feature.(requires WRITE_WATCH)
102 // #define ALLOW_REFERENCES_IN_POH //Allow POH objects to contain references.
105 #define BGC_SERVO_TUNING
106 #endif //BACKGROUND_GC
108 #if defined(BACKGROUND_GC) || defined(CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
109 #define WRITE_WATCH //Write Watch feature
110 #endif //BACKGROUND_GC || CARD_BUNDLE
113 #define array_size 100
116 #define FFIND_DECAY 7 //Number of GC for which fast find will be active
119 #define MAX_LONGPATH 1024
120 #endif // MAX_LONGPATH
123 //#define SIMPLE_DPRINTF
125 //#define JOIN_STATS //amount of time spent in the join
127 //#define SYNCHRONIZATION_STATS
128 //#define SEG_REUSE_STATS
130 #ifdef SYNCHRONIZATION_STATS
131 #define BEGIN_TIMING(x) \
133 x##_start = GCToOSInterface::QueryPerformanceCounter()
135 #define END_TIMING(x) \
137 x##_end = GCToOSInterface::QueryPerformanceCounter(); \
138 x += x##_end - x##_start
140 #else //SYNCHRONIZATION_STATS
141 #define BEGIN_TIMING(x)
142 #define END_TIMING(x)
143 #endif //SYNCHRONIZATION_STATS
145 #ifdef GC_CONFIG_DRIVEN
146 void GCLogConfig (const char *fmt, ... );
147 #define cprintf(x) {GCLogConfig x;}
148 #endif //GC_CONFIG_DRIVEN
150 // For the bestfit algorithm when we relocate ephemeral generations into an
151 // existing gen2 segment.
152 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
153 #define MIN_INDEX_POWER2 6
158 #define MAX_INDEX_POWER2 30
160 #define MAX_INDEX_POWER2 26
166 #define MAX_INDEX_POWER2 28
168 #define MAX_INDEX_POWER2 24
173 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
175 #define MAX_NUM_FREE_SPACES 200
176 #define MIN_NUM_FREE_SPACES 5
182 #ifdef FEATURE_STRUCTALIGN
183 #define REQD_ALIGN_DCL ,int requiredAlignment
184 #define REQD_ALIGN_ARG ,requiredAlignment
185 #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
186 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
187 #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
188 #else // FEATURE_STRUCTALIGN
189 #define REQD_ALIGN_DCL
190 #define REQD_ALIGN_ARG
191 #define REQD_ALIGN_AND_OFFSET_DCL
192 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
193 #define REQD_ALIGN_AND_OFFSET_ARG
194 #endif // FEATURE_STRUCTALIGN
196 #ifdef MULTIPLE_HEAPS
197 #define THREAD_NUMBER_DCL ,int thread
198 #define THREAD_NUMBER_ARG ,thread
199 #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
200 #define THREAD_FROM_HEAP int thread = heap_number;
201 #define HEAP_FROM_THREAD gc_heap* hpt = gc_heap::g_heaps[thread];
203 #define THREAD_NUMBER_DCL
204 #define THREAD_NUMBER_ARG
205 #define THREAD_NUMBER_FROM_CONTEXT
206 #define THREAD_FROM_HEAP
207 #define HEAP_FROM_THREAD gc_heap* hpt = 0;
208 #endif //MULTIPLE_HEAPS
210 //These constants are ordered
211 const int policy_sweep = 0;
212 const int policy_compact = 1;
213 const int policy_expand = 2;
216 #define MIN_CUSTOM_LOG_LEVEL 7
217 #define SEG_REUSE_LOG_0 (MIN_CUSTOM_LOG_LEVEL)
218 #define SEG_REUSE_LOG_1 (MIN_CUSTOM_LOG_LEVEL + 1)
219 #define DT_LOG_0 (MIN_CUSTOM_LOG_LEVEL + 2)
220 #define BGC_TUNING_LOG (MIN_CUSTOM_LOG_LEVEL + 3)
221 #define GTC_LOG (MIN_CUSTOM_LOG_LEVEL + 4)
222 #define GC_TABLE_LOG (MIN_CUSTOM_LOG_LEVEL + 5)
223 #define JOIN_LOG (MIN_CUSTOM_LOG_LEVEL + 6)
224 #define SPINLOCK_LOG (MIN_CUSTOM_LOG_LEVEL + 7)
225 #define SNOOP_LOG (MIN_CUSTOM_LOG_LEVEL + 8)
226 #define COMMIT_ACCOUNTING_LOG (MIN_CUSTOM_LOG_LEVEL + 9)
228 // NOTE! This is for HEAP_BALANCE_INSTRUMENTATION
229 // This particular one is special and needs to be well formatted because we
230 // do post processing on it with tools\GCLogParser. If you need to add some
231 // detail to help with investigation that's not 't processed by tooling
232 // prefix it with TEMP so that line will be written to the results as is in
233 // the result. I have some already logged with HEAP_BALANCE_TEMP_LOG.
234 #define HEAP_BALANCE_LOG (DT_LOG_0 + 7)
235 #define HEAP_BALANCE_TEMP_LOG (DT_LOG_0 + 8)
237 #ifndef DACCESS_COMPILE
239 #ifdef SIMPLE_DPRINTF
241 void GCLog (const char *fmt, ... );
242 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
243 #else //SIMPLE_DPRINTF
244 // Nobody used the logging mechanism that used to be here. If we find ourselves
245 // wanting to inspect GC logs on unmodified builds, we can use this define here
247 #define dprintf(l, x)
248 //#define dprintf(l,x) STRESS_LOG_VA(x);
250 #endif //SIMPLE_DPRINTF
252 #else //DACCESS_COMPILE
254 #endif //DACCESS_COMPILE
259 #if !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE)
261 #define assert _ASSERTE
263 #define ASSERT _ASSERTE
264 #endif // FEATURE_REDHAWK
266 struct GCDebugSpinLock {
267 VOLATILE(int32_t) lock; // -1 if free, 0 if held
269 VOLATILE(Thread *) holding_thread; // -1 if no thread holds the lock.
270 VOLATILE(BOOL) released_by_gc_p; // a GC thread released the lock.
272 #if defined (SYNCHRONIZATION_STATS)
273 // number of times we went into SwitchToThread in enter_spin_lock.
274 unsigned int num_switch_thread;
275 // number of times we went into WaitLonger.
276 unsigned int num_wait_longer;
277 // number of times we went to calling SwitchToThread in WaitLonger.
278 unsigned int num_switch_thread_w;
279 // number of times we went to calling DisablePreemptiveGC in WaitLonger.
280 unsigned int num_disable_preemptive_w;
286 , holding_thread((Thread*) -1)
288 #if defined (SYNCHRONIZATION_STATS)
289 , num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
294 #if defined (SYNCHRONIZATION_STATS)
297 num_switch_thread = 0;
299 num_switch_thread_w = 0;
300 num_disable_preemptive_w = 0;
304 typedef GCDebugSpinLock GCSpinLock;
310 class seg_free_spaces;
314 class exclusive_sync;
315 class recursive_gc_sync;
316 #endif //BACKGROUND_GC
318 #ifdef MULTIPLE_HEAPS
319 // card marking stealing only makes sense in server GC
320 // but it works and is easier to debug for workstation GC
321 // so turn it on for server GC, turn on for workstation GC if necessary
322 #define FEATURE_CARD_MARKING_STEALING
323 #endif //MULTIPLE_HEAPS
325 #ifdef FEATURE_CARD_MARKING_STEALING
326 class card_marking_enumerator;
327 #define CARD_MARKING_STEALING_ARG(a) ,a
328 #define CARD_MARKING_STEALING_ARGS(a,b,c) ,a,b,c
329 #else // FEATURE_CARD_MARKING_STEALING
330 #define CARD_MARKING_STEALING_ARG(a)
331 #define CARD_MARKING_STEALING_ARGS(a,b,c)
332 #endif // FEATURE_CARD_MARKING_STEALING
334 // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
335 // make sure you change that one if you change this one!
338 pause_batch = 0, //We are not concerned about pause length
339 pause_interactive = 1, //We are running an interactive app
340 pause_low_latency = 2, //short pauses are essential
341 //avoid long pauses from blocking full GCs unless running out of memory
342 pause_sustained_low_latency = 3,
346 enum gc_loh_compaction_mode
348 loh_compaction_default = 1, // the default mode, don't compact LOH.
349 loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
350 loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
353 enum set_pause_mode_status
355 set_pause_mode_success = 0,
356 set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
360 Latency modes required user to have specific GC knowledge (eg, budget, full blocking GC).
361 We are trying to move away from them as it makes a lot more sense for users to tell
362 us what's the most important out of the perf aspects that make sense to them.
364 In general there are 3 such aspects:
368 + pause predictibility
370 Currently the following levels are supported. We may (and will likely) add more
373 +----------+--------------------+---------------------------------------+
374 | Level | Optimization Goals | Latency Charactaristics |
375 +==========+====================+=======================================+
376 | 0 | memory footprint | pauses can be long and more frequent |
377 +----------+--------------------+---------------------------------------+
378 | 1 | balanced | pauses are more predictable and more |
379 | | | frequent. the longest pauses are |
380 | | | shorter than 1. |
381 +----------+--------------------+---------------------------------------+
383 enum gc_latency_level
385 latency_level_first = 0,
386 latency_level_memory_footprint = latency_level_first,
387 latency_level_balanced = 1,
388 latency_level_last = latency_level_balanced,
389 latency_level_default = latency_level_balanced
394 tuning_deciding_condemned_gen = 0,
395 tuning_deciding_full_gc = 1,
396 tuning_deciding_compaction = 2,
397 tuning_deciding_expansion = 3,
398 tuning_deciding_promote_ephemeral = 4,
399 tuning_deciding_short_on_seg = 5
411 gc_oh_num gen_to_oh (int gen);
413 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
414 static const char * const str_bgc_state[] =
428 #endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
430 enum allocation_state
433 a_state_can_allocate,
434 a_state_cant_allocate,
435 // This could be due to having to wait till a GC is done,
436 // or having to try a different heap.
437 a_state_retry_allocate,
439 a_state_try_fit_new_seg,
440 a_state_try_fit_after_cg,
441 a_state_try_fit_after_bgc,
442 a_state_try_free_full_seg_in_bgc,
443 a_state_try_free_after_bgc,
446 a_state_acquire_seg_after_cg,
447 a_state_acquire_seg_after_bgc,
448 a_state_check_and_wait_for_bgc,
449 a_state_trigger_full_compact_gc,
450 a_state_trigger_ephemeral_gc,
451 a_state_trigger_2nd_ephemeral_gc,
452 a_state_check_retry_seg,
458 gc_type_compacting = 0,
459 gc_type_blocking = 1,
461 gc_type_background = 2,
462 #endif //BACKGROUND_GC
466 //encapsulates the mechanism for the current gc
470 VOLATILE(size_t) gc_index; // starts from 1 for the first GC, like dd_collection_count
471 int condemned_generation;
479 int gen0_reduction_count;
480 BOOL should_lock_elevation;
481 int elevation_locked_count;
482 BOOL elevation_reduced;
485 gc_pause_mode pause_mode;
486 BOOL found_finalizers;
491 BOOL allocations_allowed;
492 #endif //BACKGROUND_GC
496 #endif // STRESS_HEAP
498 // These are opportunistically set
499 uint32_t entry_memory_load;
500 uint64_t entry_available_physical_mem;
501 uint32_t exit_memory_load;
503 void init_mechanisms(); //for each GC
504 void first_init(); // for the life of the EE
506 void record (gc_history_global* history);
509 // This is a compact version of gc_mechanism that we use to save in the history.
510 class gc_mechanisms_store
521 bool should_lock_elevation;
522 int condemned_generation : 8;
523 int gen0_reduction_count : 8;
524 int elevation_locked_count : 8;
525 gc_reason reason : 8;
526 gc_pause_mode pause_mode : 8;
528 bgc_state b_state : 8;
529 #endif //BACKGROUND_GC
530 bool found_finalizers;
534 #endif //BACKGROUND_GC
538 #endif // STRESS_HEAP
541 uint32_t entry_memory_load;
544 void store (gc_mechanisms* gm)
546 gc_index = gm->gc_index;
547 condemned_generation = gm->condemned_generation;
548 promotion = (gm->promotion != 0);
549 compaction = (gm->compaction != 0);
550 loh_compaction = (gm->loh_compaction != 0);
551 heap_expansion = (gm->heap_expansion != 0);
552 concurrent = (gm->concurrent != 0);
553 demotion = (gm->demotion != 0);
554 card_bundles = (gm->card_bundles != 0);
555 gen0_reduction_count = gm->gen0_reduction_count;
556 should_lock_elevation = (gm->should_lock_elevation != 0);
557 elevation_locked_count = gm->elevation_locked_count;
559 pause_mode = gm->pause_mode;
560 found_finalizers = (gm->found_finalizers != 0);
563 background_p = (gm->background_p != 0);
564 b_state = gm->b_state;
565 #endif //BACKGROUND_GC
568 stress_induced = (gm->stress_induced != 0);
569 #endif // STRESS_HEAP
572 entry_memory_load = gm->entry_memory_load;
577 typedef DPTR(class heap_segment) PTR_heap_segment;
578 typedef DPTR(class gc_heap) PTR_gc_heap;
579 typedef DPTR(PTR_gc_heap) PTR_PTR_gc_heap;
580 #ifdef FEATURE_PREMORTEM_FINALIZATION
581 typedef DPTR(class CFinalize) PTR_CFinalize;
582 #endif // FEATURE_PREMORTEM_FINALIZATION
584 //-------------------------------------
585 //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than (1 << first_bucket_bits)
586 //and doubling each time. The last bucket (index == num_buckets) is for largest sizes with no limit
588 #define MAX_SOH_BUCKET_COUNT (13)//Max number of buckets for the SOH generations.
589 #define MAX_BUCKET_COUNT (20)//Max number of buckets.
597 #ifdef FL_VERIFICATION
599 #endif //FL_VERIFICATION
601 uint8_t*& alloc_list_head () { return head;}
602 uint8_t*& alloc_list_tail () { return tail;}
603 size_t& alloc_list_damage_count(){ return damage_count; }
615 int first_bucket_bits;
616 unsigned int num_buckets;
617 alloc_list first_bucket;
619 alloc_list& alloc_list_of (unsigned int bn);
620 size_t& alloc_list_damage_count_of (unsigned int bn);
623 allocator (unsigned int num_b, int fbb, alloc_list* b);
628 first_bucket_bits = sizeof(size_t) * 8 - 1;
631 unsigned int number_of_buckets()
636 // skip buckets that cannot possibly fit "size" and return the next one
637 // there is always such bucket since the last one fits everything
638 unsigned int first_suitable_bucket(size_t size)
640 // sizes taking first_bucket_bits or less are mapped to bucket 0
641 // others are mapped to buckets 0, 1, 2 respectively
642 size = (size >> first_bucket_bits) | 1;
644 DWORD highest_set_bit_index;
646 BitScanReverse64(&highest_set_bit_index, size);
648 BitScanReverse(&highest_set_bit_index, size);
651 return min ((unsigned int)highest_set_bit_index, num_buckets - 1);
654 size_t first_bucket_size()
656 return ((size_t)1 << (first_bucket_bits + 1));
659 uint8_t*& alloc_list_head_of (unsigned int bn)
661 return alloc_list_of (bn).alloc_list_head();
664 uint8_t*& alloc_list_tail_of (unsigned int bn)
666 return alloc_list_of (bn).alloc_list_tail();
671 BOOL discard_if_no_fit_p()
673 return (num_buckets == 1);
676 // This is when we know there's nothing to repair because this free
677 // list has never gone through plan phase. Right now it's only used
678 // by the background ephemeral sweep when we copy the local free list
679 // to gen0's free list.
681 // We copy head and tail manually (vs together like copy_to_alloc_list)
682 // since we need to copy tail first because when we get the free items off
683 // of each bucket we check head first. We also need to copy the
684 // smaller buckets first so when gen0 allocation needs to thread
685 // smaller items back that bucket is guaranteed to have been full
687 void copy_with_no_repair (allocator* allocator_to_copy)
689 assert (num_buckets == allocator_to_copy->number_of_buckets());
690 for (unsigned int i = 0; i < num_buckets; i++)
692 alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
693 alloc_list_tail_of(i) = al->alloc_list_tail();
694 alloc_list_head_of(i) = al->alloc_list_head();
698 void unlink_item (unsigned int bucket_number, uint8_t* item, uint8_t* previous_item, BOOL use_undo_p);
699 void thread_item (uint8_t* item, size_t size);
700 void thread_item_front (uint8_t* itme, size_t size);
701 void copy_to_alloc_list (alloc_list* toalist);
702 void copy_from_alloc_list (alloc_list* fromalist);
703 void commit_alloc_list_changes();
706 #define NUM_GEN_POWER2 (20)
707 #define BASE_GEN_SIZE (1*512)
709 // group the frequently used ones together (need intrumentation on accessors)
713 // Don't move these first two fields without adjusting the references
714 // from the __asm in jitinterface.cpp.
715 alloc_context allocation_context;
716 PTR_heap_segment start_segment;
717 uint8_t* allocation_start;
718 heap_segment* allocation_segment;
719 uint8_t* allocation_context_start_region;
720 allocator free_list_allocator;
721 size_t free_list_allocated;
722 size_t end_seg_allocated;
723 BOOL allocate_end_seg_p;
724 size_t condemned_allocated;
725 size_t sweep_allocated;
726 size_t free_list_space;
727 size_t free_obj_space;
728 size_t allocation_size;
729 uint8_t* plan_allocation_start;
730 size_t plan_allocation_start_size;
732 // this is the pinned plugs that got allocated into this gen.
733 size_t pinned_allocated;
734 size_t pinned_allocation_compact_size;
735 size_t pinned_allocation_sweep_size;
738 #ifdef FREE_USAGE_STATS
739 size_t gen_free_spaces[NUM_GEN_POWER2];
740 // these are non pinned plugs only
741 size_t gen_plugs[NUM_GEN_POWER2];
742 size_t gen_current_pinned_free_spaces[NUM_GEN_POWER2];
743 size_t pinned_free_obj_space;
744 // this is what got allocated into the pinned free spaces.
745 size_t allocated_in_pinned_free;
746 size_t allocated_since_last_pin;
747 #endif //FREE_USAGE_STATS
750 static_assert(offsetof(dac_generation, allocation_context) == offsetof(generation, allocation_context), "DAC generation offset mismatch");
751 static_assert(offsetof(dac_generation, start_segment) == offsetof(generation, start_segment), "DAC generation offset mismatch");
752 static_assert(offsetof(dac_generation, allocation_start) == offsetof(generation, allocation_start), "DAC generation offset mismatch");
754 // static data remains the same after it's initialized.
755 // It's per generation.
756 // TODO: for gen_time_tuning, we should put the multipliers in static data.
761 size_t fragmentation_limit;
762 float fragmentation_burden_limit;
765 uint64_t time_clock; // time after which to collect generation, in performance counts (see QueryPerformanceCounter)
766 size_t gc_clock; // nubmer of gcs after which to collect generation
769 // The dynamic data fields are grouped into 3 categories:
771 // calculated logical data (like desired_allocation)
772 // physical data (like fragmentation)
773 // const data (sdata), initialized at the beginning
777 ptrdiff_t new_allocation;
778 ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
780 size_t desired_allocation;
782 // # of bytes taken by objects (ie, not free space) at the beginning
784 size_t begin_data_size;
785 // # of bytes taken by survived objects after mark.
786 size_t survived_size;
787 // # of bytes taken by survived pinned plugs after mark.
788 size_t pinned_survived_size;
789 size_t artificial_pinned_survived_size;
790 size_t added_pinned_size;
795 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
796 // # of plugs that are not pinned plugs.
797 size_t num_npinned_plugs;
798 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
799 //total object size after a GC, ie, doesn't include fragmentation
801 size_t collection_count;
802 size_t promoted_size;
803 size_t freach_previous_promotion;
804 size_t fragmentation; //fragmentation when we don't compact
805 size_t gc_clock; //gc# when last GC happened
806 uint64_t time_clock; //time when last gc started
807 size_t gc_elapsed_time; // Time it took for the gc to complete
808 float gc_speed; // speed in bytes/msec for the gc to complete
815 struct recorded_generation_info
818 size_t fragmentation_before;
820 size_t fragmentation_after;
823 struct last_recorded_gc_info
825 VOLATILE(size_t) index;
826 size_t total_committed;
828 size_t pinned_objects;
829 size_t finalize_promoted_objects;
830 size_t pause_durations[2];
831 float pause_percentage;
832 recorded_generation_info gen_info[total_generation_count];
834 size_t fragmentation;
835 uint32_t memory_load;
836 uint8_t condemned_generation;
841 #define ro_in_entry 0x1
843 // Note that I am storing both h0 and seg0, even though in Server GC you can get to
844 // the heap* from the segment info. This is because heap_of needs to be really fast
845 // and we would not want yet another indirection.
848 // if an address is > boundary it belongs to h1; else h0.
849 // since we init h0 and h1 to 0, if we get 0 it means that
850 // address doesn't exist on managed segments. And heap_of
851 // would just return heap0 which is what it does now.
853 #ifdef MULTIPLE_HEAPS
856 #endif //MULTIPLE_HEAPS
857 // You could have an address that's inbetween 2 segments and
858 // this would return a seg, the caller then will use
859 // in_range_for_segment to determine if it's on that seg.
860 heap_segment* seg0; // this is what the seg for h0 is.
861 heap_segment* seg1; // this is what the seg for h1 is.
862 // Note that when frozen objects are used we mask seg1
863 // with 0x1 to indicate that there is a ro segment for
868 //Alignment constant for allocation
869 #define ALIGNCONST (DATA_ALIGNMENT-1)
872 size_t Align (size_t nbytes, int alignment=ALIGNCONST)
874 return (nbytes + alignment) & ~alignment;
877 //return alignment constant for small object heap vs large object heap
879 int get_alignment_constant (BOOL small_object_p)
881 #ifdef FEATURE_STRUCTALIGN
882 // If any objects on the large object heap require 8-byte alignment,
883 // the compiler will tell us so. Let's not guess an alignment here.
885 #else // FEATURE_STRUCTALIGN
886 return small_object_p ? ALIGNCONST : 7;
887 #endif // FEATURE_STRUCTALIGN
892 size_t desired_allocation;
893 size_t new_allocation;
897 // Note, I am not removing the ones that are no longer used
898 // because the older versions of the runtime still use them
899 // and ETW interprets them.
900 enum alloc_wait_reason
902 // When we don't care about firing an event for
906 // when we detect we are in low memory
909 // when we detect the ephemeral segment is too full
910 awr_low_ephemeral = 1,
912 // we've given out too much budget for gen0.
915 // we've given out too much budget for loh.
918 // this event is really obsolete - it's for pre-XP
919 // OSs where low mem notification is not supported.
920 awr_alloc_loh_low_mem = 4,
922 // we ran out of VM spaced to reserve on loh.
925 // ran out of space when allocating a small object
926 awr_gen0_oos_bgc = 6,
928 // ran out of space when allocating a large object
931 // waiting for BGC to let FGC happen
932 awr_fgc_wait_for_bgc = 8,
934 // wait for bgc to finish to get loh seg.
935 // no longer used with the introduction of loh msl.
938 // we don't allow loh allocation during bgc planning.
939 // no longer used with the introduction of loh msl.
940 awr_loh_alloc_during_plan = 10,
942 // we don't allow too much uoh allocation during bgc.
943 awr_uoh_alloc_during_bgc = 11
946 struct alloc_thread_wait_data
953 mt_get_large_seg = 0,
978 msl_enter_state enter_state;
979 msl_take_state take_state;
980 EEThreadId thread_id;
984 #define HS_CACHE_LINE_SIZE 128
987 struct snoop_stats_data
991 // total number of objects that we called
993 size_t objects_checked_count;
994 // total number of time we called gc_mark
996 size_t zero_ref_count;
997 // total objects actually marked.
998 size_t objects_marked_count;
999 // number of objects written to the mark stack because
1001 size_t stolen_stack_count;
1002 // number of objects pushed onto the mark stack because
1003 // of the partial mark code path.
1004 size_t partial_stack_count;
1005 // number of objects pushed onto the mark stack because
1006 // of the non partial mark code path.
1007 size_t normal_stack_count;
1008 // number of references marked without mark stack.
1009 size_t non_stack_count;
1011 // number of times we detect next heap's mark stack
1013 size_t stack_idle_count;
1015 // number of times we do switch to thread.
1016 size_t switch_to_thread_count;
1018 // number of times we are checking if the next heap's
1019 // mark stack is busy.
1020 size_t check_level_count;
1021 // number of times next stack is busy and level is
1024 // how many interlocked exchange operations we did
1025 size_t interlocked_count;
1026 // numer of times parent objects stolen
1027 size_t partial_mark_parent_count;
1028 // numer of times we look at a normal stolen entry,
1029 // or the beginning/ending PM pair.
1030 size_t stolen_or_pm_count;
1031 // number of times we see 2 for the entry.
1032 size_t stolen_entry_count;
1033 // number of times we see a PM entry that's not ready.
1034 size_t pm_not_ready_count;
1035 // number of stolen normal marked objects and partial mark children.
1036 size_t normal_count;
1037 // number of times the bottom of mark stack was cleared.
1038 size_t stack_bottom_clear_count;
1040 #endif //SNOOP_STATS
1042 struct no_gc_region_info
1044 size_t soh_allocation_size;
1045 size_t loh_allocation_size;
1048 size_t num_gcs_induced;
1049 start_no_gc_region_status start_status;
1050 gc_pause_mode saved_pause_mode;
1051 size_t saved_gen0_min_size;
1052 size_t saved_gen3_min_size;
1056 // if you change these, make sure you update them for sos (strike.cpp) as well.
1059 // Right now I am only recording data from blocking GCs. When recording from BGC,
1060 // it should have its own copy just like gc_data_per_heap.
1061 // for BGCs we will have a very different set of datapoints to record.
1062 enum interesting_data_point
1067 idp_converted_pin = 3,
1070 idp_pre_and_post_pin = 6,
1071 idp_pre_short_padded = 7,
1072 idp_post_short_padded = 8,
1076 //class definition of the internal class
1079 friend class GCHeap;
1080 #ifdef FEATURE_PREMORTEM_FINALIZATION
1081 friend class CFinalize;
1082 #endif // FEATURE_PREMORTEM_FINALIZATION
1083 friend struct ::alloc_context;
1084 friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
1085 friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1086 friend class t_join;
1087 friend class gc_mechanisms;
1088 friend class seg_free_spaces;
1090 #ifdef BACKGROUND_GC
1091 friend class exclusive_sync;
1092 friend class recursive_gc_sync;
1093 #endif //BACKGROUND_GC
1095 #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1096 friend void checkGCWriteBarrier();
1097 friend void initGCShadow();
1098 #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1100 friend void PopulateDacVars(GcDacVars *gcDacVars);
1102 #ifdef MULTIPLE_HEAPS
1103 typedef void (gc_heap::* card_fn) (uint8_t**, int);
1104 #define call_fn(this_arg,fn) (this_arg->*fn)
1107 typedef void (* card_fn) (uint8_t**);
1108 #define call_fn(this_arg,fn) (*fn)
1109 #define __this (gc_heap*)0
1116 void print_free_list (int gen, heap_segment* seg);
1119 #ifdef SYNCHRONIZATION_STATS
1122 void init_sync_stats()
1124 #ifdef MULTIPLE_HEAPS
1125 for (int i = 0; i < gc_heap::n_heaps; i++)
1127 gc_heap::g_heaps[i]->init_heap_sync_stats();
1129 #else //MULTIPLE_HEAPS
1130 init_heap_sync_stats();
1131 #endif //MULTIPLE_HEAPS
1135 void print_sync_stats(unsigned int gc_count_during_log)
1137 // bad/good gl acquire is accumulative during the log interval (because the numbers are too small)
1138 // min/max msl_acquire is the min/max during the log interval, not each GC.
1139 // Threads is however many allocation threads for the last GC.
1140 // num of msl acquired, avg_msl, high and low are all for each GC.
1141 printf("%2s%2s%10s%10s%12s%6s%4s%8s( st, wl, stw, dpw)\n",
1142 "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
1144 #ifdef MULTIPLE_HEAPS
1145 for (int i = 0; i < gc_heap::n_heaps; i++)
1147 gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
1149 #else //MULTIPLE_HEAPS
1150 print_heap_sync_stats(0, gc_count_during_log);
1151 #endif //MULTIPLE_HEAPS
1154 #endif //SYNCHRONIZATION_STATS
1157 void verify_soh_segment_list();
1161 void verify_free_lists();
1163 void verify_heap (BOOL begin_gc_p);
1164 #endif //VERIFY_HEAP
1167 void fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num);
1170 void fire_pevents();
1172 #ifdef FEATURE_BASICFREEZE
1173 static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
1177 heap_segment* make_heap_segment (uint8_t* new_pages,
1183 gc_heap* make_gc_heap(
1184 #if defined (MULTIPLE_HEAPS)
1187 #endif //MULTIPLE_HEAPS
1191 void destroy_gc_heap(gc_heap* heap);
1194 HRESULT initialize_gc (size_t soh_segment_size,
1195 size_t loh_segment_size,
1196 size_t poh_segment_size
1197 #ifdef MULTIPLE_HEAPS
1198 , int number_of_heaps
1199 #endif //MULTIPLE_HEAPS
1205 // If the hard limit is specified, take that into consideration
1206 // and this means it may modify the # of heaps.
1208 size_t get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps);
1211 bool should_retry_other_heap (int gen_number, size_t size);
1214 CObjectHeader* allocate (size_t jsize,
1215 alloc_context* acontext,
1218 #ifdef MULTIPLE_HEAPS
1220 void hb_log_new_allocation();
1223 void hb_log_balance_activities();
1226 void balance_heaps (alloc_context* acontext);
1228 ptrdiff_t get_balance_heaps_uoh_effective_budget (int generation_num);
1230 gc_heap* balance_heaps_uoh (alloc_context* acontext, size_t size, int generation_num);
1231 // Unlike balance_heaps_uoh, this may return nullptr if we failed to change heaps.
1233 gc_heap* balance_heaps_uoh_hard_limit_retry (alloc_context* acontext, size_t size, int generation_num);
1235 void gc_thread_stub (void* arg);
1236 #endif //MULTIPLE_HEAPS
1238 // For UOH allocations we only update the alloc_bytes_uoh in allocation
1239 // context - we don't actually use the ptr/limit from it so I am
1240 // making this explicit by not passing in the alloc_context.
1241 // Note: This are instance methods, but the heap instance is only used for
1242 // lowest_address and highest_address, which are currently the same accross all heaps.
1244 CObjectHeader* allocate_uoh_object (size_t size, uint32_t flags, int gen_num, int64_t& alloc_bytes);
1246 #ifdef FEATURE_STRUCTALIGN
1248 uint8_t* pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size);
1249 #endif // FEATURE_STRUCTALIGN
1258 void update_recorded_gen_data (last_recorded_gc_info* gc_info);
1261 void update_end_gc_time_per_heap();
1264 void update_end_ngc_time();
1267 void add_to_history_per_heap();
1270 void add_to_history();
1272 #ifdef BGC_SERVO_TUNING
1274 void check_and_adjust_bgc_tuning (int gen_number, size_t physical_size, ptrdiff_t virtual_fl_size);
1276 void get_and_reset_loh_alloc_info();
1277 #endif //BGC_SERVO_TUNING
1280 BOOL expand_soh_with_minimal_gc();
1282 // EE is always suspended when this method is called.
1283 // returning FALSE means we actually didn't do a GC. This happens
1284 // when we figured that we needed to do a BGC.
1286 void garbage_collect (int n);
1288 // Since we don't want to waste a join just to do this, I am doing
1289 // doing this at the last join in gc1.
1291 void pm_full_gc_init_or_clear();
1293 // This does a GC when pm_trigger_full_gc is set
1295 void garbage_collect_pm_full_gc();
1298 bool is_pm_ratio_exceeded();
1301 void init_records();
1304 uint32_t* make_card_table (uint8_t* start, uint8_t* end);
1307 void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
1310 int grow_brick_card_tables (uint8_t* start,
1313 heap_segment* new_seg,
1318 BOOL is_mark_set (uint8_t* o);
1320 #ifdef FEATURE_BASICFREEZE
1322 bool frozen_object_p(Object* obj);
1323 #endif // FEATURE_BASICFREEZE
1327 BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t pinned_size, int num_heaps, bool use_large_pages_p, bool separated_poh_p);
1330 void destroy_initial_memory();
1333 void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1336 void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1338 struct walk_relocate_args
1342 mark* pinned_plug_entry;
1343 void* profiling_context;
1348 void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
1351 void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
1352 walk_relocate_args* args);
1355 void walk_relocation (void* profiling_context, record_surv_fn fn);
1358 void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
1361 void walk_finalize_queue (fq_walk_fn fn);
1363 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1365 void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
1366 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1368 // used in blocking GCs after plan phase so this walks the plugs.
1370 void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
1372 void walk_survivors_for_uoh (void* profiling_context, record_surv_fn fn, int gen_number);
1375 int generation_to_condemn (int n,
1376 BOOL* blocking_collection_p,
1377 BOOL* elevation_requested_p,
1381 int joined_generation_to_condemn (BOOL should_evaluate_elevation,
1384 BOOL* blocking_collection
1385 STRESS_HEAP_ARG(int n_original));
1388 size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
1391 uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
1394 void concurrent_print_time_delta (const char* msg);
1396 void free_list_info (int gen_num, const char* msg);
1398 // in svr GC on entry and exit of this method, the GC threads are not
1404 void save_data_for_no_gc();
1407 void restore_data_for_no_gc();
1410 void update_collection_counts_for_no_gc();
1413 BOOL should_proceed_with_gc();
1416 void record_gcs_during_no_gc();
1419 BOOL find_loh_free_for_no_gc();
1422 BOOL find_loh_space_for_no_gc();
1425 BOOL commit_loh_for_no_gc (heap_segment* seg);
1428 start_no_gc_region_status prepare_for_no_gc_region (uint64_t total_size,
1429 BOOL loh_size_known,
1431 BOOL disallow_full_blocking);
1434 BOOL loh_allocated_for_no_gc();
1437 void release_no_gc_loh_segments();
1440 void thread_no_gc_loh_segments();
1443 void check_and_set_no_gc_oom();
1446 void allocate_for_no_gc_after_gc();
1449 void set_loh_allocations_for_no_gc();
1452 void set_soh_allocations_for_no_gc();
1455 void prepare_for_no_gc_after_gc();
1458 void set_allocations_for_no_gc();
1461 BOOL should_proceed_for_no_gc();
1464 start_no_gc_region_status get_start_no_gc_region_status();
1467 end_no_gc_region_status end_no_gc_region();
1470 void handle_failure_for_no_gc();
1473 void fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address);
1476 void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
1479 size_t limit_from_size (size_t size, uint32_t flags, size_t room, int gen_number,
1482 allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
1483 int alloc_generation_number);
1485 BOOL allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
1486 int alloc_generation_number);
1489 size_t get_full_compact_gc_count();
1492 BOOL short_on_end_of_seg (heap_segment* seg, int align_const);
1495 BOOL a_fit_free_list_p (int gen_number,
1497 alloc_context* acontext,
1501 #ifdef BACKGROUND_GC
1503 void wait_for_background (alloc_wait_reason awr, bool loh_p);
1506 void wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p);
1509 void bgc_uoh_alloc_clr (uint8_t* alloc_start,
1511 alloc_context* acontext,
1517 #endif //BACKGROUND_GC
1519 #ifdef BACKGROUND_GC
1521 void bgc_track_uoh_alloc();
1524 void bgc_untrack_uoh_alloc();
1527 BOOL bgc_loh_allocate_spin();
1530 BOOL bgc_poh_allocate_spin();
1531 #endif //BACKGROUND_GC
1533 #define max_saved_spinlock_info 48
1535 #ifdef SPINLOCK_HISTORY
1537 int spinlock_info_index;
1540 spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
1541 #endif //SPINLOCK_HISTORY
1544 void add_saved_spinlock_info (
1546 msl_enter_state enter_state,
1547 msl_take_state take_state);
1550 void trigger_gc_for_alloc (int gen_number, gc_reason reason,
1551 GCSpinLock* spin_lock, bool loh_p,
1552 msl_take_state take_state);
1555 BOOL a_fit_free_list_uoh_p (size_t size,
1556 alloc_context* acontext,
1562 BOOL a_fit_segment_end_p (int gen_number,
1565 alloc_context* acontext,
1568 BOOL* commit_failed_p);
1570 BOOL uoh_a_fit_segment_end_p (int gen_number,
1572 alloc_context* acontext,
1575 BOOL* commit_failed_p,
1578 BOOL uoh_get_new_seg (int gen_number,
1580 BOOL* commit_failed_p,
1584 size_t get_uoh_seg_size (size_t size);
1587 BOOL retry_full_compact_gc (size_t size);
1590 BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
1591 BOOL* did_full_compact_gc,
1595 BOOL trigger_full_compact_gc (gc_reason gr,
1600 BOOL trigger_ephemeral_gc (gc_reason gr);
1603 BOOL soh_try_fit (int gen_number,
1605 alloc_context* acontext,
1608 BOOL* commit_failed_p,
1609 BOOL* short_seg_end_p);
1611 BOOL uoh_try_fit (int gen_number,
1613 alloc_context* acontext,
1616 BOOL* commit_failed_p,
1620 allocation_state allocate_soh (int gen_number,
1622 alloc_context* acontext,
1626 #ifdef RECORD_LOH_STATE
1627 #define max_saved_loh_states 12
1629 int loh_state_index;
1631 struct loh_state_info
1633 allocation_state alloc_state;
1634 EEThreadId thread_id;
1638 loh_state_info last_loh_states[max_saved_loh_states];
1640 void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
1641 #endif //RECORD_LOH_STATE
1643 allocation_state allocate_uoh (int gen_number,
1645 alloc_context* acontext,
1650 int init_semi_shared();
1652 int init_gc_heap (int heap_number);
1654 void self_destroy();
1656 void destroy_semi_shared();
1658 void repair_allocation_contexts (BOOL repair_p);
1660 void fix_allocation_contexts (BOOL for_gc_p);
1662 void fix_youngest_allocation_area();
1664 void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
1667 void fix_uoh_allocation_area();
1669 void fix_older_allocation_area (generation* older_gen);
1671 void set_allocation_heap_segment (generation* gen);
1673 void reset_allocation_pointers (generation* gen, uint8_t* start);
1675 int object_gennum (uint8_t* o);
1677 int object_gennum_plan (uint8_t* o);
1679 void init_heap_segment (heap_segment* seg);
1681 void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
1682 #ifdef FEATURE_BASICFREEZE
1684 BOOL insert_ro_segment (heap_segment* seg);
1686 void remove_ro_segment (heap_segment* seg);
1687 #endif //FEATURE_BASICFREEZE
1689 BOOL set_ro_segment_in_range (heap_segment* seg);
1691 heap_segment* soh_get_segment_to_expand();
1693 heap_segment* get_segment (size_t size, gc_oh_num oh);
1695 void release_segment (heap_segment* sg);
1697 void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
1699 void seg_mapping_table_remove_segment (heap_segment* seg);
1701 heap_segment* get_uoh_segment (int gen_number, size_t size, BOOL* did_full_compact_gc);
1703 void thread_uoh_segment (int gen_number, heap_segment* new_seg);
1705 heap_segment* get_segment_for_uoh (int gen_number, size_t size
1706 #ifdef MULTIPLE_HEAPS
1708 #endif //MULTIPLE_HEAPS
1711 void reset_heap_segment_pages (heap_segment* seg);
1713 void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1715 size_t decommit_ephemeral_segment_pages_step ();
1717 size_t decommit_heap_segment_pages_worker (heap_segment* seg, uint8_t *new_committed);
1719 bool decommit_step ();
1721 void decommit_heap_segment (heap_segment* seg);
1723 bool virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number);
1725 bool virtual_commit (void* address, size_t size, gc_oh_num oh, int h_number=-1, bool* hard_limit_exceeded_p=NULL);
1727 bool virtual_decommit (void* address, size_t size, gc_oh_num oh, int h_number=-1);
1729 void virtual_free (void* add, size_t size, heap_segment* sg=NULL);
1731 void clear_gen0_bricks();
1732 #ifdef BACKGROUND_GC
1734 void rearrange_small_heap_segments();
1735 #endif //BACKGROUND_GC
1737 void rearrange_uoh_segments();
1739 void rearrange_heap_segments(BOOL compacting);
1742 void reset_write_watch_for_gc_heap(void* base_address, size_t region_size);
1744 void get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended);
1747 void switch_one_quantum();
1749 void reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size);
1751 void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1753 void reset_write_watch (BOOL concurrent_p);
1755 void adjust_ephemeral_limits();
1757 void make_generation (int gen_num, heap_segment* seg, uint8_t* start);
1759 #define USE_PADDING_FRONT 1
1760 #define USE_PADDING_TAIL 2
1763 BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1764 uint8_t* old_loc=0, int use_padding=USE_PADDING_TAIL);
1766 BOOL a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1770 void handle_oom (oom_reason reason, size_t alloc_size,
1771 uint8_t* allocated, uint8_t* reserved);
1774 size_t card_of ( uint8_t* object);
1776 uint8_t* brick_address (size_t brick);
1778 size_t brick_of (uint8_t* add);
1780 uint8_t* card_address (size_t card);
1782 size_t card_to_brick (size_t card);
1784 void clear_card (size_t card);
1786 void set_card (size_t card);
1788 BOOL card_set_p (size_t card);
1790 void card_table_set_bit (uint8_t* location);
1794 void update_card_table_bundle();
1796 void reset_card_table_write_watch();
1798 void card_bundle_clear(size_t cardb);
1800 void card_bundle_set (size_t cardb);
1802 void card_bundles_set (size_t start_cardb, size_t end_cardb);
1804 void verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word);
1806 void verify_card_bundles();
1808 BOOL card_bundle_set_p (size_t cardb);
1810 BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1812 void enable_card_bundles();
1814 BOOL card_bundles_enabled();
1816 #endif //CARD_BUNDLE
1819 BOOL find_card (uint32_t* card_table, size_t& card,
1820 size_t card_word_end, size_t& end_card);
1822 BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p=NULL);
1824 int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1826 void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
1827 short* old_brick_table,
1828 uint8_t* start, uint8_t* end);
1830 void init_brick_card_range (heap_segment* seg);
1832 void copy_brick_card_table_l_heap ();
1834 void copy_brick_card_table();
1836 void clear_brick_table (uint8_t* from, uint8_t* end);
1838 void set_brick (size_t index, ptrdiff_t val);
1840 int get_brick_entry (size_t index);
1841 #ifdef BACKGROUND_GC
1843 unsigned int mark_array_marked (uint8_t* add);
1845 void mark_array_set_marked (uint8_t* add);
1847 BOOL is_mark_bit_set (uint8_t* add);
1849 void gmark_array_set_marked (uint8_t* add);
1851 void set_mark_array_bit (size_t mark_bit);
1853 BOOL mark_array_bit_set (size_t mark_bit);
1855 void mark_array_clear_marked (uint8_t* add);
1857 void clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only=TRUE
1858 #ifdef FEATURE_BASICFREEZE
1859 , BOOL read_only=FALSE
1860 #endif // FEATURE_BASICFREEZE
1863 void seg_clear_mark_array_bits_soh (heap_segment* seg);
1865 void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1867 void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1870 void set_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1872 void check_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1873 #endif //VERIFY_HEAP
1874 #endif //BACKGROUND_GC
1877 BOOL uoh_object_marked (uint8_t* o, BOOL clearp);
1879 #ifdef BACKGROUND_GC
1881 BOOL background_allowed_p();
1882 #endif //BACKGROUND_GC
1885 void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1888 void check_for_full_gc (int gen_num, size_t size);
1891 void adjust_limit (uint8_t* start, size_t limit_size, generation* gen);
1893 void adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
1894 alloc_context* acontext, uint32_t flags, heap_segment* seg,
1895 int align_const, int gen_number);
1897 void leave_allocation_segment (generation* gen);
1900 void init_free_and_plug();
1903 void print_free_and_plug (const char* msg);
1906 void add_gen_plug (int gen_number, size_t plug_size);
1909 void add_gen_free (int gen_number, size_t free_size);
1912 void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1915 void remove_gen_free (int gen_number, size_t free_size);
1918 uint8_t* allocate_in_older_generation (generation* gen, size_t size,
1919 int from_gen_number,
1921 REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1923 generation* ensure_ephemeral_heap_segment (generation* consing_gen);
1925 uint8_t* allocate_in_condemned_generations (generation* gen,
1927 int from_gen_number,
1929 BOOL* convert_to_pinned_p=NULL,
1930 uint8_t* next_pinned_plug=0,
1931 heap_segment* current_seg=0,
1932 #endif //SHORT_PLUGS
1934 REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1935 // Verifies that interior is actually in the range of seg; otherwise
1938 heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p);
1941 gc_heap* heap_of (uint8_t* object);
1944 gc_heap* heap_of_gc (uint8_t* object);
1947 size_t& promoted_bytes (int);
1950 uint8_t* find_object (uint8_t* o);
1953 dynamic_data* dynamic_data_of (int gen_number);
1955 ptrdiff_t get_desired_allocation (int gen_number);
1957 ptrdiff_t get_new_allocation (int gen_number);
1959 ptrdiff_t get_allocation (int gen_number);
1961 bool new_allocation_allowed (int gen_number);
1962 #ifdef BACKGROUND_GC
1964 void allow_new_allocation (int gen_number);
1966 void disallow_new_allocation (int gen_number);
1967 #endif //BACKGROUND_GC
1969 void reset_pinned_queue();
1971 void reset_pinned_queue_bos();
1973 void set_allocator_next_pin (generation* gen);
1975 void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len);
1977 void enque_pinned_plug (uint8_t* plug,
1978 BOOL save_pre_plug_info_p,
1979 uint8_t* last_object_in_last_plug);
1981 void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size);
1983 void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen);
1985 void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug);
1987 size_t deque_pinned_plug ();
1989 mark* pinned_plug_of (size_t bos);
1991 mark* oldest_pin ();
1993 mark* before_oldest_pin();
1995 BOOL pinned_plug_que_empty_p ();
1997 void make_mark_stack (mark* arr);
2000 int& mark_stack_busy();
2002 VOLATILE(uint8_t*)& ref_mark_stack (gc_heap* hp, int index);
2004 #ifdef BACKGROUND_GC
2006 size_t& bpromoted_bytes (int);
2008 void make_background_mark_stack (uint8_t** arr);
2010 void make_c_mark_list (uint8_t** arr);
2011 #endif //BACKGROUND_GC
2013 generation* generation_of (int n);
2015 BOOL gc_mark1 (uint8_t* o);
2017 BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high);
2019 void mark_object (uint8_t* o THREAD_NUMBER_DCL);
2022 void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
2023 #endif //HEAP_ANALYZE
2025 void mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
2027 void mark_object_simple1 (uint8_t* o, uint8_t* start THREAD_NUMBER_DCL);
2034 #ifdef BACKGROUND_GC
2036 BOOL background_marked (uint8_t* o);
2038 BOOL background_mark1 (uint8_t* o);
2040 BOOL background_mark (uint8_t* o, uint8_t* low, uint8_t* high);
2042 uint8_t* background_mark_object (uint8_t* o THREAD_NUMBER_DCL);
2044 void background_mark_simple (uint8_t* o THREAD_NUMBER_DCL);
2046 void background_mark_simple1 (uint8_t* o THREAD_NUMBER_DCL);
2048 void background_promote (Object**, ScanContext* , uint32_t);
2050 BOOL background_object_marked (uint8_t* o, BOOL clearp);
2052 void init_background_gc();
2054 uint8_t* background_next_end (heap_segment*, BOOL);
2055 // while we are in LOH sweep we can't modify the segment list
2056 // there so we mark them as to be deleted and deleted them
2057 // at the next chance we get.
2059 void background_delay_delete_uoh_segments();
2061 void generation_delete_heap_segment (generation*,
2062 heap_segment*, heap_segment*, heap_segment*);
2064 void set_mem_verify (uint8_t*, uint8_t*, uint8_t);
2066 void process_background_segment_end (heap_segment*, generation*, uint8_t*,
2067 heap_segment*, BOOL*);
2069 BOOL fgc_should_consider_object (uint8_t* o,
2071 BOOL consider_bgc_mark_p,
2072 BOOL check_current_sweep_p,
2073 BOOL check_saved_sweep_p);
2075 void should_check_bgc_mark (heap_segment* seg,
2076 BOOL* consider_bgc_mark_p,
2077 BOOL* check_current_sweep_p,
2078 BOOL* check_saved_sweep_p);
2080 void background_ephemeral_sweep();
2082 void background_sweep ();
2084 uint8_t* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2086 uint8_t* background_first_overflow (uint8_t* min_add,
2089 BOOL small_object_p);
2091 void background_process_mark_overflow_internal (int condemned_gen_number,
2092 uint8_t* min_add, uint8_t* max_add,
2095 BOOL background_process_mark_overflow (BOOL concurrent_p);
2097 // for foreground GC to get hold of background structures containing refs
2100 scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2103 BOOL bgc_mark_array_range (heap_segment* seg,
2105 uint8_t** range_beg,
2106 uint8_t** range_end);
2108 void bgc_verify_mark_array_cleared (heap_segment* seg);
2110 void verify_mark_array_cleared();
2112 void verify_partial();
2114 void verify_mark_bits_cleared (uint8_t* obj, size_t s);
2116 void clear_all_mark_array();
2118 #ifdef BGC_SERVO_TUNING
2120 // Currently BGC servo tuning is an experimental feature.
2124 struct tuning_calculation
2126 // We use this virtual size that represents the generation
2127 // size at goal. We calculate the flr based on this.
2128 size_t end_gen_size_goal;
2130 // sweep goal is expressed as flr as we want to avoid
2131 // expanding the gen size.
2132 double sweep_flr_goal;
2134 // gen2 size at the end of last bgc.
2135 size_t last_bgc_size;
2138 // these need to be double so we don't loose so much accurancy
2141 // the FL ratio at the start of current bgc sweep.
2142 double current_bgc_sweep_flr;
2143 // the FL ratio at the end of last bgc.
2144 // Only used for FF.
2145 double last_bgc_flr;
2146 // the FL ratio last time we started a bgc
2147 double current_bgc_start_flr;
2149 double above_goal_accu_error;
2151 // We will trigger the next BGC if this much
2152 // alloc has been consumed between the last
2154 size_t alloc_to_trigger;
2155 // actual consumed alloc
2156 size_t actual_alloc_to_trigger;
2158 // the alloc between last bgc sweep start and end.
2159 size_t last_bgc_end_alloc;
2162 // For smoothing calc
2164 size_t smoothed_alloc_to_trigger;
2169 // last time we checked, were we above sweep flr goal?
2170 bool last_sweep_above_p;
2171 size_t alloc_to_trigger_0;
2173 // This is to get us started. It's set when we observe in a gen1
2174 // GC when the memory load is high enough and is used to seed the first
2175 // BGC triggered due to this tuning.
2176 size_t first_alloc_to_trigger;
2181 size_t last_bgc_physical_size;
2183 size_t last_alloc_end_to_start;
2184 size_t last_alloc_start_to_sweep;
2185 size_t last_alloc_sweep_to_end;
2186 // records the alloc at the last significant point,
2187 // used to calculate the 3 alloc's above.
2188 // It's reset at bgc sweep start as that's when we reset
2189 // all the allocation data (sweep_allocated/condemned_allocated/etc)
2192 // the FL size at the end of last bgc.
2193 size_t last_bgc_fl_size;
2195 // last gen2 surv rate
2196 double last_bgc_surv_rate;
2198 // the FL ratio last time gen size increased.
2199 double last_gen_increase_flr;
2202 // This is just so that I don't need to calculate things multiple
2203 // times. Only used during bgc end calculations. Everything that
2204 // needs to be perserved across GCs will be saved in the other 2
2206 struct bgc_size_data
2209 size_t gen_physical_size;
2211 // The actual physical fl size, unadjusted
2212 size_t gen_actual_phys_fl_size;
2213 // I call this physical_fl but really it's adjusted based on alloc
2214 // that we haven't consumed because the other generation consumed
2215 // its alloc and triggered the BGC. See init_bgc_end_data.
2216 // We don't allow it to go negative.
2217 ptrdiff_t gen_physical_fl_size;
2218 double gen_physical_flr;
2222 static bool enable_fl_tuning;
2223 // the memory load we aim to maintain.
2224 static uint32_t memory_load_goal;
2226 // if we are BGCMemGoalSlack above BGCMemGoal, this is where we
2227 // panic and start to see if we should do NGC2.
2228 static uint32_t memory_load_goal_slack;
2229 // This is calculated based on memory_load_goal.
2230 static uint64_t available_memory_goal;
2231 // If we are above (ml goal + slack), we need to panic.
2232 // Currently we just trigger the next GC as an NGC2, but
2233 // we do track the accumulated error and could be more
2234 // sophisticated about triggering NGC2 especially when
2235 // slack is small. We could say unless we see the error
2236 // is large enough would we actually trigger an NGC2.
2237 static bool panic_activated_p;
2238 static double accu_error_panic;
2240 static double above_goal_kp;
2241 static double above_goal_ki;
2242 static bool enable_ki;
2243 static bool enable_kd;
2244 static bool enable_smooth;
2245 static bool enable_tbh;
2246 static bool enable_ff;
2247 static bool enable_gradual_d;
2248 static double above_goal_kd;
2249 static double above_goal_ff;
2250 static double num_gen1s_smooth_factor;
2252 // for ML servo loop
2253 static double ml_kp;
2254 static double ml_ki;
2257 static double accu_error;
2259 // did we start tuning with FL yet?
2260 static bool fl_tuning_triggered;
2262 // ==================================================
2263 // ============what's used in calculation============
2264 // ==================================================
2266 // only used in smoothing.
2267 static size_t num_bgcs_since_tuning_trigger;
2269 // gen1 GC setting the next GC as a BGC when it observes the
2270 // memory load is high enough for the first time.
2271 static bool next_bgc_p;
2273 // this is organized as:
2274 // element 0 is for max_generation
2275 // element 1 is for max_generation+1
2276 static tuning_calculation gen_calc[2];
2278 // ======================================================
2279 // ============what's used to only show stats============
2280 // ======================================================
2282 // how many gen1's actually happened before triggering next bgc.
2283 static size_t actual_num_gen1s_to_trigger;
2285 static size_t gen1_index_last_bgc_end;
2286 static size_t gen1_index_last_bgc_start;
2287 static size_t gen1_index_last_bgc_sweep;
2289 static tuning_stats gen_stats[2];
2290 // ============end of stats============
2292 static bgc_size_data current_bgc_end_data[2];
2294 static size_t last_stepping_bgc_count;
2295 static uint32_t last_stepping_mem_load;
2296 static uint32_t stepping_interval;
2298 // When we are in the initial stage before fl tuning is triggered.
2299 static bool use_stepping_trigger_p;
2301 // the gen2 correction factor is used to put more emphasis
2302 // on the gen2 when it triggered the BGC.
2303 // If the BGC was triggered due to gen3, we decrease this
2305 static double gen2_ratio_correction;
2306 static double ratio_correction_step;
2308 // Since we have 2 loops, this BGC was caused by one of them; for the other loop we know
2309 // we didn't reach the goal so use the output from last time.
2310 static void calculate_tuning (int gen_number, bool use_this_loop_p);
2312 static void init_bgc_end_data (int gen_number, bool use_this_loop_p);
2313 static void calc_end_bgc_fl (int gen_number);
2315 static void convert_to_fl (bool use_gen2_loop_p, bool use_gen3_loop_p);
2316 static double calculate_ml_tuning (uint64_t current_available_physical, bool reduce_p, ptrdiff_t* _vfl_from_kp, ptrdiff_t* _vfl_from_ki);
2318 // This invokes the ml tuning loop and sets the total gen sizes, ie
2320 static void set_total_gen_sizes (bool use_gen2_loop_p, bool use_gen3_loop_p);
2322 static bool should_trigger_bgc_loh();
2324 // This is only called when we've already stopped for GC.
2325 // For LOH we'd be doing this in the alloc path.
2326 static bool should_trigger_bgc();
2328 // If we keep being above ml goal, we need to compact.
2329 static bool should_trigger_ngc2();
2331 // Only implemented for gen2 now while we are in sweep.
2332 // Before we could build up enough fl, we delay gen1 consuming
2333 // gen2 alloc so we don't get into panic.
2334 // When we maintain the fl instead of building a new one, this
2335 // can be eliminated.
2336 static bool should_delay_alloc (int gen_number);
2338 // When we are under the memory load goal, we'd like to do 10 BGCs
2339 // before we reach the goal.
2340 static bool stepping_trigger (uint32_t current_memory_load, size_t current_gen2_count);
2342 static void update_bgc_start (int gen_number, size_t num_gen1s_since_end);
2343 // Updates the following:
2344 // current_bgc_start_flr
2345 // actual_alloc_to_trigger
2346 // last_alloc_end_to_start
2348 // actual_num_gen1s_to_trigger
2349 // gen1_index_last_bgc_start
2350 static void record_bgc_start();
2352 static void update_bgc_sweep_start (int gen_number, size_t num_gen1s_since_start);
2353 // Updates the following:
2354 // current_bgc_sweep_flr
2355 // last_alloc_start_to_sweep
2357 // gen1_index_last_bgc_sweep
2358 static void record_bgc_sweep_start();
2360 static void record_and_adjust_bgc_end();
2363 // This tells us why we chose to do a bgc in tuning.
2365 int saved_bgc_tuning_reason;
2366 #endif //BGC_SERVO_TUNING
2368 #endif //BACKGROUND_GC
2371 void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2373 BOOL process_mark_overflow (int condemned_gen_number);
2375 void process_mark_overflow_internal (int condemned_gen_number,
2376 uint8_t* min_address, uint8_t* max_address);
2380 void print_snoop_stat();
2381 #endif //SNOOP_STATS
2386 BOOL check_next_mark_stack (gc_heap* next_heap);
2391 void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2394 void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2397 void pin_object (uint8_t* o, uint8_t** ppObject);
2400 size_t get_total_pinned_objects();
2403 void reset_mark_stack ();
2405 uint8_t* insert_node (uint8_t* new_node, size_t sequence_number,
2406 uint8_t* tree, uint8_t* last_node);
2408 size_t update_brick_table (uint8_t* tree, size_t current_brick,
2409 uint8_t* x, uint8_t* plug_end);
2412 void plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate);
2415 void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2418 void plan_generation_starts (generation*& consing_gen);
2421 void advance_pins_for_demotion (generation* gen);
2424 void process_ephemeral_boundaries(uint8_t* x, int& active_new_gen_number,
2425 int& active_old_gen_number,
2426 generation*& consing_gen,
2427 BOOL& allocate_in_condemned);
2429 void seg_clear_mark_bits (heap_segment* seg);
2431 void sweep_ro_segments (heap_segment* start_seg);
2433 void convert_to_pinned_plug (BOOL& last_npinned_plug_p,
2434 BOOL& last_pinned_plug_p,
2435 BOOL& pinned_plug_p,
2437 size_t& artificial_pinned_size);
2439 void store_plug_gap_info (uint8_t* plug_start,
2441 BOOL& last_npinned_plug_p,
2442 BOOL& last_pinned_plug_p,
2443 uint8_t*& last_pinned_plug,
2444 BOOL& pinned_plug_p,
2445 uint8_t* last_object_in_last_plug,
2446 BOOL& merge_with_last_pin_p,
2447 // this is only for verification purpose
2448 size_t last_plug_len);
2450 void plan_phase (int condemned_gen_number);
2453 void record_interesting_data_point (interesting_data_point idp);
2455 #ifdef GC_CONFIG_DRIVEN
2457 void record_interesting_info_per_heap();
2459 void record_global_mechanisms();
2461 BOOL should_do_sweeping_gc (BOOL compact_p);
2462 #endif //GC_CONFIG_DRIVEN
2464 #ifdef FEATURE_LOH_COMPACTION
2465 // plan_loh can allocate memory so it can fail. If it fails, we will
2466 // fall back to sweeping.
2474 void relocate_in_loh_compact();
2477 void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn);
2480 BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
2483 void loh_set_allocator_next_pin();
2486 BOOL loh_pinned_plug_que_empty_p();
2489 size_t loh_deque_pinned_plug();
2492 mark* loh_pinned_plug_of (size_t bos);
2495 mark* loh_oldest_pin();
2498 BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit);
2501 uint8_t* loh_allocate_in_condemned (size_t size);
2504 BOOL loh_object_p (uint8_t* o);
2507 BOOL loh_compaction_requested();
2509 // If the LOH compaction mode is just to compact once,
2510 // we need to see if we should reset it back to not compact.
2511 // We would only reset if every heap's LOH was compacted.
2513 void check_loh_compact_mode (BOOL all_heaps_compacted_p);
2514 #endif //FEATURE_LOH_COMPACTION
2517 void fix_generation_bounds (int condemned_gen_number,
2518 generation* consing_gen);
2520 uint8_t* generation_limit (int gen_number);
2522 struct make_free_args
2524 int free_list_gen_number;
2525 uint8_t* current_gen_limit;
2526 generation* free_list_gen;
2527 uint8_t* highest_plug;
2530 uint8_t* allocate_at_end (size_t size);
2532 BOOL ensure_gap_allocation (int condemned_gen_number);
2533 // make_free_lists is only called by blocking GCs.
2535 void make_free_lists (int condemned_gen_number);
2537 void make_free_list_in_brick (uint8_t* tree, make_free_args* args);
2539 void thread_gap (uint8_t* gap_start, size_t size, generation* gen);
2541 void loh_thread_gap_front (uint8_t* gap_start, size_t size, generation* gen);
2543 void make_unused_array (uint8_t* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2545 void clear_unused_array (uint8_t* x, size_t size);
2547 void relocate_address (uint8_t** old_address THREAD_NUMBER_DCL);
2548 struct relocate_args
2552 mark* pinned_plug_entry;
2556 void reloc_survivor_helper (uint8_t** pval);
2558 void check_class_object_demotion (uint8_t* obj);
2560 void check_class_object_demotion_internal (uint8_t* obj);
2563 void check_demotion_helper (uint8_t** pval, uint8_t* parent_obj);
2566 void relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end);
2569 void verify_pins_with_post_plug_info (const char* msg);
2571 #ifdef COLLECTIBLE_CLASS
2573 void unconditional_set_card_collectible (uint8_t* obj);
2574 #endif //COLLECTIBLE_CLASS
2577 void relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry);
2580 void relocate_obj_helper (uint8_t* x, size_t s);
2583 void reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc);
2586 void relocate_pre_plug_info (mark* pinned_plug_entry);
2589 void relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned);
2592 void relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
2593 BOOL check_last_object_p,
2594 mark* pinned_plug_entry);
2596 void relocate_survivors_in_brick (uint8_t* tree, relocate_args* args);
2599 void update_oldest_pinned_plug();
2602 void relocate_survivors (int condemned_gen_number,
2603 uint8_t* first_condemned_address );
2605 void relocate_phase (int condemned_gen_number,
2606 uint8_t* first_condemned_address);
2612 ptrdiff_t last_plug_relocation;
2613 uint8_t* before_last_plug;
2614 size_t current_compacted_brick;
2616 mark* pinned_plug_entry;
2617 BOOL check_gennum_p;
2622 dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2623 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2628 void copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2630 void gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2632 void compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2634 void compact_in_brick (uint8_t* tree, compact_args* args);
2637 mark* get_next_pinned_entry (uint8_t* tree,
2638 BOOL* has_pre_plug_info_p,
2639 BOOL* has_post_plug_info_p,
2643 mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2646 void recover_saved_pinned_info();
2649 void compact_phase (int condemned_gen_number, uint8_t*
2650 first_condemned_address, BOOL clear_cards);
2652 void clear_cards (size_t start_card, size_t end_card);
2654 void clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address);
2656 void copy_cards (size_t dst_card, size_t src_card,
2657 size_t end_card, BOOL nextp);
2659 void copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2661 #ifdef BACKGROUND_GC
2663 void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2665 void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2666 #endif //BACKGROUND_GC
2670 BOOL ephemeral_pointer_p (uint8_t* o);
2672 void fix_brick_to_highest (uint8_t* o, uint8_t* next_o);
2674 uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object);
2676 uint8_t* compute_next_boundary (int gen_number, BOOL relocating);
2678 void keep_card_live (uint8_t* o, size_t& n_gen,
2679 size_t& cg_pointers_found);
2681 void mark_through_cards_helper (uint8_t** poo, size_t& ngen,
2682 size_t& cg_pointers_found,
2683 card_fn fn, uint8_t* nhigh,
2684 uint8_t* next_boundary
2685 CARD_MARKING_STEALING_ARG(gc_heap* hpt));
2688 BOOL card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
2689 size_t& cg_pointers_found,
2690 size_t& n_eph, size_t& n_card_set,
2691 size_t& card, size_t& end_card,
2692 BOOL& foundp, uint8_t*& start_address,
2693 uint8_t*& limit, size_t& n_cards_cleared
2694 CARD_MARKING_STEALING_ARGS(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t& card_word_end_out));
2696 void mark_through_cards_for_segments(card_fn fn, BOOL relocating CARD_MARKING_STEALING_ARG(gc_heap* hpt));
2699 void repair_allocation_in_expanded_heap (generation* gen);
2701 BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2703 BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2705 BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2706 #ifdef SEG_REUSE_STATS
2708 size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2709 #endif //SEG_REUSE_STATS
2711 void build_ordered_free_spaces (heap_segment* seg);
2713 void count_plug (size_t last_plug_size, uint8_t*& last_plug);
2715 void count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug);
2717 void build_ordered_plug_indices ();
2719 void init_ordered_free_space_indices ();
2721 void trim_free_spaces_indices ();
2723 BOOL try_best_fit (BOOL end_of_segment_p);
2725 BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2727 BOOL process_free_space (heap_segment* seg,
2729 size_t min_free_size,
2730 size_t min_cont_size,
2731 size_t* total_free_space,
2732 size_t* largest_free_space);
2734 size_t compute_eph_gen_starts_size();
2736 void compute_new_ephemeral_size();
2738 BOOL expand_reused_seg_p();
2740 BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2741 size_t min_cont_size, allocator* al);
2743 uint8_t* allocate_in_expanded_heap (generation* gen, size_t size,
2744 BOOL& adjacentp, uint8_t* old_loc,
2746 BOOL set_padding_on_saved_p,
2747 mark* pinned_plug_entry,
2748 #endif //SHORT_PLUGS
2749 BOOL consider_bestfit, int active_new_gen_number
2750 REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2752 void realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
2753 generation* gen, uint8_t* start_address,
2754 unsigned int& active_new_gen_number,
2755 uint8_t*& last_pinned_gap, BOOL& leftp,
2758 , mark* pinned_plug_entry
2759 #endif //SHORT_PLUGS
2762 void realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address,
2764 unsigned int& active_new_gen_number,
2765 uint8_t*& last_pinned_gap, BOOL& leftp);
2767 void realloc_plugs (generation* consing_gen, heap_segment* seg,
2768 uint8_t* start_address, uint8_t* end_address,
2769 unsigned active_new_gen_number);
2772 void set_expand_in_full_gc (int condemned_gen_number);
2775 void verify_no_pins (uint8_t* start, uint8_t* end);
2778 generation* expand_heap (int condemned_generation,
2779 generation* consing_gen,
2780 heap_segment* new_heap_segment);
2783 void save_ephemeral_generation_starts();
2786 size_t get_gen0_min_size();
2789 void set_static_data();
2792 void init_static_data();
2795 bool init_dynamic_data ();
2797 float surv_to_growth (float cst, float limit, float max_limit);
2799 size_t desired_new_allocation (dynamic_data* dd, size_t out,
2800 int gen_number, int pass);
2803 void trim_youngest_desired_low_memory();
2806 void decommit_ephemeral_segment_pages();
2810 size_t trim_youngest_desired (uint32_t memory_load,
2811 size_t total_new_allocation,
2812 size_t total_min_allocation);
2814 size_t joined_youngest_desired (size_t new_allocation);
2815 #endif // HOST_64BIT
2817 size_t get_total_heap_size ();
2819 size_t get_total_committed_size();
2821 size_t get_total_fragmentation();
2823 size_t get_total_gen_fragmentation (int gen_number);
2825 size_t get_total_gen_estimated_reclaim (int gen_number);
2827 void get_memory_info (uint32_t* memory_load,
2828 uint64_t* available_physical=NULL,
2829 uint64_t* available_page_file=NULL);
2831 size_t generation_size (int gen_number);
2833 size_t get_total_survived_size();
2834 // this also resets allocated_since_last_gc
2836 size_t get_total_allocated_since_last_gc();
2838 size_t get_current_allocated();
2840 size_t get_total_allocated();
2842 size_t get_total_promoted();
2843 #ifdef BGC_SERVO_TUNING
2845 size_t get_total_generation_size (int gen_number);
2847 size_t get_total_servo_alloc (int gen_number);
2849 size_t get_total_bgc_promoted();
2851 size_t get_total_surv_size (int gen_number);
2853 size_t get_total_begin_data_size (int gen_number);
2855 size_t get_total_generation_fl_size (int gen_number);
2857 size_t get_current_gc_index (int gen_number);
2858 #endif //BGC_SERVO_TUNING
2860 size_t current_generation_size (int gen_number);
2862 size_t generation_plan_size (int gen_number);
2864 void compute_promoted_allocation (int gen_number);
2866 size_t compute_in (int gen_number);
2868 void compute_new_dynamic_data (int gen_number);
2870 gc_history_global* get_gc_data_global();
2872 gc_history_per_heap* get_gc_data_per_heap();
2874 size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2876 size_t generation_fragmentation (generation* gen,
2877 generation* consing_gen,
2880 size_t generation_sizes (generation* gen);
2882 size_t committed_size();
2884 size_t uoh_committed_size (int gen_number, size_t* allocated);
2886 size_t approximate_new_allocation();
2888 size_t end_space_after_gc();
2890 size_t estimated_reclaim (int gen_number);
2892 BOOL decide_on_compacting (int condemned_gen_number,
2893 size_t fragmentation,
2894 BOOL& should_expand);
2896 BOOL sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end,
2897 size_t end_space_required,
2898 gc_tuning_point tp);
2900 BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2902 void sweep_uoh_objects (int gen_num);
2904 void relocate_in_uoh_objects (int gen_num);
2906 void mark_through_cards_for_uoh_objects(card_fn fn, int oldest_gen_num, BOOL relocating
2907 CARD_MARKING_STEALING_ARG(gc_heap* hpt));
2909 void descr_generations (BOOL begin_gc_p);
2912 void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2914 /*------------ Multiple non isolated heaps ----------------*/
2915 #ifdef MULTIPLE_HEAPS
2917 BOOL create_thread_support (int number_of_heaps);
2919 void destroy_thread_support ();
2921 bool create_gc_thread();
2923 void gc_thread_function();
2925 #ifdef PARALLEL_MARK_LIST_SORT
2927 void sort_mark_list();
2929 void merge_mark_lists();
2931 void append_to_mark_list(uint8_t **start, uint8_t **end);
2932 #else //PARALLEL_MARK_LIST_SORT
2934 void combine_mark_lists();
2935 #endif //PARALLEL_MARK_LIST_SORT
2937 #endif //MULTIPLE_HEAPS
2941 void grow_mark_list();
2944 #ifdef BACKGROUND_GC
2947 uint8_t* high_page (heap_segment* seg, BOOL concurrent_p);
2950 void revisit_written_page (uint8_t* page, uint8_t* end,
2951 BOOL concurrent_p, uint8_t*& last_page,
2952 uint8_t*& last_object, BOOL large_objects_p,
2953 size_t& num_marked_objects);
2955 void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2958 void concurrent_scan_dependent_handles (ScanContext *sc);
2964 void bgc_suspend_EE ();
2970 void background_scan_dependent_handles (ScanContext *sc);
2975 // Restores BGC settings if necessary.
2977 void recover_bgc_settings();
2980 BOOL should_commit_mark_array();
2983 void clear_commit_flag();
2986 void clear_commit_flag_global();
2989 void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr);
2992 BOOL commit_mark_array_by_range (uint8_t* begin,
2994 uint32_t* mark_array_addr);
2997 BOOL commit_mark_array_new_seg (gc_heap* hp,
2999 uint32_t* new_card_table = 0,
3000 uint8_t* new_lowest_address = 0);
3003 BOOL commit_mark_array_with_check (heap_segment* seg, uint32_t* mark_array_addr);
3005 // commit the portion of the mark array that corresponds to
3006 // this segment (from beginning to reserved).
3007 // seg and heap_segment_reserved (seg) are guaranteed to be
3010 BOOL commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr);
3012 // During BGC init, we commit the mark array for all in range
3013 // segments whose mark array hasn't been committed or fully
3014 // committed. All rw segments are in range, only ro segments
3015 // can be partial in range.
3017 BOOL commit_mark_array_bgc_init();
3020 BOOL commit_new_mark_array (uint32_t* new_mark_array);
3022 // We need to commit all segments that intersect with the bgc
3023 // range. If a segment is only partially in range, we still
3024 // should commit the mark array for the whole segment as
3025 // we will set the mark array commit flag for this segment.
3027 BOOL commit_new_mark_array_global (uint32_t* new_mark_array);
3029 // We can't decommit the first and the last page in the mark array
3030 // if the beginning and ending don't happen to be page aligned.
3032 void decommit_mark_array_by_seg (heap_segment* seg);
3035 void background_mark_phase();
3038 void background_drain_mark_list (int thread);
3041 void background_grow_c_mark_list();
3044 void background_promote_callback(Object** object, ScanContext* sc, uint32_t flags);
3047 void mark_absorb_new_alloc();
3053 BOOL prepare_bgc_thread(gc_heap* gh);
3055 BOOL create_bgc_thread(gc_heap* gh);
3057 BOOL create_bgc_threads_support (int number_of_heaps);
3059 BOOL create_bgc_thread_support();
3061 int check_for_ephemeral_alloc();
3063 void wait_to_proceed();
3065 void fire_alloc_wait_event_begin (alloc_wait_reason awr);
3067 void fire_alloc_wait_event_end (alloc_wait_reason awr);
3069 uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
3071 BOOL background_running_p() { return gc_background_running; }
3075 void kill_gc_thread();
3077 void bgc_thread_function();
3079 void do_background_gc();
3081 void bgc_thread_stub (void* arg);
3082 #endif //BACKGROUND_GC
3087 VOLATILE(bool) internal_gc_done;
3089 #ifdef BACKGROUND_GC
3091 uint32_t cm_in_progress;
3093 // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
3094 // we do right before the bgc starts.
3096 BOOL dont_restart_ee_p;
3099 GCEvent bgc_start_event;
3100 #endif //BACKGROUND_GC
3102 // The variables in this block are known to the DAC and must come first
3103 // in the gc_heap class.
3105 // Keeps track of the highest address allocated by Alloc
3107 uint8_t* alloc_allocated;
3109 // The ephemeral heap segment
3111 heap_segment* ephemeral_heap_segment;
3113 // The finalize queue.
3115 CFinalize* finalize_queue;
3119 oom_history oom_info;
3121 // Interesting data, recorded per-heap.
3123 size_t interesting_data_per_heap[max_idp_count];
3126 size_t compact_reasons_per_heap[max_compact_reasons_count];
3129 size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
3132 size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
3135 uint8_t** internal_root_array;
3138 size_t internal_root_array_index;
3141 BOOL heap_analyze_success;
3143 // The generation table. Must always be last.
3145 generation generation_table [total_generation_count];
3149 #define max_oom_history_count 4
3152 int oomhist_index_per_heap;
3155 oom_history oomhist_per_heap[max_oom_history_count];
3158 void add_to_oom_history_per_heap();
3161 BOOL expanded_in_fgc;
3164 uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
3166 // Returns TRUE if the current thread used to be in cooperative mode
3167 // before calling this function.
3169 bool enable_preemptive ();
3171 void disable_preemptive (bool restore_cooperative);
3173 /* ------------------- per heap members --------------------------*/
3176 #ifndef MULTIPLE_HEAPS
3177 GCEvent gc_done_event;
3178 #else // MULTIPLE_HEAPS
3179 GCEvent gc_done_event;
3180 #endif // MULTIPLE_HEAPS
3183 VOLATILE(int32_t) gc_done_event_lock;
3186 VOLATILE(bool) gc_done_event_set;
3192 void reset_gc_done();
3195 void enter_gc_done_event_lock();
3198 void exit_gc_done_event_lock();
3201 uint8_t* ephemeral_low; //lowest ephemeral address
3204 uint8_t* ephemeral_high; //highest ephemeral address
3207 uint32_t* card_table;
3212 #ifdef BACKGROUND_GC
3214 uint32_t* mark_array;
3215 #endif //BACKGROUND_GC
3219 uint32_t* card_bundle_table;
3220 #endif //CARD_BUNDLE
3222 #ifdef FEATURE_BASICFREEZE
3224 sorted_table* seg_table;
3225 #endif //FEATURE_BASICFREEZE
3228 VOLATILE(BOOL) gc_started;
3230 // The following 2 events are there to support the gen2
3231 // notification feature which is only enabled if concurrent
3234 GCEvent full_gc_approach_event;
3237 GCEvent full_gc_end_event;
3239 // Full GC Notification percentages.
3241 uint32_t fgn_maxgen_percent;
3244 uint32_t fgn_loh_percent;
3247 VOLATILE(bool) full_gc_approach_event_set;
3249 #ifdef BACKGROUND_GC
3251 BOOL fgn_last_gc_was_concurrent;
3252 #endif //BACKGROUND_GC
3255 size_t fgn_last_alloc;
3257 static uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
3259 static wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms);
3262 uint8_t* demotion_low;
3265 uint8_t* demotion_high;
3271 uint8_t* last_gen1_pin_end;
3274 gen_to_condemn_tuning gen_to_condemn_reasons;
3277 size_t etw_allocation_running_amount[2];
3280 uint64_t total_alloc_bytes_soh;
3283 uint64_t total_alloc_bytes_uoh;
3286 int gc_policy; //sweep, compact, expand
3288 #ifdef MULTIPLE_HEAPS
3290 bool gc_thread_no_affinitize_p;
3293 GCEvent gc_start_event;
3296 GCEvent ee_suspend_event;
3299 heap_segment* new_heap_segment;
3302 size_t min_gen0_balance_delta;
3304 #define alloc_quantum_balance_units (16)
3307 size_t min_balance_threshold;
3308 #else //MULTIPLE_HEAPS
3311 size_t allocation_running_time;
3314 size_t allocation_running_amount;
3316 #endif //MULTIPLE_HEAPS
3319 gc_latency_level latency_level;
3322 gc_mechanisms settings;
3325 gc_history_global gc_data_global;
3328 uint64_t gc_last_ephemeral_decommit_time;
3331 size_t gen0_big_free_spaces;
3335 double short_plugs_pad_ratio;
3336 #endif //SHORT_PLUGS
3338 // We record the time GC work is done while EE is suspended.
3339 // suspended_start_ts is what we get right before we call
3340 // SuspendEE. We omit the time between GC end and RestartEE
3341 // because it's very short and by the time we are calling it
3342 // the settings may have changed and we'd have to do more work
3343 // to figure out the right GC to record info of.
3345 // The complications are the GCs triggered without their own
3346 // SuspendEE, in which case we will record that GC's duration
3347 // as its pause duration and the rest toward the GC that
3348 // the SuspendEE was for. The ephemeral GC we might trigger
3349 // at the beginning of a BGC and the PM triggered full GCs
3350 // fall into this case.
3352 uint64_t suspended_start_time;
3355 uint64_t end_gc_time;
3358 uint64_t total_suspended_time;
3361 uint64_t process_start_time;
3364 last_recorded_gc_info last_ephemeral_gc_info;
3367 last_recorded_gc_info last_full_blocking_gc_info;
3369 #ifdef BACKGROUND_GC
3370 // If the user didn't specify which kind of GC info to return, we need
3371 // to return the last recorded one. There's a complication with BGC as BGC
3372 // end runs concurrently. If 2 BGCs run back to back, we can't have one
3373 // update the info while the user thread is reading it (and we'd still like
3374 // to return the last BGC info otherwise if we only did BGCs we could frequently
3375 // return nothing). So we maintain 2 of these for BGC and the older one is
3376 // guaranteed to be consistent.
3378 last_recorded_gc_info last_bgc_info[2];
3379 // This is either 0 or 1.
3381 VOLATILE(int) last_bgc_info_index;
3382 // Since a BGC can finish later than blocking GCs with larger indices,
3383 // we can't just compare the index recorded in the GC info. We use this
3384 // to know whether we should be looking for a bgc info or a blocking GC,
3385 // if the user asks for the latest GC info of any kind.
3386 // This can only go from false to true concurrently so if it is true,
3387 // it means the bgc info is ready.
3389 VOLATILE(bool) is_last_recorded_bgc;
3392 void add_bgc_pause_duration_0();
3395 last_recorded_gc_info* get_completed_bgc_info();
3396 #endif //BACKGROUND_GC
3400 size_t youngest_gen_desired_th;
3404 uint32_t high_memory_load_th;
3407 uint32_t m_high_memory_load_th;
3410 uint32_t v_high_memory_load_th;
3413 bool is_restricted_physical_mem;
3416 uint64_t mem_one_percent;
3419 uint64_t total_physical_mem;
3422 uint64_t entry_available_physical_mem;
3424 // Hard limit for the heap, only supported on 64-bit.
3426 // Users can specify a hard limit for the GC heap via GCHeapHardLimit or
3427 // a percentage of the physical memory this process is allowed to use via
3428 // GCHeapHardLimitPercent. This is the maximum commit size the GC heap
3431 // The way the hard limit is decided is:
3433 // If the GCHeapHardLimit config is specified that's the value we use;
3434 // else if the GCHeapHardLimitPercent config is specified we use that
3436 // else if the process is running inside a container with a memory limit,
3437 // the hard limit is
3438 // max (20mb, 75% of the memory limit on the container).
3440 // Due to the different perf charicteristics of containers we make the
3441 // following policy changes:
3443 // 1) No longer affinitize Server GC threads by default because we wouldn't
3444 // want all the containers on the machine to only affinitize to use the
3445 // first few CPUs (and we don't know which CPUs are already used). You
3446 // can however override this by specifying the GCHeapAffinitizeMask
3447 // config which will decide which CPUs the process will affinitize the
3448 // Server GC threads to.
3450 // 2) Segment size is determined by limit / number of heaps but has a
3451 // minimum value of 16mb. This can be changed by specifying the number
3452 // of heaps via the GCHeapCount config. The minimum size is to avoid
3453 // the scenario where the hard limit is small but the process can use
3454 // many procs and we end up with tiny segments which doesn't make sense.
3456 // 3) LOH compaction occurs automatically if needed.
3458 // Since we do allow both gen0 and gen3 allocations, and we don't know
3459 // the distinction (and it's unrealistic to request users to specify
3460 // this distribution) we reserve memory this way -
3462 // For SOH we reserve (limit / number of heaps) per heap.
3463 // For LOH we reserve (limit * 2 / number of heaps) per heap.
3465 // This means the following -
3467 // + we never need to acquire new segments. This simplies the perf
3468 // calculations by a lot.
3470 // + we now need a different definition of "end of seg" because we
3471 // need to make sure the total does not exceed the limit.
3473 // + if we detect that we exceed the commit limit in the allocator we
3474 // wouldn't want to treat that as a normal commit failure because that
3475 // would mean we always do full compacting GCs.
3477 // TODO: some of the logic here applies to the general case as well
3478 // such as LOH automatic compaction. However it will require more
3479 //testing to change the general case.
3481 size_t heap_hard_limit;
3484 size_t heap_hard_limit_oh[total_oh_count - 1];
3487 CLRCriticalSection check_commit_cs;
3490 size_t current_total_committed;
3493 size_t committed_by_oh[total_oh_count];
3495 // This is what GC uses for its own bookkeeping.
3497 size_t current_total_committed_bookkeeping;
3499 // This is what GC's own book keeping consumes.
3501 size_t current_total_committed_gc_own;
3503 // This is if large pages should be used.
3505 bool use_large_pages_p;
3507 #ifdef HEAP_BALANCE_INSTRUMENTATION
3509 size_t last_gc_end_time_us;
3510 #endif //HEAP_BALANCE_INSTRUMENTATION
3513 size_t min_segment_size;
3516 size_t min_segment_size_shr;
3518 // For SOH we always allocate segments of the same
3519 // size unless no_gc_region requires larger ones.
3521 size_t soh_segment_size;
3524 size_t min_uoh_segment_size;
3527 size_t segment_info_size;
3530 uint8_t* lowest_address;
3533 uint8_t* highest_address;
3536 BOOL ephemeral_promotion;
3538 uint8_t* saved_ephemeral_plan_start[ephemeral_generation_count];
3540 size_t saved_ephemeral_plan_start_size[ephemeral_generation_count];
3543 #ifdef MULTIPLE_HEAPS
3549 VOLATILE(int) alloc_context_count;
3550 #else //MULTIPLE_HEAPS
3551 #define vm_heap ((GCHeap*) g_theGCHeap)
3552 #define heap_number (0)
3553 #endif //MULTIPLE_HEAPS
3556 uint64_t time_bgc_last;
3559 uint8_t* gc_low; // lowest address being condemned
3562 uint8_t* gc_high; //highest address being condemned
3565 size_t mark_stack_tos;
3568 size_t mark_stack_bos;
3571 size_t mark_stack_array_length;
3574 mark* mark_stack_array;
3576 #if defined (_DEBUG) && defined (VERIFY_HEAP)
3578 BOOL verify_pinned_queue_p;
3579 #endif // _DEBUG && VERIFY_HEAP
3582 uint8_t* oldest_pinned_plug;
3585 size_t num_pinned_objects;
3587 #ifdef FEATURE_LOH_COMPACTION
3589 size_t loh_pinned_queue_tos;
3592 size_t loh_pinned_queue_bos;
3595 size_t loh_pinned_queue_length;
3598 int loh_pinned_queue_decay;
3601 mark* loh_pinned_queue;
3603 // This is for forced LOH compaction via the complus env var
3605 BOOL loh_compaction_always_p;
3607 // This is set by the user.
3609 gc_loh_compaction_mode loh_compaction_mode;
3611 // We may not compact LOH on every heap if we can't
3612 // grow the pinned queue. This is to indicate whether
3613 // this heap's LOH is compacted or not. So even if
3614 // settings.loh_compaction is TRUE this may not be TRUE.
3616 BOOL loh_compacted_p;
3617 #endif //FEATURE_LOH_COMPACTION
3619 #ifdef BACKGROUND_GC
3622 EEThreadId bgc_thread_id;
3626 uint8_t* background_written_addresses [array_size+2];
3627 #endif //WRITE_WATCH
3630 VOLATILE(c_gc_state) current_c_gc_state; //tells the large object allocator to
3631 //mark the object as new since the start of gc.
3634 gc_mechanisms saved_bgc_settings;
3637 gc_history_global bgc_data_global;
3640 VOLATILE(BOOL) gc_background_running;
3643 gc_history_per_heap bgc_data_per_heap;
3646 BOOL bgc_thread_running; // gc thread is its main loop
3649 BOOL keep_bgc_threads_p;
3651 // This event is used by BGC threads to do something on
3652 // one specific thread while other BGC threads have to
3653 // wait. This is different from a join 'cause you can't
3654 // specify which thread should be doing some task
3655 // while other threads have to wait.
3656 // For example, to make the BGC threads managed threads
3657 // we need to create them on the thread that called
3658 // SuspendEE which is heap 0.
3660 GCEvent bgc_threads_sync_event;
3666 CLRCriticalSection bgc_threads_timeout_cs;
3669 GCEvent background_gc_done_event;
3672 GCEvent ee_proceed_event;
3675 bool gc_can_use_concurrent;
3678 bool temp_disable_concurrent_p;
3681 BOOL do_ephemeral_gc_p;
3684 BOOL do_concurrent_p;
3687 VOLATILE(bgc_state) current_bgc_state;
3692 bgc_state current_bgc_state;
3693 uint32_t gc_time_ms;
3694 // This is in bytes per ms; consider breaking it
3695 // into the efficiency per phase.
3696 size_t gc_efficiency;
3698 uint8_t* gen0_start;
3700 uint8_t* bgc_highest;
3701 uint8_t* bgc_lowest;
3702 uint8_t* fgc_highest;
3703 uint8_t* fgc_lowest;
3708 #define max_history_count 64
3711 int gchist_index_per_heap;
3714 gc_history gchist_per_heap[max_history_count];
3720 gc_mechanisms_store gchist[max_history_count];
3723 size_t total_promoted_bytes;
3726 size_t bgc_overflow_count;
3729 size_t bgc_begin_loh_size;
3731 size_t bgc_begin_poh_size;
3733 size_t end_loh_size;
3735 size_t end_poh_size;
3737 #ifdef BGC_SERVO_TUNING
3739 uint64_t loh_a_no_bgc;
3742 uint64_t loh_a_bgc_marking;
3745 uint64_t loh_a_bgc_planning;
3747 // Total allocated last BGC's plan + between last and this bgc +
3750 uint64_t total_loh_a_last_bgc;
3753 size_t bgc_maxgen_end_fl_size;
3754 #endif //BGC_SERVO_TUNING
3756 // We need to throttle the LOH allocations during BGC since we can't
3757 // collect LOH when BGC is in progress.
3758 // We allow the LOH heap size to double during a BGC. So for every
3759 // 10% increase we will have the LOH allocating thread sleep for one more
3760 // ms. So we are already 30% over the original heap size the thread will
3763 uint32_t bgc_alloc_spin_uoh;
3765 // This includes what we allocate at the end of segment - allocating
3766 // in free list doesn't increase the heap size.
3768 size_t bgc_loh_size_increased;
3770 size_t bgc_poh_size_increased;
3773 size_t background_soh_alloc_count;
3776 size_t background_uoh_alloc_count;
3779 VOLATILE(int32_t) uoh_alloc_thread_count;
3782 uint8_t** background_mark_stack_tos;
3785 uint8_t** background_mark_stack_array;
3788 size_t background_mark_stack_array_length;
3791 uint8_t* background_min_overflow_address;
3794 uint8_t* background_max_overflow_address;
3796 // We can't process the soh range concurrently so we
3797 // wait till final mark to process it.
3799 BOOL processed_soh_overflow_p;
3802 uint8_t* background_min_soh_overflow_address;
3805 uint8_t* background_max_soh_overflow_address;
3808 heap_segment* saved_overflow_ephemeral_seg;
3811 heap_segment* saved_sweep_ephemeral_seg;
3814 uint8_t* saved_sweep_ephemeral_start;
3817 uint8_t* background_saved_lowest_address;
3820 uint8_t* background_saved_highest_address;
3822 // This is used for synchronization between the bgc thread
3823 // for this heap and the user threads allocating on this
3826 exclusive_sync* bgc_alloc_lock;
3830 snoop_stats_data snoop_stat;
3831 #endif //SNOOP_STATS
3835 uint8_t** c_mark_list;
3838 size_t c_mark_list_length;
3841 size_t c_mark_list_index;
3842 #endif //BACKGROUND_GC
3846 uint8_t** mark_list;
3849 size_t mark_list_size;
3852 bool mark_list_overflow;
3855 uint8_t** mark_list_end;
3858 uint8_t** mark_list_index;
3861 uint8_t** g_mark_list;
3862 #ifdef PARALLEL_MARK_LIST_SORT
3864 uint8_t** g_mark_list_copy;
3866 uint8_t*** mark_list_piece_start;
3867 uint8_t*** mark_list_piece_end;
3868 #endif //PARALLEL_MARK_LIST_SORT
3872 uint8_t* min_overflow_address;
3875 uint8_t* max_overflow_address;
3877 #ifndef MULTIPLE_HEAPS
3879 uint8_t* shigh; //keeps track of the highest marked object
3882 uint8_t* slow; //keeps track of the lowest marked object
3883 #endif //MULTIPLE_HEAPS
3886 size_t allocation_quantum;
3889 size_t alloc_contexts_used;
3892 no_gc_region_info current_no_gc_region_info;
3895 size_t soh_allocation_no_gc;
3898 size_t loh_allocation_no_gc;
3904 heap_segment* saved_loh_segment_no_gc;
3907 BOOL proceed_with_gc_p;
3909 #ifdef MULTIPLE_HEAPS
3911 BOOL gradual_decommit_in_progress_p;
3914 size_t max_decommit_step_size;
3915 #endif //MULTIPLE_HEAPS
3917 #define youngest_generation (generation_of (0))
3918 #define large_object_generation (generation_of (loh_generation))
3919 #define pinned_object_generation (generation_of (poh_generation))
3921 // The more_space_lock and gc_lock is used for 3 purposes:
3923 // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock_soh)
3924 // 2) to synchronize allocations of large objects (more_space_lock_uoh)
3925 // 3) to synchronize the GC itself (gc_lock)
3928 GCSpinLock gc_lock; //lock while doing GC
3931 GCSpinLock more_space_lock_soh; //lock while allocating more space for soh
3934 GCSpinLock more_space_lock_uoh;
3936 #ifdef SYNCHRONIZATION_STATS
3939 unsigned int good_suspension;
3942 unsigned int bad_suspension;
3944 // Number of times when msl_acquire is > 200 cycles.
3946 unsigned int num_high_msl_acquire;
3948 // Number of times when msl_acquire is < 200 cycles.
3950 unsigned int num_low_msl_acquire;
3952 // Number of times the more_space_lock is acquired.
3954 unsigned int num_msl_acquired;
3956 // Total cycles it takes to acquire the more_space_lock.
3958 uint64_t total_msl_acquire;
3961 void init_heap_sync_stats()
3963 good_suspension = 0;
3965 num_msl_acquired = 0;
3966 total_msl_acquire = 0;
3967 num_high_msl_acquire = 0;
3968 num_low_msl_acquire = 0;
3969 more_space_lock.init();
3974 void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3976 printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3978 alloc_contexts_used,
3981 (unsigned int)(total_msl_acquire / gc_count_during_log),
3982 num_high_msl_acquire / gc_count_during_log,
3983 num_low_msl_acquire / gc_count_during_log,
3984 num_msl_acquired / gc_count_during_log,
3985 more_space_lock.num_switch_thread / gc_count_during_log,
3986 more_space_lock.num_wait_longer / gc_count_during_log,
3987 more_space_lock.num_switch_thread_w / gc_count_during_log,
3988 more_space_lock.num_disable_preemptive_w / gc_count_during_log);
3991 #endif //SYNCHRONIZATION_STATS
3993 #define NUM_LOH_ALIST (7)
3994 // bucket 0 contains sizes less than 64*1024
3995 // the "BITS" number here is the highest bit in 64*1024 - 1, zero-based as in BitScanReverse.
3996 // see first_suitable_bucket(size_t size) for details.
3997 #define BASE_LOH_ALIST_BITS (15)
3999 alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
4001 #define NUM_GEN2_ALIST (12)
4003 // bucket 0 contains sizes less than 256
4004 #define BASE_GEN2_ALIST_BITS (7)
4006 // bucket 0 contains sizes less than 128
4007 #define BASE_GEN2_ALIST_BITS (6)
4008 #endif // HOST_64BIT
4010 alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
4012 #define NUM_POH_ALIST (19)
4013 // bucket 0 contains sizes less than 256
4014 #define BASE_POH_ALIST_BITS (7)
4016 alloc_list poh_alloc_list[NUM_POH_ALIST-1];
4018 //------------------------------------------
4021 dynamic_data dynamic_data_table [total_generation_count];
4024 gc_history_per_heap gc_data_per_heap;
4027 size_t maxgen_pinned_compact_before_advance;
4031 BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
4032 // if elevate_p is FALSE, it means we are determining fragmentation for a generation
4033 // to see if we should condemn this gen; otherwise it means we are determining if
4034 // we should elevate to doing max_gen from an ephemeral gen.
4036 BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
4039 dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
4041 BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
4043 BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
4046 int generation_skip_ratio;//in %
4049 BOOL gen0_bricks_cleared;
4051 int gen0_must_clear_bricks;
4054 bool maxgen_size_inc_p;
4057 size_t full_gc_counts[gc_type_max];
4059 // the # of bytes allocates since the last full compacting GC.
4061 uint64_t loh_alloc_since_cg;
4064 BOOL elevation_requested;
4066 // if this is TRUE, we should always guarantee that we do a
4067 // full compacting GC before we OOM.
4069 BOOL last_gc_before_oom;
4072 BOOL should_expand_in_full_gc;
4074 // When we decide if we should expand the heap or not, we are
4075 // fine NOT to expand if we find enough free space in gen0's free
4076 // list or end of seg and we check this in decide_on_compacting.
4077 // This is an expensive check so we just record the fact and not
4078 // need to check in the allocator again.
4080 BOOL sufficient_gen0_space_p;
4082 #ifdef MULTIPLE_HEAPS
4084 bool gen0_allocated_after_gc_p;
4085 #endif //MULTIPLE_HEAPS
4087 // A provisional mode means we could change our mind in the middle of a GC
4088 // and want to do a different GC instead.
4090 // Right now there's only one such case which is in the middle of a gen1
4091 // GC we want to do a blocking gen2 instead. If/When we have more we should
4092 // have an enum that tells us which case in this provisional mode
4095 // When this mode is triggered, our current (only) condition says
4096 // we have high fragmentation in gen2 even after we do a compacting
4097 // full GC which is an indication of heavy pinning in gen2. In this
4098 // case we never do BGCs, we just do either gen0 or gen1's till a
4099 // gen1 needs to increase the gen2 size, in which case we finish up
4100 // the current gen1 as a sweeping GC and immediately do a compacting
4101 // full GC instead (without restarting EE).
4103 bool provisional_mode_triggered;
4106 bool pm_trigger_full_gc;
4108 // For testing only BEG
4109 // pm_stress_on currently means (since we just have one mode) we
4110 // randomly turn the mode on; and after a random # of NGC2s we
4112 // NOTE that this means concurrent will be disabled so we can
4113 // simulate what this mode is supposed to be used.
4118 size_t provisional_triggered_gc_count;
4121 size_t provisional_off_gc_count;
4122 // For testing only END
4125 size_t num_provisional_triggered;
4128 size_t allocated_since_last_gc;
4130 #ifdef BACKGROUND_GC
4132 size_t ephemeral_fgc_counts[max_generation];
4135 BOOL alloc_wait_event_p;
4138 uint8_t* next_sweep_obj;
4141 uint8_t* current_sweep_pos;
4143 #endif //BACKGROUND_GC
4146 fgm_history fgm_result;
4149 size_t eph_gen_starts_size;
4151 #ifdef GC_CONFIG_DRIVEN
4152 // 0 stores compacting GCs;
4153 // 1 stores sweeping GCs;
4155 size_t compact_or_sweep_gcs[2];
4158 size_t interesting_data_per_gc[max_idp_count];
4159 #endif //GC_CONFIG_DRIVEN
4162 BOOL ro_segments_in_range;
4164 #ifdef BACKGROUND_GC
4166 heap_segment* freeable_soh_segment;
4167 #endif //BACKGROUND_GC
4170 heap_segment* freeable_uoh_segment;
4173 heap_segment* segment_standby_list;
4176 size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
4179 size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
4182 size_t ordered_plug_indices[MAX_NUM_BUCKETS];
4185 size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
4188 BOOL ordered_plug_indices_init;
4194 uint8_t* bestfit_first_pin;
4197 BOOL commit_end_of_seg;
4200 size_t max_free_space_items; // dynamically adjusted.
4203 size_t free_space_buckets;
4206 size_t free_space_items;
4208 // -1 means we are using all the free
4209 // spaces we have (not including
4210 // end of seg space).
4212 int trimmed_free_space_index;
4215 size_t total_ephemeral_plugs;
4218 seg_free_spaces* bestfit_seg;
4220 // Note: we know this from the plan phase.
4221 // total_ephemeral_plugs actually has the same value
4222 // but while we are calculating its value we also store
4223 // info on how big the plugs are for best fit which we
4224 // don't do in plan phase.
4225 // TODO: get rid of total_ephemeral_plugs.
4227 size_t total_ephemeral_size;
4234 BOOL heap_analyze_enabled;
4237 size_t internal_root_array_length;
4239 // next two fields are used to optimize the search for the object
4240 // enclosing the current reference handled by ha_mark_object_simple.
4242 uint8_t* current_obj;
4245 size_t current_obj_size;
4247 #endif //HEAP_ANALYZE
4252 int condemned_generation_num;
4255 BOOL blocking_collection;
4257 #ifdef MULTIPLE_HEAPS
4266 #ifdef BACKGROUND_GC
4268 size_t* g_bpromoted;
4269 #endif //BACKGROUND_GC
4272 int* g_mark_stack_busy;
4277 #ifdef BACKGROUND_GC
4280 #endif //BACKGROUND_GC
4281 #endif //MULTIPLE_HEAPS
4284 size_t reserved_memory;
4286 size_t reserved_memory_limit;
4288 BOOL g_low_memory_status;
4290 #ifdef FEATURE_CARD_MARKING_STEALING
4292 VOLATILE(uint32_t) card_mark_chunk_index_soh;
4295 VOLATILE(bool) card_mark_done_soh;
4298 VOLATILE(uint32_t) card_mark_chunk_index_loh;
4301 VOLATILE(uint32_t) card_mark_chunk_index_poh;
4304 VOLATILE(bool) card_mark_done_uoh;
4307 void reset_card_marking_enumerators()
4309 // set chunk index to all 1 bits so that incrementing it yields 0 as the first index
4310 card_mark_chunk_index_soh = ~0;
4311 card_mark_done_soh = false;
4313 card_mark_chunk_index_loh = ~0;
4314 card_mark_chunk_index_poh = ~0;
4315 card_mark_done_uoh = false;
4319 bool find_next_chunk(card_marking_enumerator& card_mark_enumerator, heap_segment* seg,
4320 size_t& n_card_set, uint8_t*& start_address, uint8_t*& limit,
4321 size_t& card, size_t& end_card, size_t& card_word_end);
4322 #endif //FEATURE_CARD_MARKING_STEALING
4326 void update_collection_counts ();
4329 #define ASSERT_OFFSETS_MATCH(field) \
4330 static_assert(offsetof(dac_gc_heap, field) == offsetof(gc_heap, field), #field " offset mismatch")
4332 #ifdef MULTIPLE_HEAPS
4333 ASSERT_OFFSETS_MATCH(alloc_allocated);
4334 ASSERT_OFFSETS_MATCH(ephemeral_heap_segment);
4335 ASSERT_OFFSETS_MATCH(finalize_queue);
4336 ASSERT_OFFSETS_MATCH(oom_info);
4337 ASSERT_OFFSETS_MATCH(interesting_data_per_heap);
4338 ASSERT_OFFSETS_MATCH(compact_reasons_per_heap);
4339 ASSERT_OFFSETS_MATCH(expand_mechanisms_per_heap);
4340 ASSERT_OFFSETS_MATCH(interesting_mechanism_bits_per_heap);
4341 ASSERT_OFFSETS_MATCH(internal_root_array);
4342 ASSERT_OFFSETS_MATCH(internal_root_array_index);
4343 ASSERT_OFFSETS_MATCH(heap_analyze_success);
4344 ASSERT_OFFSETS_MATCH(generation_table);
4345 #endif // MULTIPLE_HEAPS
4347 #ifdef FEATURE_PREMORTEM_FINALIZATION
4350 #ifdef DACCESS_COMPILE
4351 friend class ::ClrDataAccess;
4352 #endif // DACCESS_COMPILE
4354 friend class CFinalizeStaticAsserts;
4358 //adjust the count and add a constant to add a segment
4359 static const int ExtraSegCount = 2;
4360 static const int FinalizerListSeg = total_generation_count + 1;
4361 static const int CriticalFinalizerListSeg = total_generation_count;
4362 //Does not correspond to a segment
4363 static const int FreeList = total_generation_count + ExtraSegCount;
4365 PTR_PTR_Object m_FillPointers[total_generation_count + ExtraSegCount];
4366 PTR_PTR_Object m_Array;
4367 PTR_PTR_Object m_EndArray;
4368 size_t m_PromotedCount;
4370 VOLATILE(int32_t) lock;
4372 EEThreadId lockowner_threadid;
4376 void MoveItem (Object** fromIndex,
4377 unsigned int fromSeg,
4378 unsigned int toSeg);
4380 inline PTR_PTR_Object& SegQueue (unsigned int Seg)
4382 return (Seg ? m_FillPointers [Seg-1] : m_Array);
4384 inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
4386 return m_FillPointers [Seg];
4389 BOOL IsSegEmpty ( unsigned int i)
4391 ASSERT ( (int)i < FreeList);
4392 return (SegQueueLimit(i) == SegQueue (i));
4399 void EnterFinalizeLock();
4400 void LeaveFinalizeLock();
4401 bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
4402 Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
4403 BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
4404 void RelocateFinalizationData (int gen, gc_heap* hp);
4405 void WalkFReachableObjects (fq_walk_fn fn);
4406 void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
4407 void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
4408 size_t GetPromotedCount();
4410 //Methods used by the shutdown code to call every finalizer
4411 size_t GetNumberFinalizableObjects();
4413 void CheckFinalizerObjects();
4416 class CFinalizeStaticAsserts {
4417 static_assert(dac_finalize_queue::ExtraSegCount == CFinalize::ExtraSegCount, "ExtraSegCount mismatch");
4418 static_assert(offsetof(dac_finalize_queue, m_FillPointers) == offsetof(CFinalize, m_FillPointers), "CFinalize layout mismatch");
4420 #endif // FEATURE_PREMORTEM_FINALIZATION
4423 size_t& dd_begin_data_size (dynamic_data* inst)
4425 return inst->begin_data_size;
4428 size_t& dd_survived_size (dynamic_data* inst)
4430 return inst->survived_size;
4432 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
4434 size_t& dd_num_npinned_plugs(dynamic_data* inst)
4436 return inst->num_npinned_plugs;
4438 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
4440 size_t& dd_pinned_survived_size (dynamic_data* inst)
4442 return inst->pinned_survived_size;
4445 size_t& dd_added_pinned_size (dynamic_data* inst)
4447 return inst->added_pinned_size;
4450 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
4452 return inst->artificial_pinned_survived_size;
4456 size_t& dd_padding_size (dynamic_data* inst)
4458 return inst->padding_size;
4460 #endif //SHORT_PLUGS
4462 size_t& dd_current_size (dynamic_data* inst)
4464 return inst->current_size;
4467 float& dd_surv (dynamic_data* inst)
4472 size_t& dd_freach_previous_promotion (dynamic_data* inst)
4474 return inst->freach_previous_promotion;
4477 size_t& dd_desired_allocation (dynamic_data* inst)
4479 return inst->desired_allocation;
4482 size_t& dd_collection_count (dynamic_data* inst)
4484 return inst->collection_count;
4487 size_t& dd_promoted_size (dynamic_data* inst)
4489 return inst->promoted_size;
4492 float& dd_limit (dynamic_data* inst)
4494 return inst->sdata->limit;
4497 float& dd_max_limit (dynamic_data* inst)
4499 return inst->sdata->max_limit;
4502 size_t& dd_max_size (dynamic_data* inst)
4504 return inst->sdata->max_size;
4507 size_t& dd_min_size (dynamic_data* inst)
4509 return inst->min_size;
4512 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
4514 return inst->new_allocation;
4517 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
4519 return inst->gc_new_allocation;
4522 size_t& dd_fragmentation_limit (dynamic_data* inst)
4524 return inst->sdata->fragmentation_limit;
4527 float& dd_fragmentation_burden_limit (dynamic_data* inst)
4529 return inst->sdata->fragmentation_burden_limit;
4532 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
4534 return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
4537 size_t& dd_fragmentation (dynamic_data* inst)
4539 return inst->fragmentation;
4542 size_t& dd_gc_clock (dynamic_data* inst)
4544 return inst->gc_clock;
4547 uint64_t& dd_time_clock (dynamic_data* inst)
4549 return inst->time_clock;
4553 size_t& dd_gc_clock_interval (dynamic_data* inst)
4555 return inst->sdata->gc_clock;
4558 uint64_t& dd_time_clock_interval (dynamic_data* inst)
4560 return inst->sdata->time_clock;
4564 size_t& dd_gc_elapsed_time (dynamic_data* inst)
4566 return inst->gc_elapsed_time;
4570 float& dd_gc_speed (dynamic_data* inst)
4572 return inst->gc_speed;
4576 alloc_context* generation_alloc_context (generation* inst)
4578 return &(inst->allocation_context);
4582 uint8_t*& generation_allocation_start (generation* inst)
4584 return inst->allocation_start;
4587 uint8_t*& generation_allocation_pointer (generation* inst)
4589 return inst->allocation_context.alloc_ptr;
4592 uint8_t*& generation_allocation_limit (generation* inst)
4594 return inst->allocation_context.alloc_limit;
4597 allocator* generation_allocator (generation* inst)
4599 return &inst->free_list_allocator;
4603 PTR_heap_segment& generation_start_segment (generation* inst)
4605 return inst->start_segment;
4608 heap_segment*& generation_allocation_segment (generation* inst)
4610 return inst->allocation_segment;
4613 uint8_t*& generation_plan_allocation_start (generation* inst)
4615 return inst->plan_allocation_start;
4618 size_t& generation_plan_allocation_start_size (generation* inst)
4620 return inst->plan_allocation_start_size;
4623 uint8_t*& generation_allocation_context_start_region (generation* inst)
4625 return inst->allocation_context_start_region;
4628 size_t& generation_free_list_space (generation* inst)
4630 return inst->free_list_space;
4633 size_t& generation_free_obj_space (generation* inst)
4635 return inst->free_obj_space;
4638 size_t& generation_allocation_size (generation* inst)
4640 return inst->allocation_size;
4644 size_t& generation_pinned_allocated (generation* inst)
4646 return inst->pinned_allocated;
4649 size_t& generation_pinned_allocation_sweep_size (generation* inst)
4651 return inst->pinned_allocation_sweep_size;
4654 size_t& generation_pinned_allocation_compact_size (generation* inst)
4656 return inst->pinned_allocation_compact_size;
4659 size_t& generation_free_list_allocated (generation* inst)
4661 return inst->free_list_allocated;
4664 size_t& generation_end_seg_allocated (generation* inst)
4666 return inst->end_seg_allocated;
4669 BOOL& generation_allocate_end_seg_p (generation* inst)
4671 return inst->allocate_end_seg_p;
4674 size_t& generation_condemned_allocated (generation* inst)
4676 return inst->condemned_allocated;
4679 size_t& generation_sweep_allocated (generation* inst)
4681 return inst->sweep_allocated;
4683 #ifdef FREE_USAGE_STATS
4685 size_t& generation_pinned_free_obj_space (generation* inst)
4687 return inst->pinned_free_obj_space;
4690 size_t& generation_allocated_in_pinned_free (generation* inst)
4692 return inst->allocated_in_pinned_free;
4695 size_t& generation_allocated_since_last_pin (generation* inst)
4697 return inst->allocated_since_last_pin;
4699 #endif //FREE_USAGE_STATS
4701 float generation_allocator_efficiency (generation* inst)
4703 if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4705 return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4711 size_t generation_unusable_fragmentation (generation* inst)
4713 return (size_t)(generation_free_obj_space (inst) +
4714 (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
4717 #define plug_skew sizeof(ObjHeader)
4718 // We always use USE_PADDING_TAIL when fitting so items on the free list should be
4719 // twice the min_obj_size.
4720 #define min_free_list (2*min_obj_size)
4723 uint8_t * skew[plug_skew / sizeof(uint8_t *)];
4733 //Note that these encode the fact that plug_skew is a multiple of uint8_t*.
4734 // Each of new field is prepended to the prior struct.
4736 struct plug_and_pair
4742 struct plug_and_reloc
4756 int lr; //for clearing the entire pair in one instruction
4761 struct gap_reloc_pair
4768 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4770 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4772 plug_and_gap plugandgap;
4775 struct loh_obj_and_pad
4781 struct loh_padding_obj
4788 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4791 #define heap_segment_flags_readonly 1
4792 #define heap_segment_flags_inrange 2
4793 #define heap_segment_flags_unmappable 4
4794 #define heap_segment_flags_loh 8
4795 #ifdef BACKGROUND_GC
4796 #define heap_segment_flags_swept 16
4797 #define heap_segment_flags_decommitted 32
4798 #define heap_segment_flags_ma_committed 64
4799 // for segments whose mark array is only partially committed.
4800 #define heap_segment_flags_ma_pcommitted 128
4801 #define heap_segment_flags_uoh_delete 256
4803 #define heap_segment_flags_poh 512
4804 #endif //BACKGROUND_GC
4806 //need to be careful to keep enough pad items to fit a relocation node
4807 //padded to QuadWord before the plug_skew
4818 PTR_heap_segment next;
4819 uint8_t* background_allocated;
4820 #ifdef MULTIPLE_HEAPS
4823 uint8_t* saved_committed;
4824 size_t saved_desired_allocation;
4826 #endif //MULTIPLE_HEAPS
4827 uint8_t* decommit_target;
4828 uint8_t* plan_allocated;
4829 uint8_t* saved_bg_allocated;
4832 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4833 #pragma warning(disable:4324) // structure was padded due to __declspec(align())
4835 aligned_plug_and_gap padandplug;
4837 #pragma warning(default:4324) // structure was padded due to __declspec(align())
4841 static_assert(offsetof(dac_heap_segment, allocated) == offsetof(heap_segment, allocated), "DAC heap segment layout mismatch");
4842 static_assert(offsetof(dac_heap_segment, committed) == offsetof(heap_segment, committed), "DAC heap segment layout mismatch");
4843 static_assert(offsetof(dac_heap_segment, reserved) == offsetof(heap_segment, reserved), "DAC heap segment layout mismatch");
4844 static_assert(offsetof(dac_heap_segment, used) == offsetof(heap_segment, used), "DAC heap segment layout mismatch");
4845 static_assert(offsetof(dac_heap_segment, mem) == offsetof(heap_segment, mem), "DAC heap segment layout mismatch");
4846 static_assert(offsetof(dac_heap_segment, flags) == offsetof(heap_segment, flags), "DAC heap segment layout mismatch");
4847 static_assert(offsetof(dac_heap_segment, next) == offsetof(heap_segment, next), "DAC heap segment layout mismatch");
4848 static_assert(offsetof(dac_heap_segment, background_allocated) == offsetof(heap_segment, background_allocated), "DAC heap segment layout mismatch");
4849 #ifdef MULTIPLE_HEAPS
4850 static_assert(offsetof(dac_heap_segment, heap) == offsetof(heap_segment, heap), "DAC heap segment layout mismatch");
4851 #endif // MULTIPLE_HEAPS
4854 uint8_t*& heap_segment_reserved (heap_segment* inst)
4856 return inst->reserved;
4859 uint8_t*& heap_segment_committed (heap_segment* inst)
4861 return inst->committed;
4864 uint8_t*& heap_segment_decommit_target (heap_segment* inst)
4866 return inst->decommit_target;
4869 uint8_t*& heap_segment_used (heap_segment* inst)
4874 uint8_t*& heap_segment_allocated (heap_segment* inst)
4876 return inst->allocated;
4880 BOOL heap_segment_read_only_p (heap_segment* inst)
4882 return ((inst->flags & heap_segment_flags_readonly) != 0);
4886 BOOL heap_segment_in_range_p (heap_segment* inst)
4888 return (!(inst->flags & heap_segment_flags_readonly) ||
4889 ((inst->flags & heap_segment_flags_inrange) != 0));
4893 BOOL heap_segment_unmappable_p (heap_segment* inst)
4895 return (!(inst->flags & heap_segment_flags_readonly) ||
4896 ((inst->flags & heap_segment_flags_unmappable) != 0));
4900 BOOL heap_segment_uoh_p (heap_segment * inst)
4902 return !!(inst->flags & (heap_segment_flags_loh | heap_segment_flags_poh));
4905 inline gc_oh_num heap_segment_oh (heap_segment * inst)
4907 if ((inst->flags & heap_segment_flags_loh) != 0)
4909 return gc_oh_num::loh;
4911 else if ((inst->flags & heap_segment_flags_poh) != 0)
4913 return gc_oh_num::poh;
4917 return gc_oh_num::soh;
4921 #ifdef BACKGROUND_GC
4923 BOOL heap_segment_decommitted_p (heap_segment * inst)
4925 return !!(inst->flags & heap_segment_flags_decommitted);
4927 #endif //BACKGROUND_GC
4930 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4935 uint8_t*& heap_segment_mem (heap_segment* inst)
4940 uint8_t*& heap_segment_plan_allocated (heap_segment* inst)
4942 return inst->plan_allocated;
4945 #ifdef BACKGROUND_GC
4947 uint8_t*& heap_segment_background_allocated (heap_segment* inst)
4949 return inst->background_allocated;
4952 uint8_t*& heap_segment_saved_bg_allocated (heap_segment* inst)
4954 return inst->saved_bg_allocated;
4956 #endif //BACKGROUND_GC
4958 #ifdef MULTIPLE_HEAPS
4960 gc_heap*& heap_segment_heap (heap_segment* inst)
4964 #endif //MULTIPLE_HEAPS
4967 generation* gc_heap::generation_of (int n)
4969 assert (((n < total_generation_count) && (n >= 0)));
4970 return &generation_table [ n ];
4974 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4976 return &dynamic_data_table [ gen_number ];
4979 #define GC_PAGE_SIZE 0x1000
4981 #define card_word_width ((size_t)32)
4984 // The value of card_size is determined empirically according to the average size of an object
4985 // In the code we also rely on the assumption that one card_table entry (uint32_t) covers an entire os page
4987 #if defined (HOST_64BIT)
4988 #define card_size ((size_t)(2*GC_PAGE_SIZE/card_word_width))
4990 #define card_size ((size_t)(GC_PAGE_SIZE/card_word_width))
4991 #endif // HOST_64BIT
4994 size_t card_word (size_t card)
4996 return card / card_word_width;
5000 unsigned card_bit (size_t card)
5002 return (unsigned)(card % card_word_width);
5006 size_t gcard_of (uint8_t* object)
5008 return (size_t)(object) / card_size;
5010 #ifdef FEATURE_CARD_MARKING_STEALING
5011 // make this 8 card bundle bits (2 MB in 64-bit architectures, 1 MB in 32-bit) - should be at least 1 card bundle bit
5012 #define CARD_MARKING_STEALING_GRANULARITY (card_size*card_word_width*card_bundle_size*8)
5014 #define THIS_ARG , __this
5015 class card_marking_enumerator
5018 heap_segment* segment;
5020 uint32_t segment_start_chunk_index;
5021 VOLATILE(uint32_t)* chunk_index_counter;
5022 uint8_t* chunk_high;
5023 uint32_t old_chunk_index;
5024 static const uint32_t INVALID_CHUNK_INDEX = ~0u;
5027 card_marking_enumerator(heap_segment* seg, uint8_t* low, VOLATILE(uint32_t)* counter) :
5028 segment(seg), gc_low(low), segment_start_chunk_index(0), chunk_index_counter(counter), chunk_high(nullptr), old_chunk_index(INVALID_CHUNK_INDEX)
5032 // move to the next chunk in this segment - return false if no more chunks in this segment
5033 bool move_next(heap_segment* seg, uint8_t*& low, uint8_t*& high);
5035 void exhaust_segment(heap_segment* seg)
5039 // make sure no more chunks in this segment - do this via move_next because we want to keep
5040 // incrementing the chunk_index_counter rather than updating it via interlocked compare exchange
5041 while (move_next(seg, low, high))
5045 uint8_t* get_chunk_high()
5052 #endif // FEATURE_CARD_MARKING_STEALING