// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#pragma optimize( "t", on )
#define inline __forceinline
#pragma warning(disable:4293)
#pragma warning(disable:4477)
inline void FATAL_GC_ERROR()
#ifndef DACCESS_COMPILE
GCToOSInterface::DebugBreak();
#endif // DACCESS_COMPILE
_ASSERTE(!"Fatal Error in GC.");
GCToEEInterface::HandleFatalError(COR_E_EXECUTIONENGINE);
#pragma inline_depth(20)
/* the following section defines the optional features */
// FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
// in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
// and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much
// work to make FEATURE_STRUCTALIGN not apply to LOH so they can both be turned on.
#define FEATURE_LOH_COMPACTION
#ifdef FEATURE_64BIT_ALIGNMENT
// We need the following feature as part of keeping 64-bit types aligned in the GC heap.
#define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during relocation
#endif //FEATURE_64BIT_ALIGNMENT
#define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
#define DESIRED_PLUG_LENGTH (1000)
#define FEATURE_PREMORTEM_FINALIZATION
#ifndef FEATURE_REDHAWK
#define COLLECTIBLE_CLASS
#endif // !FEATURE_REDHAWK
#define initial_internal_roots (1024*16)
#endif // HEAP_ANALYZE
#define MARK_LIST //use a sorted list to speed up the plan phase
#define BACKGROUND_GC //concurrent background GC (requires WRITE_WATCH)
#define MH_SC_MARK //scalable marking
//#define SNOOP_STATS //diagnostic
#define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
// This is used to mark some types volatile only when scalable marking is used.
#if defined (SERVER_GC) && defined (MH_SC_MARK)
#define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
#else //SERVER_GC&&MH_SC_MARK
#define SERVER_SC_MARK_VOLATILE(x) x
#endif //SERVER_GC&&MH_SC_MARK
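// Illustrative usage (hypothetical declaration, not part of this file):
//   SERVER_SC_MARK_VOLATILE(uint8_t*) entry;
// expands to VOLATILE(uint8_t*) entry under SERVER_GC + MH_SC_MARK, and to
// a plain uint8_t* entry in all other configurations.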
//#define MULTIPLE_HEAPS //Allow multiple heaps for servers
#define INTERIOR_POINTERS //Allow interior pointers in the code manager
#define CARD_BUNDLE //enable card bundle feature. (requires WRITE_WATCH)
// If this is defined we use a map for segments in order to find the heap for
// a segment quickly. But it does use more memory as we have to cover the whole
// heap range and for each entry we allocate a struct of 5 ptr-size words
// (3 for WKS as there's only one heap).
#define SEG_MAPPING_TABLE
// If allocating the heap mapping table for the available VA consumes too
// much memory, you can enable this to allocate only the portion that
// corresponds to rw segments and grow it when needed in grow_brick_card_table.
// However, in heap_of you will always need to compare the address with
// g_lowest/highest before you can look at the heap mapping table.
#define GROWABLE_SEG_MAPPING_TABLE
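// A minimal sketch of that range check (illustrative only;
// "g_lowest_address"/"g_highest_address" stand in for the actual globals):
//   if ((o < g_lowest_address) || (o >= g_highest_address))
//       return 0; // not a managed address - the table may not cover it
//   // only now is it safe to index the heap mapping table with o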
#define MARK_ARRAY //Mark bit in an array
#endif //BACKGROUND_GC
#if defined(BACKGROUND_GC) || defined (CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
#define WRITE_WATCH //Write Watch feature
#endif //BACKGROUND_GC || CARD_BUNDLE || FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#define array_size 100
//#define SHORT_PLUGS //keep plugs short
#define FFIND_OBJECT //faster find_object, slower allocation
#define FFIND_DECAY 7 //Number of GCs for which fast find will be active
//#define NO_WRITE_BARRIER //no write barrier, use Write Watch feature
//#define DEBUG_WRITE_WATCH //Additional debug for write watch
//#define STRESS_PINNING //Stress pinning by pinning randomly
//#define TRACE_GC //debug trace gc operation
//#define SIMPLE_DPRINTF
//#define TIME_GC //time allocation and garbage collection
//#define TIME_WRITE_WATCH //time GetWriteWatch and ResetWriteWatch calls
//#define COUNT_CYCLES //Use cycle counter for timing
//#define JOIN_STATS //amount of time spent in the join
//also, see TIME_SUSPEND in switches.h.
//#define SYNCHRONIZATION_STATS
//#define SEG_REUSE_STATS
#if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
#define BEGIN_TIMING(x) \
x##_start = GCToOSInterface::QueryPerformanceCounter()
#define END_TIMING(x) \
x##_end = GCToOSInterface::QueryPerformanceCounter(); \
x += x##_end - x##_start
#define BEGIN_TIMING(x)
#define END_TIMING(x)
#define BEGIN_TIMING_CYCLES(x)
#define END_TIMING_CYCLES(x)
#endif //SYNCHRONIZATION_STATS || STAGE_STATS
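// Illustrative usage (the x##_start/x##_end counters are assumed to be
// declared by the stats code; "gc_during_log" is a hypothetical name):
//   BEGIN_TIMING (gc_during_log);
//   ... work being timed ...
//   END_TIMING (gc_during_log);
// brackets the work with QueryPerformanceCounter calls and accumulates the
// delta into gc_during_log; with the stats features off, both macros
// compile away to nothing.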
/* End of optional features */
#ifdef GC_CONFIG_DRIVEN
void GCLogConfig (const char *fmt, ... );
#define cprintf(x) {GCLogConfig x;}
#endif //GC_CONFIG_DRIVEN
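// Note the double parentheses at call sites - the macro forwards the whole
// printf-style argument list to GCLogConfig, e.g. (hypothetical message):
//   cprintf (("h%d: compacting gen%d", heap_number, gen_number));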
// For the bestfit algorithm when we relocate ephemeral generations into an
// existing gen2 segment.
// We recorded sizes from 2^6, 2^7, 2^8... up to 2^30 (1GB). So that's 25 sizes total.
#define MIN_INDEX_POWER2 6
#define MAX_INDEX_POWER2 30
#define MAX_INDEX_POWER2 26
#define MAX_INDEX_POWER2 28
#define MAX_INDEX_POWER2 24
#define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
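// For example, with MIN_INDEX_POWER2 == 6 and MAX_INDEX_POWER2 == 30 this
// comes out to 30 - 6 + 1 == 25 buckets, matching the 25 recorded sizes
// mentioned above.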
#define MAX_NUM_FREE_SPACES 200
#define MIN_NUM_FREE_SPACES 5
//Please leave these definitions intact.
#ifdef FEATURE_STRUCTALIGN
#define REQD_ALIGN_DCL ,int requiredAlignment
#define REQD_ALIGN_ARG ,requiredAlignment
#define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
#define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
#define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
#else // FEATURE_STRUCTALIGN
#define REQD_ALIGN_DCL
#define REQD_ALIGN_ARG
#define REQD_ALIGN_AND_OFFSET_DCL
#define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
#define REQD_ALIGN_AND_OFFSET_ARG
#endif // FEATURE_STRUCTALIGN
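// Illustrative: a declaration such as allocate_in_older_generation below,
//   uint8_t* allocate_in_older_generation (generation* gen, size_t size,
//                                          int from_gen_number,
//                                          uint8_t* old_loc
//                                          REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
// only picks up the trailing ", int requiredAlignment = DATA_ALIGNMENT,
// size_t alignmentOffset = 0" parameters when FEATURE_STRUCTALIGN is
// defined; call sites forward them with REQD_ALIGN_AND_OFFSET_ARG.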
#ifdef MULTIPLE_HEAPS
#define THREAD_NUMBER_DCL ,int thread
#define THREAD_NUMBER_ARG ,thread
#define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
#define THREAD_FROM_HEAP int thread = heap_number;
#define HEAP_FROM_THREAD gc_heap* hpt = gc_heap::g_heaps[thread];
#define THREAD_NUMBER_DCL
#define THREAD_NUMBER_ARG
#define THREAD_NUMBER_FROM_CONTEXT
#define THREAD_FROM_HEAP
#define HEAP_FROM_THREAD gc_heap* hpt = 0;
#endif //MULTIPLE_HEAPS
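// Illustrative: a method body that needs the owning heap typically starts
// with
//   THREAD_NUMBER_FROM_CONTEXT
//   HEAP_FROM_THREAD
// which under MULTIPLE_HEAPS binds "thread" from the scan context and
// "hpt" to gc_heap::g_heaps[thread], and otherwise leaves hpt null with no
// per-thread bookkeeping.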
//These constants are ordered
const int policy_sweep = 0;
const int policy_compact = 1;
const int policy_expand = 2;
extern int print_level;
extern BOOL trace_gc;
extern int gc_trace_fac;
static hlet* bindings;
hlet (int& place, int value)
#define let(p,v) hlet __x = hlet (p, v);
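// Illustrative: a statement like "let (print_level, 4);" constructs an
// hlet named __x that (presumably) saves the old value of print_level,
// sets it to 4, and restores it when __x is destroyed at the end of the
// enclosing scope.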
#define SEG_REUSE_LOG_0 7
#define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
#define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
#define BGC_LOG (DT_LOG_0 + 1)
#define GTC_LOG (DT_LOG_0 + 2)
#define GC_TABLE_LOG (DT_LOG_0 + 3)
#define JOIN_LOG (DT_LOG_0 + 4)
#define SPINLOCK_LOG (DT_LOG_0 + 5)
#define SNOOP_LOG (DT_LOG_0 + 6)
#ifndef DACCESS_COMPILE
#ifdef SIMPLE_DPRINTF
//#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
void GCLog (const char *fmt, ... );
//#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
//#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
//#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
//#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
//#define dprintf(l,x) {if ((l == 1) || (l == 2222)) {GCLog x;}}
#define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
//#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
//#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
//#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
#else //SIMPLE_DPRINTF
// The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
// reg key GCTraceFacility is set. The stress log can only take a format string and 4 numbers or string literals.
#define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \
if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \
else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \
else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}}
#endif //SIMPLE_DPRINTF
#else //DACCESS_COMPILE
#endif //DACCESS_COMPILE
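// Call sites pass the level first and the printf-style arguments in an
// extra set of parentheses, as in (hypothetical message):
//   dprintf (GTC_LOG, ("h%d: condemning gen%d", heap_number, n));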
#ifndef FEATURE_REDHAWK
#define assert _ASSERTE
#define ASSERT _ASSERTE
#endif // FEATURE_REDHAWK
struct GCDebugSpinLock {
VOLATILE(int32_t) lock; // -1 if free, 0 if held
VOLATILE(Thread *) holding_thread; // -1 if no thread holds the lock.
VOLATILE(BOOL) released_by_gc_p; // a GC thread released the lock.
: lock(-1), holding_thread((Thread*) -1)
typedef GCDebugSpinLock GCSpinLock;
#elif defined (SYNCHRONIZATION_STATS)
struct GCSpinLockInstru {
VOLATILE(int32_t) lock;
// number of times we went into SwitchToThread in enter_spin_lock.
unsigned int num_switch_thread;
// number of times we went into WaitLonger.
unsigned int num_wait_longer;
// number of times we ended up calling SwitchToThread in WaitLonger.
unsigned int num_switch_thread_w;
// number of times we ended up calling DisablePreemptiveGC in WaitLonger.
unsigned int num_disable_preemptive_w;
: lock(-1), num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
num_switch_thread = 0;
num_switch_thread_w = 0;
num_disable_preemptive_w = 0;
typedef GCSpinLockInstru GCSpinLock;
struct GCDebugSpinLock {
VOLATILE(int32_t) lock; // -1 if free, 0 if held
typedef GCDebugSpinLock GCSpinLock;
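// A minimal sketch of how such a lock word is typically acquired (assumed
// semantics; the real enter_spin_lock logic lives elsewhere):
//   while (Interlocked::CompareExchange (&lock_p->lock, 0, -1) != -1)
//       GCToOSInterface::YieldThread (0); // spin: -1 == free, 0 == held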
class seg_free_spaces;
class exclusive_sync;
class recursive_gc_sync;
#endif //BACKGROUND_GC
// The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
// make sure you change that one if you change this one!
pause_batch = 0, //We are not concerned about pause length
pause_interactive = 1, //We are running an interactive app
pause_low_latency = 2, //short pauses are essential
//avoid long pauses from blocking full GCs unless running out of memory
pause_sustained_low_latency = 3,
enum gc_loh_compaction_mode
loh_compaction_default = 1, // the default mode, don't compact LOH.
loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
enum set_pause_mode_status
set_pause_mode_success = 0,
set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
tuning_deciding_condemned_gen,
tuning_deciding_full_gc,
tuning_deciding_compaction,
tuning_deciding_expansion,
tuning_deciding_promote_ephemeral
#if defined(TRACE_GC) && defined(BACKGROUND_GC)
static const char * const str_bgc_state[] =
#endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
enum allocation_state
a_state_can_allocate,
a_state_cant_allocate,
a_state_try_fit_new_seg,
a_state_try_fit_new_seg_after_cg,
a_state_try_fit_no_seg,
a_state_try_fit_after_cg,
a_state_try_fit_after_bgc,
a_state_try_free_full_seg_in_bgc,
a_state_try_free_after_bgc,
a_state_acquire_seg_after_cg,
a_state_acquire_seg_after_bgc,
a_state_check_and_wait_for_bgc,
a_state_trigger_full_compact_gc,
a_state_trigger_ephemeral_gc,
a_state_trigger_2nd_ephemeral_gc,
a_state_check_retry_seg,
gc_type_compacting = 0,
gc_type_blocking = 1,
gc_type_background = 2,
#endif //BACKGROUND_GC
#define v_high_memory_load_th 97
//encapsulates the mechanism for the current gc
VOLATILE(size_t) gc_index; // starts from 1 for the first GC, like dd_collection_count
int condemned_generation;
int gen0_reduction_count;
BOOL should_lock_elevation;
int elevation_locked_count;
BOOL elevation_reduced;
gc_pause_mode pause_mode;
BOOL found_finalizers;
BOOL allocations_allowed;
#endif //BACKGROUND_GC
#endif // STRESS_HEAP
uint32_t entry_memory_load;
void init_mechanisms(); //for each GC
void first_init(); // for the life of the EE
void record (gc_history_global* history);
// This is a compact version of gc_mechanisms that we use to save in the history.
class gc_mechanisms_store
bool should_lock_elevation;
int condemned_generation : 8;
int gen0_reduction_count : 8;
int elevation_locked_count : 8;
gc_reason reason : 8;
gc_pause_mode pause_mode : 8;
bgc_state b_state : 8;
#endif //BACKGROUND_GC
bool found_finalizers;
#endif //BACKGROUND_GC
#endif // STRESS_HEAP
uint32_t entry_memory_load;
void store (gc_mechanisms* gm)
gc_index = gm->gc_index;
condemned_generation = gm->condemned_generation;
promotion = (gm->promotion != 0);
compaction = (gm->compaction != 0);
loh_compaction = (gm->loh_compaction != 0);
heap_expansion = (gm->heap_expansion != 0);
concurrent = (gm->concurrent != 0);
demotion = (gm->demotion != 0);
card_bundles = (gm->card_bundles != 0);
gen0_reduction_count = gm->gen0_reduction_count;
should_lock_elevation = (gm->should_lock_elevation != 0);
elevation_locked_count = gm->elevation_locked_count;
pause_mode = gm->pause_mode;
found_finalizers = (gm->found_finalizers != 0);
background_p = (gm->background_p != 0);
b_state = gm->b_state;
#endif //BACKGROUND_GC
stress_induced = (gm->stress_induced != 0);
#endif // STRESS_HEAP
entry_memory_load = gm->entry_memory_load;
// GC specific statistics, tracking counts and timings for GCs occurring in the system.
// This writes the statistics to a file every 60 seconds, if a file is specified in
: public StatisticsBase
// initialized to the contents of COMPlus_GcMixLog, or NULL, if not present
static TCHAR* logFileName;
static FILE* logFile;
// number of times we executed a background GC, a foreground GC, or a
// non-concurrent GC.
int cntBGC, cntFGC, cntNGC;
// min, max, and total time spent performing BGCs, FGCs, NGCs
// (BGC time includes everything between the moment the BGC starts until
// it completes, i.e. the times of all FGCs occurring concurrently)
MinMaxTot bgc, fgc, ngc;
// number of times we executed a compacting GC (sweeping counts can be derived)
int cntCompactNGC, cntCompactFGC;
int cntReasons[reason_max];
// count of condemned generation, by NGC and FGC:
int cntNGCGen[max_generation+1];
int cntFGCGen[max_generation];
///////////////////////////////////////////////////////////////////////////////////////////////
// Internal mechanism:
virtual void Initialize();
virtual void DisplayAndUpdate();
static BOOL Enabled()
{ return logFileName != NULL; }
void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
extern GCStatistics g_GCStatistics;
extern GCStatistics g_LastGCStatistics;
typedef DPTR(class heap_segment) PTR_heap_segment;
typedef DPTR(class gc_heap) PTR_gc_heap;
typedef DPTR(PTR_gc_heap) PTR_PTR_gc_heap;
#ifdef FEATURE_PREMORTEM_FINALIZATION
typedef DPTR(class CFinalize) PTR_CFinalize;
#endif // FEATURE_PREMORTEM_FINALIZATION
//-------------------------------------
//generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size
//and doubling each time. The last bucket (index == num_buckets) is for the largest sizes with no limit.
#define MAX_BUCKET_COUNT (13) //Max number of buckets for the small generations.
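// Illustrative bucketing (a first_bucket_size of 256 is hypothetical):
// free items of size < 256 land in bucket 0, [256, 512) in bucket 1,
// [512, 1024) in bucket 2, and so on, with the last bucket taking every
// size too large for the bounded buckets.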
#ifdef FL_VERIFICATION
#endif //FL_VERIFICATION
uint8_t*& alloc_list_head () { return head;}
uint8_t*& alloc_list_tail () { return tail;}
size_t& alloc_list_damage_count(){ return damage_count; }
size_t frst_bucket_size;
alloc_list first_bucket;
alloc_list& alloc_list_of (unsigned int bn);
size_t& alloc_list_damage_count_of (unsigned int bn);
allocator (unsigned int num_b, size_t fbs, alloc_list* b);
frst_bucket_size = SIZE_T_MAX;
unsigned int number_of_buckets() {return (unsigned int)num_buckets;}
size_t first_bucket_size() {return frst_bucket_size;}
uint8_t*& alloc_list_head_of (unsigned int bn)
return alloc_list_of (bn).alloc_list_head();
uint8_t*& alloc_list_tail_of (unsigned int bn)
return alloc_list_of (bn).alloc_list_tail();
BOOL discard_if_no_fit_p()
return (num_buckets == 1);
// This is when we know there's nothing to repair because this free
// list has never gone through plan phase. Right now it's only used
// by the background ephemeral sweep when we copy the local free list
// to gen0's free list.
// We copy head and tail manually (vs together like copy_to_alloc_list)
// since we need to copy tail first because when we get the free items off
// of each bucket we check head first. We also need to copy the
// smaller buckets first so when gen0 allocation needs to thread
// smaller items back that bucket is guaranteed to have been fully copied.
void copy_with_no_repair (allocator* allocator_to_copy)
assert (num_buckets == allocator_to_copy->number_of_buckets());
for (unsigned int i = 0; i < num_buckets; i++)
alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
alloc_list_tail_of(i) = al->alloc_list_tail();
alloc_list_head_of(i) = al->alloc_list_head();
void unlink_item (unsigned int bucket_number, uint8_t* item, uint8_t* previous_item, BOOL use_undo_p);
void thread_item (uint8_t* item, size_t size);
void thread_item_front (uint8_t* item, size_t size);
void thread_free_item (uint8_t* free_item, uint8_t*& head, uint8_t*& tail);
void copy_to_alloc_list (alloc_list* toalist);
void copy_from_alloc_list (alloc_list* fromalist);
void commit_alloc_list_changes();
#define NUM_GEN_POWER2 (20)
#define BASE_GEN_SIZE (1*512)
// group the frequently used ones together (need instrumentation on accessors)
// Don't move these first two fields without adjusting the references
// from the __asm in jitinterface.cpp.
alloc_context allocation_context;
PTR_heap_segment start_segment;
uint8_t* allocation_start;
heap_segment* allocation_segment;
uint8_t* allocation_context_start_region;
allocator free_list_allocator;
size_t free_list_allocated;
size_t end_seg_allocated;
BOOL allocate_end_seg_p;
size_t condemned_allocated;
size_t free_list_space;
size_t free_obj_space;
size_t allocation_size;
uint8_t* plan_allocation_start;
size_t plan_allocation_start_size;
// this is the size of the pinned plugs that got allocated into this gen.
size_t pinned_allocated;
size_t pinned_allocation_compact_size;
size_t pinned_allocation_sweep_size;
#ifdef FREE_USAGE_STATS
size_t gen_free_spaces[NUM_GEN_POWER2];
// these are non pinned plugs only
size_t gen_plugs[NUM_GEN_POWER2];
size_t gen_current_pinned_free_spaces[NUM_GEN_POWER2];
size_t pinned_free_obj_space;
// this is what got allocated into the pinned free spaces.
size_t allocated_in_pinned_free;
size_t allocated_since_last_pin;
#endif //FREE_USAGE_STATS
static_assert(offsetof(dac_generation, allocation_context) == offsetof(generation, allocation_context), "DAC generation offset mismatch");
static_assert(offsetof(dac_generation, start_segment) == offsetof(generation, start_segment), "DAC generation offset mismatch");
static_assert(offsetof(dac_generation, allocation_start) == offsetof(generation, allocation_start), "DAC generation offset mismatch");
// The dynamic data fields are grouped into 3 categories:
// calculated logical data (like desired_allocation)
// physical data (like fragmentation)
// const data (like min_gc_size), initialized at the beginning
ptrdiff_t new_allocation;
ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
size_t desired_allocation;
// # of bytes taken by objects (ie, not free space) at the beginning
// of a GC.
size_t begin_data_size;
// # of bytes taken by survived objects after mark.
size_t survived_size;
// # of bytes taken by survived pinned plugs after mark.
size_t pinned_survived_size;
size_t artificial_pinned_survived_size;
size_t added_pinned_size;
#if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
// # of plugs that are not pinned plugs.
size_t num_npinned_plugs;
#endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
//total object size after a GC, ie, doesn't include fragmentation
size_t collection_count;
size_t promoted_size;
size_t freach_previous_promotion;
size_t fragmentation; //fragmentation when we don't compact
size_t gc_clock; //gc# when last GC happened
size_t time_clock; //time when last gc started
size_t gc_elapsed_time; // Time it took for the gc to complete
float gc_speed; // speed in bytes/msec for the gc to complete
// min_size is always the same as min_gc_size.
size_t default_new_allocation;
size_t fragmentation_limit;
float fragmentation_burden_limit;
#define ro_in_entry 0x1
#ifdef SEG_MAPPING_TABLE
// Note that I am storing both h0 and seg0, even though in Server GC you can get to
// the heap* from the segment info. This is because heap_of needs to be really fast
// and we would not want yet another indirection.
// if an address is > boundary it belongs to h1; else h0.
// Since we init h0 and h1 to 0, getting 0 means the
// address doesn't exist on managed segments, and heap_of
// would just return heap0, which is what it does now.
#ifdef MULTIPLE_HEAPS
#endif //MULTIPLE_HEAPS
// You could have an address that's in between 2 segments and
// this would return a seg; the caller will then use
// in_range_for_segment to determine if it's on that seg.
heap_segment* seg0; // this is what the seg for h0 is.
heap_segment* seg1; // this is what the seg for h1 is.
// Note that when frozen objects are used we mask seg1
// with 0x1 to indicate that there is a ro segment for
// this heap.
#endif //SEG_MAPPING_TABLE
//Alignment constant for allocation
#define ALIGNCONST (DATA_ALIGNMENT-1)
size_t Align (size_t nbytes, int alignment=ALIGNCONST)
return (nbytes + alignment) & ~alignment;
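// e.g. with DATA_ALIGNMENT == 8 (so ALIGNCONST == 7):
//   Align (13) == (13 + 7) & ~7 == 16, and Align (16) == 16 (already
//   aligned sizes are unchanged).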
//return alignment constant for small object heap vs large object heap
int get_alignment_constant (BOOL small_object_p)
#ifdef FEATURE_STRUCTALIGN
// If any objects on the large object heap require 8-byte alignment,
// the compiler will tell us so. Let's not guess an alignment here.
#else // FEATURE_STRUCTALIGN
return small_object_p ? ALIGNCONST : 7;
#endif // FEATURE_STRUCTALIGN
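// i.e. without FEATURE_STRUCTALIGN, large objects always use an 8-byte
// alignment mask (7); the distinction only matters on 32-bit, where
// DATA_ALIGNMENT (and thus ALIGNCONST + 1) is typically 4 for small
// objects.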
size_t desired_allocation;
size_t new_allocation;
enum alloc_wait_reason
// When we don't care about firing an event for
// when we detect we are in low memory
// when we detect the ephemeral segment is too full
awr_low_ephemeral = 1,
// we've given out too much budget for gen0.
// we've given out too much budget for loh.
// this event is really obsolete - it's for pre-XP
// OSs where low mem notification is not supported.
awr_alloc_loh_low_mem = 4,
// we ran out of VM space to reserve on loh.
// ran out of space when allocating a small object
awr_gen0_oos_bgc = 6,
// ran out of space when allocating a large object
// waiting for BGC to let FGC happen
awr_fgc_wait_for_bgc = 8,
// wait for bgc to finish to get loh seg.
// we don't allow loh allocation during bgc planning.
awr_loh_alloc_during_plan = 10,
// we don't allow too much loh allocation during bgc.
awr_loh_alloc_during_bgc = 11
struct alloc_thread_wait_data
msl_enter_state enter_state;
msl_take_state take_state;
EEThreadId thread_id;
const unsigned HS_CACHE_LINE_SIZE = 128;
struct snoop_stats_data
// total number of objects that we called
size_t objects_checked_count;
// total number of times we called gc_mark
size_t zero_ref_count;
// total objects actually marked.
size_t objects_marked_count;
// number of objects written to the mark stack because
size_t stolen_stack_count;
// number of objects pushed onto the mark stack because
// of the partial mark code path.
size_t partial_stack_count;
// number of objects pushed onto the mark stack because
// of the non partial mark code path.
size_t normal_stack_count;
// number of references marked without mark stack.
size_t non_stack_count;
// number of times we detect next heap's mark stack
size_t stack_idle_count;
// number of times we do switch to thread.
size_t switch_to_thread_count;
// number of times we are checking if the next heap's
// mark stack is busy.
size_t check_level_count;
// number of times next stack is busy and level is
// how many interlocked exchange operations we did
size_t interlocked_count;
// number of times parent objects were stolen
size_t partial_mark_parent_count;
// number of times we look at a normal stolen entry,
// or the beginning/ending PM pair.
size_t stolen_or_pm_count;
// number of times we see 2 for the entry.
size_t stolen_entry_count;
// number of times we see a PM entry that's not ready.
size_t pm_not_ready_count;
// number of stolen normal marked objects and partial mark children.
size_t normal_count;
// number of times the bottom of mark stack was cleared.
size_t stack_bottom_clear_count;
#endif //SNOOP_STATS
struct no_gc_region_info
size_t soh_allocation_size;
size_t loh_allocation_size;
size_t num_gcs_induced;
start_no_gc_region_status start_status;
gc_pause_mode saved_pause_mode;
size_t saved_gen0_min_size;
size_t saved_gen3_min_size;
// if you change these, make sure you update them for sos (strike.cpp) as well.
// Right now I am only recording data from blocking GCs. When recording from BGC,
// it should have its own copy just like gc_data_per_heap.
// For BGCs we will have a very different set of datapoints to record.
enum interesting_data_point
idp_converted_pin = 3,
idp_pre_and_post_pin = 6,
idp_pre_short_padded = 7,
idp_post_short_padded = 8,
//class definition of the internal class
friend struct ::_DacGlobals;
#ifdef DACCESS_COMPILE
friend class ::ClrDataAccess;
friend class ::DacHeapWalker;
#endif //DACCESS_COMPILE
friend class GCHeap;
#ifdef FEATURE_PREMORTEM_FINALIZATION
friend class CFinalize;
#endif // FEATURE_PREMORTEM_FINALIZATION
friend struct ::alloc_context;
friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
friend class t_join;
friend class gc_mechanisms;
friend class seg_free_spaces;
#ifdef BACKGROUND_GC
friend class exclusive_sync;
friend class recursive_gc_sync;
#endif //BACKGROUND_GC
#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
friend void checkGCWriteBarrier();
friend void initGCShadow();
#endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
friend void PopulateDacVars(GcDacVars *gcDacVars);
#ifdef MULTIPLE_HEAPS
typedef void (gc_heap::* card_fn) (uint8_t**, int);
#define call_fn(fn) (this->*fn)
typedef void (* card_fn) (uint8_t**);
#define call_fn(fn) (*fn)
#define __this (gc_heap*)0
void print_free_list (int gen, heap_segment* seg);
#ifdef SYNCHRONIZATION_STATS
void init_sync_stats()
#ifdef MULTIPLE_HEAPS
for (int i = 0; i < gc_heap::n_heaps; i++)
gc_heap::g_heaps[i]->init_heap_sync_stats();
#else //MULTIPLE_HEAPS
init_heap_sync_stats();
#endif //MULTIPLE_HEAPS
void print_sync_stats(unsigned int gc_count_during_log)
// bad/good gl acquire is cumulative over the log interval (because the numbers are too small)
// min/max msl_acquire is the min/max during the log interval, not each GC.
// Threads is the number of allocation threads for the last GC.
// num of msl acquired, avg_msl, high and low are all for each GC.
printf("%2s%2s%10s%10s%12s%6s%4s%8s( st, wl, stw, dpw)\n",
"H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
#ifdef MULTIPLE_HEAPS
for (int i = 0; i < gc_heap::n_heaps; i++)
gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
#else //MULTIPLE_HEAPS
print_heap_sync_stats(0, gc_count_during_log);
#endif //MULTIPLE_HEAPS
#endif //SYNCHRONIZATION_STATS
void verify_soh_segment_list();
void verify_mark_array_cleared (heap_segment* seg);
void verify_mark_array_cleared();
void verify_seg_end_mark_array_cleared();
void verify_partial();
void verify_free_lists();
void verify_heap (BOOL begin_gc_p);
#endif //VERIFY_HEAP
void fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num);
void fire_pevents();
#ifdef FEATURE_BASICFREEZE
static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
heap_segment* make_heap_segment (uint8_t* new_pages,
l_heap* make_large_heap (uint8_t* new_pages, size_t size, BOOL managed);
gc_heap* make_gc_heap(
#if defined (MULTIPLE_HEAPS)
#endif //MULTIPLE_HEAPS
void destroy_gc_heap(gc_heap* heap);
HRESULT initialize_gc (size_t segment_size,
#ifdef MULTIPLE_HEAPS
, unsigned number_of_heaps
#endif //MULTIPLE_HEAPS
CObjectHeader* allocate (size_t jsize,
alloc_context* acontext);
#ifdef MULTIPLE_HEAPS
static void balance_heaps (alloc_context* acontext);
gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
void gc_thread_stub (void* arg);
#endif //MULTIPLE_HEAPS
CObjectHeader* try_fast_alloc (size_t jsize);
// For LOH allocations we only update the alloc_bytes_loh in allocation
// context - we don't actually use the ptr/limit from it so I am
// making this explicit by not passing in the alloc_context.
CObjectHeader* allocate_large_object (size_t size, int64_t& alloc_bytes);
#ifdef FEATURE_STRUCTALIGN
uint8_t* pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size);
#endif // FEATURE_STRUCTALIGN
BOOL expand_soh_with_minimal_gc();
// EE is always suspended when this method is called.
// returning FALSE means we actually didn't do a GC. This happens
// when we figured that we needed to do a BGC.
int garbage_collect (int n);
void init_records();
uint32_t* make_card_table (uint8_t* start, uint8_t* end);
void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
int grow_brick_card_tables (uint8_t* start,
heap_segment* new_seg,
BOOL is_mark_set (uint8_t* o);
#ifdef FEATURE_BASICFREEZE
bool frozen_object_p(Object* obj);
#endif // FEATURE_BASICFREEZE
void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
struct walk_relocate_args
mark* pinned_plug_entry;
void* profiling_context;
void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
walk_relocate_args* args);
void walk_relocation (void* profiling_context, record_surv_fn fn);
void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
void walk_finalize_queue (fq_walk_fn fn);
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
// used in blocking GCs after plan phase so this walks the plugs.
void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
void walk_survivors_for_loh (void* profiling_context, record_surv_fn fn);
int generation_to_condemn (int n,
BOOL* blocking_collection_p,
BOOL* elevation_requested_p,
int joined_generation_to_condemn (BOOL should_evaluate_elevation, int n_initial, BOOL* blocking_collection
STRESS_HEAP_ARG(int n_original));
size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
void concurrent_print_time_delta (const char* msg);
void free_list_info (int gen_num, const char* msg);
// in svr GC on entry and exit of this method, the GC threads are not
void save_data_for_no_gc();
void restore_data_for_no_gc();
void update_collection_counts_for_no_gc();
BOOL should_proceed_with_gc();
void record_gcs_during_no_gc();
BOOL find_loh_free_for_no_gc();
BOOL find_loh_space_for_no_gc();
BOOL commit_loh_for_no_gc (heap_segment* seg);
start_no_gc_region_status prepare_for_no_gc_region (uint64_t total_size,
BOOL loh_size_known,
BOOL disallow_full_blocking);
BOOL loh_allocated_for_no_gc();
void release_no_gc_loh_segments();
void thread_no_gc_loh_segments();
void check_and_set_no_gc_oom();
void allocate_for_no_gc_after_gc();
void set_loh_allocations_for_no_gc();
void set_soh_allocations_for_no_gc();
void prepare_for_no_gc_after_gc();
void set_allocations_for_no_gc();
BOOL should_proceed_for_no_gc();
start_no_gc_region_status get_start_no_gc_region_status();
end_no_gc_region_status end_no_gc_region();
void handle_failure_for_no_gc();
void fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address);
void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
size_t limit_from_size (size_t size, size_t room, int gen_number,
int try_allocate_more_space (alloc_context* acontext, size_t jsize,
int alloc_generation_number);
BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
int alloc_generation_number);
size_t get_full_compact_gc_count();
BOOL short_on_end_of_seg (int gen_number,
BOOL a_fit_free_list_p (int gen_number,
alloc_context* acontext,
#ifdef BACKGROUND_GC
void wait_for_background (alloc_wait_reason awr);
void wait_for_bgc_high_memory (alloc_wait_reason awr);
void bgc_loh_alloc_clr (uint8_t* alloc_start,
alloc_context* acontext,
#endif //BACKGROUND_GC
#ifdef BACKGROUND_GC
void wait_for_background_planning (alloc_wait_reason awr);
BOOL bgc_loh_should_allocate();
#endif //BACKGROUND_GC
#define max_saved_spinlock_info 48
#ifdef SPINLOCK_HISTORY
int spinlock_info_index;
spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
#endif //SPINLOCK_HISTORY
void add_saved_spinlock_info (
msl_enter_state enter_state,
msl_take_state take_state);
BOOL a_fit_free_list_large_p (size_t size,
alloc_context* acontext,
BOOL a_fit_segment_end_p (int gen_number,
alloc_context* acontext,
BOOL* commit_failed_p);
BOOL loh_a_fit_segment_end_p (int gen_number,
alloc_context* acontext,
BOOL* commit_failed_p,
BOOL loh_get_new_seg (generation* gen,
BOOL* commit_failed_p,
size_t get_large_seg_size (size_t size);
BOOL retry_full_compact_gc (size_t size);
BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
BOOL* did_full_compact_gc);
BOOL trigger_full_compact_gc (gc_reason gr,
BOOL trigger_ephemeral_gc (gc_reason gr);
BOOL soh_try_fit (int gen_number,
alloc_context* acontext,
BOOL* commit_failed_p,
BOOL* short_seg_end_p);
BOOL loh_try_fit (int gen_number,
alloc_context* acontext,
BOOL* commit_failed_p,
BOOL allocate_small (int gen_number,
alloc_context* acontext,
#ifdef RECORD_LOH_STATE
#define max_saved_loh_states 12
int loh_state_index;
struct loh_state_info
allocation_state alloc_state;
EEThreadId thread_id;
loh_state_info last_loh_states[max_saved_loh_states];
void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
#endif //RECORD_LOH_STATE
BOOL allocate_large (int gen_number,
alloc_context* acontext,
int init_semi_shared();
int init_gc_heap (int heap_number);
void self_destroy();
void destroy_semi_shared();
void repair_allocation_contexts (BOOL repair_p);
void fix_allocation_contexts (BOOL for_gc_p);
void fix_youngest_allocation_area (BOOL for_gc_p);
void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
void fix_large_allocation_area (BOOL for_gc_p);
void fix_older_allocation_area (generation* older_gen);
void set_allocation_heap_segment (generation* gen);
void reset_allocation_pointers (generation* gen, uint8_t* start);
int object_gennum (uint8_t* o);
int object_gennum_plan (uint8_t* o);
void init_heap_segment (heap_segment* seg);
void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
#ifdef FEATURE_BASICFREEZE
BOOL insert_ro_segment (heap_segment* seg);
void remove_ro_segment (heap_segment* seg);
#endif //FEATURE_BASICFREEZE
BOOL set_ro_segment_in_range (heap_segment* seg);
BOOL unprotect_segment (heap_segment* seg);
heap_segment* soh_get_segment_to_expand();
heap_segment* get_segment (size_t size, BOOL loh_p);
void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
void seg_mapping_table_remove_segment (heap_segment* seg);
heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc);
void thread_loh_segment (heap_segment* new_seg);
heap_segment* get_segment_for_loh (size_t size
#ifdef MULTIPLE_HEAPS
#endif //MULTIPLE_HEAPS
void reset_heap_segment_pages (heap_segment* seg);
void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
void decommit_heap_segment (heap_segment* seg);
void clear_gen0_bricks();
#ifdef BACKGROUND_GC
void rearrange_small_heap_segments();
#endif //BACKGROUND_GC
void rearrange_large_heap_segments();
void rearrange_heap_segments(BOOL compacting);
void reset_write_watch_for_gc_heap(void* base_address, size_t region_size);
void get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended);
void switch_one_quantum();
void reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size);
void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
void reset_write_watch (BOOL concurrent_p);
void adjust_ephemeral_limits ();
void make_generation (generation& gen, heap_segment* seg,
uint8_t* start, uint8_t* pointer);
#define USE_PADDING_FRONT 1
#define USE_PADDING_TAIL 2
BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
uint8_t* old_loc=0, int use_padding=USE_PADDING_TAIL);
BOOL a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
void handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
uint8_t* allocated, uint8_t* reserved);
size_t card_of ( uint8_t* object);
uint8_t* brick_address (size_t brick);
size_t brick_of (uint8_t* add);
uint8_t* card_address (size_t card);
size_t card_to_brick (size_t card);
void clear_card (size_t card);
void set_card (size_t card);
BOOL card_set_p (size_t card);
void card_table_set_bit (uint8_t* location);
void update_card_table_bundle();
void reset_card_table_write_watch();
void card_bundle_clear(size_t cardb);
void card_bundle_set (size_t cardb);
void card_bundles_set (size_t start_cardb, size_t end_cardb);
void verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word);
void verify_card_bundles();
BOOL card_bundle_set_p (size_t cardb);
BOOL find_card_dword (size_t& cardw, size_t cardw_end);
void enable_card_bundles();
BOOL card_bundles_enabled();
#endif //CARD_BUNDLE
BOOL find_card (uint32_t* card_table, size_t& card,
size_t card_word_end, size_t& end_card);
BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address);
int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
short* old_brick_table,
uint8_t* start, uint8_t* end);
void init_brick_card_range (heap_segment* seg);
void copy_brick_card_table_l_heap ();
void copy_brick_card_table();
void clear_brick_table (uint8_t* from, uint8_t* end);
void set_brick (size_t index, ptrdiff_t val);
int brick_entry (size_t index);
unsigned int mark_array_marked (uint8_t* add);
void mark_array_set_marked (uint8_t* add);
BOOL is_mark_bit_set (uint8_t* add);
void gmark_array_set_marked (uint8_t* add);
void set_mark_array_bit (size_t mark_bit);
BOOL mark_array_bit_set (size_t mark_bit);
void mark_array_clear_marked (uint8_t* add);
void clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only=TRUE
#ifdef FEATURE_BASICFREEZE
, BOOL read_only=FALSE
#endif // FEATURE_BASICFREEZE
#ifdef BACKGROUND_GC
void seg_clear_mark_array_bits_soh (heap_segment* seg);
void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
void clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p);
void set_batch_mark_array_bits (uint8_t* start, uint8_t* end);
void check_batch_mark_array_bits (uint8_t* start, uint8_t* end);
#endif //VERIFY_HEAP
#endif //BACKGROUND_GC
BOOL large_object_marked (uint8_t* o, BOOL clearp);
#ifdef BACKGROUND_GC
BOOL background_allowed_p();
#endif //BACKGROUND_GC
void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
void check_for_full_gc (int gen_num, size_t size);
void adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
void adjust_limit_clr (uint8_t* start, size_t limit_size,
alloc_context* acontext, heap_segment* seg,
int align_const, int gen_number);
void leave_allocation_segment (generation* gen);
void init_free_and_plug();
void print_free_and_plug (const char* msg);
void add_gen_plug (int gen_number, size_t plug_size);
void add_gen_free (int gen_number, size_t free_size);
void add_item_to_current_pinned_free (int gen_number, size_t free_size);
void remove_gen_free (int gen_number, size_t free_size);
uint8_t* allocate_in_older_generation (generation* gen, size_t size,
int from_gen_number,
REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
generation* ensure_ephemeral_heap_segment (generation* consing_gen);
uint8_t* allocate_in_condemned_generations (generation* gen,
int from_gen_number,
BOOL* convert_to_pinned_p=NULL,
uint8_t* next_pinned_plug=0,
heap_segment* current_seg=0,
#endif //SHORT_PLUGS
REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
#ifdef INTERIOR_POINTERS
// Verifies that interior is actually in the range of seg; otherwise
heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p);
heap_segment* find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p);
uint8_t* find_object_for_relocation (uint8_t* o, uint8_t* low, uint8_t* high);
#endif //INTERIOR_POINTERS
gc_heap* heap_of (uint8_t* object);
gc_heap* heap_of_gc (uint8_t* object);
size_t& promoted_bytes (int);
uint8_t* find_object (uint8_t* o, uint8_t* low);
dynamic_data* dynamic_data_of (int gen_number);
ptrdiff_t get_desired_allocation (int gen_number);
ptrdiff_t get_new_allocation (int gen_number);
ptrdiff_t get_allocation (int gen_number);
bool new_allocation_allowed (int gen_number);
#ifdef BACKGROUND_GC
void allow_new_allocation (int gen_number);
void disallow_new_allocation (int gen_number);
#endif //BACKGROUND_GC
void reset_pinned_queue();
void reset_pinned_queue_bos();
void set_allocator_next_pin (generation* gen);
void set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit);
void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len);
void enque_pinned_plug (uint8_t* plug,
BOOL save_pre_plug_info_p,
uint8_t* last_object_in_last_plug);
void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size);
void set_pinned_info (uint8_t* last_pinned_plug,
uint8_t* alloc_pointer,
uint8_t*& alloc_limit);
void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen);
void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug);
size_t deque_pinned_plug ();
mark* pinned_plug_of (size_t bos);
mark* oldest_pin ();
mark* before_oldest_pin();
BOOL pinned_plug_que_empty_p ();
void make_mark_stack (mark* arr);
int& mark_stack_busy();
VOLATILE(uint8_t*)& ref_mark_stack (gc_heap* hp, int index);
#ifdef BACKGROUND_GC
size_t& bpromoted_bytes (int);
void make_background_mark_stack (uint8_t** arr);
void make_c_mark_list (uint8_t** arr);
#endif //BACKGROUND_GC
generation* generation_of (int n);
BOOL gc_mark1 (uint8_t* o);
BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high);
uint8_t* mark_object(uint8_t* o THREAD_NUMBER_DCL);
void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
#endif //HEAP_ANALYZE
void mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
void mark_object_simple1 (uint8_t* o, uint8_t* start THREAD_NUMBER_DCL);
#ifdef BACKGROUND_GC
BOOL background_marked (uint8_t* o);
BOOL background_mark1 (uint8_t* o);
BOOL background_mark (uint8_t* o, uint8_t* low, uint8_t* high);
uint8_t* background_mark_object (uint8_t* o THREAD_NUMBER_DCL);
void background_mark_simple (uint8_t* o THREAD_NUMBER_DCL);
void background_mark_simple1 (uint8_t* o THREAD_NUMBER_DCL);
void background_promote (Object**, ScanContext* , uint32_t);
BOOL background_object_marked (uint8_t* o, BOOL clearp);
void init_background_gc();
uint8_t* background_next_end (heap_segment*, BOOL);
void generation_delete_heap_segment (generation*,
heap_segment*, heap_segment*, heap_segment*);
void set_mem_verify (uint8_t*, uint8_t*, uint8_t);
void process_background_segment_end (heap_segment*, generation*, uint8_t*,
heap_segment*, BOOL*);
void process_n_background_segments (heap_segment*, heap_segment*, generation* gen);
BOOL fgc_should_consider_object (uint8_t* o,
BOOL consider_bgc_mark_p,
BOOL check_current_sweep_p,
BOOL check_saved_sweep_p);
void should_check_bgc_mark (heap_segment* seg,
BOOL* consider_bgc_mark_p,
BOOL* check_current_sweep_p,
BOOL* check_saved_sweep_p);
void background_ephemeral_sweep();
void background_sweep ();
void background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL);
uint8_t* background_seg_end (heap_segment* seg, BOOL concurrent_p);
uint8_t* background_first_overflow (uint8_t* min_add,
BOOL small_object_p);
void background_process_mark_overflow_internal (int condemned_gen_number,
uint8_t* min_add, uint8_t* max_add,
BOOL background_process_mark_overflow (BOOL concurrent_p);
// for foreground GC to get hold of background structures containing refs
scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
BOOL bgc_mark_array_range (heap_segment* seg,
uint8_t** range_beg,
uint8_t** range_end);
void bgc_verify_mark_array_cleared (heap_segment* seg);
void verify_mark_bits_cleared (uint8_t* obj, size_t s);
void clear_all_mark_array();
#endif //BACKGROUND_GC
uint8_t* next_end (heap_segment* seg, uint8_t* f);
void fix_card_table ();
void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
BOOL process_mark_overflow (int condemned_gen_number);
void process_mark_overflow_internal (int condemned_gen_number,
uint8_t* min_address, uint8_t* max_address);
void print_snoop_stat();
#endif //SNOOP_STATS
BOOL check_next_mark_stack (gc_heap* next_heap);
void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
void mark_phase (int condemned_gen_number, BOOL mark_only_p);
void pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high);
#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
size_t get_total_pinned_objects();
#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
void reset_mark_stack ();
uint8_t* insert_node (uint8_t* new_node, size_t sequence_number,
uint8_t* tree, uint8_t* last_node);
size_t update_brick_table (uint8_t* tree, size_t current_brick,
uint8_t* x, uint8_t* plug_end);
void plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate);
void realloc_plan_generation_start (generation* gen, generation* consing_gen);
void plan_generation_starts (generation*& consing_gen);
void advance_pins_for_demotion (generation* gen);
void process_ephemeral_boundaries(uint8_t* x, int& active_new_gen_number,
int& active_old_gen_number,
generation*& consing_gen,
BOOL& allocate_in_condemned);
void seg_clear_mark_bits (heap_segment* seg);
void sweep_ro_segments (heap_segment* start_seg);
void convert_to_pinned_plug (BOOL& last_npinned_plug_p,
BOOL& last_pinned_plug_p,
BOOL& pinned_plug_p,
size_t& artificial_pinned_size);
void store_plug_gap_info (uint8_t* plug_start,
BOOL& last_npinned_plug_p,
BOOL& last_pinned_plug_p,
uint8_t*& last_pinned_plug,
BOOL& pinned_plug_p,
uint8_t* last_object_in_last_plug,
BOOL& merge_with_last_pin_p,
// this is only for verification purposes
size_t last_plug_len);
2142 void plan_phase (int condemned_gen_number);
2145 void record_interesting_data_point (interesting_data_point idp);
2147 #ifdef GC_CONFIG_DRIVEN
2149 void record_interesting_info_per_heap();
2151 void record_global_mechanisms();
2153 BOOL should_do_sweeping_gc (BOOL compact_p);
2154 #endif //GC_CONFIG_DRIVEN
2156 #ifdef FEATURE_LOH_COMPACTION
2157 // plan_loh can allocate memory so it can fail. If it fails, we will
2158 // fall back to sweeping.
2166 void relocate_in_loh_compact();
2169 void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn);
2172 BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
2175 void loh_set_allocator_next_pin();
2178 BOOL loh_pinned_plug_que_empty_p();
2181 size_t loh_deque_pinned_plug();
2184 mark* loh_pinned_plug_of (size_t bos);
2187 mark* loh_oldest_pin();
2190 BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit);
2193 uint8_t* loh_allocate_in_condemned (uint8_t* old_loc, size_t size);
2196 BOOL loh_object_p (uint8_t* o);
2199 BOOL should_compact_loh();
2201 // If the LOH compaction mode is just to compact once,
2202 // we need to see if we should reset it back to not compact.
2203 // We would only reset if every heap's LOH was compacted.
2205 void check_loh_compact_mode (BOOL all_heaps_compacted_p);
2206 #endif //FEATURE_LOH_COMPACTION
2209 void decommit_ephemeral_segment_pages (int condemned_gen_number);
2211 void fix_generation_bounds (int condemned_gen_number,
2212 generation* consing_gen);
2214 uint8_t* generation_limit (int gen_number);
2216 struct make_free_args
2218 int free_list_gen_number;
2219 uint8_t* current_gen_limit;
2220 generation* free_list_gen;
2221 uint8_t* highest_plug;
2224 uint8_t* allocate_at_end (size_t size);
2226 BOOL ensure_gap_allocation (int condemned_gen_number);
2227 // make_free_lists is only called by blocking GCs.
2229 void make_free_lists (int condemned_gen_number);
2231 void make_free_list_in_brick (uint8_t* tree, make_free_args* args);
2233 void thread_gap (uint8_t* gap_start, size_t size, generation* gen);
2235 void loh_thread_gap_front (uint8_t* gap_start, size_t size, generation* gen);
2237 void make_unused_array (uint8_t* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2239 void clear_unused_array (uint8_t* x, size_t size);
2241 void relocate_address (uint8_t** old_address THREAD_NUMBER_DCL);
2242 struct relocate_args
2248 mark* pinned_plug_entry;
2252 void reloc_survivor_helper (uint8_t** pval);
2254 void check_class_object_demotion (uint8_t* obj);
2256 void check_class_object_demotion_internal (uint8_t* obj);
2259 void check_demotion_helper (uint8_t** pval, uint8_t* parent_obj);
2262 void relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end);
2265 void verify_pins_with_post_plug_info (const char* msg);
2267 #ifdef COLLECTIBLE_CLASS
2269 void unconditional_set_card_collectible (uint8_t* obj);
2270 #endif //COLLECTIBLE_CLASS
2273 void relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry);
2276 void relocate_obj_helper (uint8_t* x, size_t s);
2279 void reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc);
2282 void relocate_pre_plug_info (mark* pinned_plug_entry);
2285 void relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned);
2288 void relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
2289 BOOL check_last_object_p,
2290 mark* pinned_plug_entry);
2292 void relocate_survivors_in_brick (uint8_t* tree, relocate_args* args);
2295 void update_oldest_pinned_plug();
2298 void relocate_survivors (int condemned_gen_number,
2299 uint8_t* first_condemned_address );
2301 void relocate_phase (int condemned_gen_number,
2302 uint8_t* first_condemned_address);
2308 ptrdiff_t last_plug_relocation;
2309 uint8_t* before_last_plug;
2310 size_t current_compacted_brick;
2312 mark* pinned_plug_entry;
2313 BOOL check_gennum_p;
2318 dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2319 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2324 void copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2326 void gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2328 void compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2330 void compact_in_brick (uint8_t* tree, compact_args* args);
2333 mark* get_next_pinned_entry (uint8_t* tree,
2334 BOOL* has_pre_plug_info_p,
2335 BOOL* has_post_plug_info_p,
2339 mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2342 void recover_saved_pinned_info();
2345 void compact_phase (int condemned_gen_number, uint8_t*
2346 first_condemned_address, BOOL clear_cards);
2348 void clear_cards (size_t start_card, size_t end_card);
2350 void clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address);
2352 void copy_cards (size_t dst_card, size_t src_card,
2353 size_t end_card, BOOL nextp);
2355 void copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2357 #ifdef BACKGROUND_GC
2359 void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2361 void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2362 #endif //BACKGROUND_GC
2366 BOOL ephemeral_pointer_p (uint8_t* o);
2368 void fix_brick_to_highest (uint8_t* o, uint8_t* next_o);
2370 uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object);
2372 uint8_t* compute_next_boundary (uint8_t* low, int gen_number, BOOL relocating);
2374 void keep_card_live (uint8_t* o, size_t& n_gen,
2375 size_t& cg_pointers_found);
2377 void mark_through_cards_helper (uint8_t** poo, size_t& ngen,
2378 size_t& cg_pointers_found,
2379 card_fn fn, uint8_t* nhigh,
2380 uint8_t* next_boundary);
2383 BOOL card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
2384 size_t& cg_pointers_found,
2385 size_t& n_eph, size_t& n_card_set,
2386 size_t& card, size_t& end_card,
2387 BOOL& foundp, uint8_t*& start_address,
2388 uint8_t*& limit, size_t& n_cards_cleared);
2390 void mark_through_cards_for_segments (card_fn fn, BOOL relocating);
2393 void repair_allocation_in_expanded_heap (generation* gen);
2395 BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2397 BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2399 BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2400 #ifdef SEG_REUSE_STATS
2402 size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2403 #endif //SEG_REUSE_STATS
2405 void build_ordered_free_spaces (heap_segment* seg);
2407 void count_plug (size_t last_plug_size, uint8_t*& last_plug);
2409 void count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug);
2411 void build_ordered_plug_indices ();
2413 void init_ordered_free_space_indices ();
2415 void trim_free_spaces_indices ();
2417 BOOL try_best_fit (BOOL end_of_segment_p);
2419 BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2421 BOOL process_free_space (heap_segment* seg,
2422 size_t free_space,
2423 size_t min_free_size,
2424 size_t min_cont_size,
2425 size_t* total_free_space,
2426 size_t* largest_free_space);
2428 size_t compute_eph_gen_starts_size();
2430 void compute_new_ephemeral_size();
2432 BOOL expand_reused_seg_p();
2434 BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2435 size_t min_cont_size, allocator* al);
2437 uint8_t* allocate_in_expanded_heap (generation* gen, size_t size,
2438 BOOL& adjacentp, uint8_t* old_loc,
2439 #ifdef SHORT_PLUGS
2440 BOOL set_padding_on_saved_p,
2441 mark* pinned_plug_entry,
2442 #endif //SHORT_PLUGS
2443 BOOL consider_bestfit, int active_new_gen_number
2444 REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2446 void realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
2447 generation* gen, uint8_t* start_address,
2448 unsigned int& active_new_gen_number,
2449 uint8_t*& last_pinned_gap, BOOL& leftp,
2450 BOOL shortened_p
2451 #ifdef SHORT_PLUGS
2452 , mark* pinned_plug_entry
2453 #endif //SHORT_PLUGS
2454 );
2456 void realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address,
2458 unsigned int& active_new_gen_number,
2459 uint8_t*& last_pinned_gap, BOOL& leftp);
2461 void realloc_plugs (generation* consing_gen, heap_segment* seg,
2462 uint8_t* start_address, uint8_t* end_address,
2463 unsigned active_new_gen_number);
2466 void set_expand_in_full_gc (int condemned_gen_number);
2469 void verify_no_pins (uint8_t* start, uint8_t* end);
2472 generation* expand_heap (int condemned_generation,
2473 generation* consing_gen,
2474 heap_segment* new_heap_segment);
2477 void save_ephemeral_generation_starts();
2480 bool init_dynamic_data ();
2482 float surv_to_growth (float cst, float limit, float max_limit);
2484 size_t desired_new_allocation (dynamic_data* dd, size_t out,
2485 int gen_number, int pass);
2488 void trim_youngest_desired_low_memory();
2491 void decommit_ephemeral_segment_pages();
2495 size_t trim_youngest_desired (uint32_t memory_load,
2496 size_t total_new_allocation,
2497 size_t total_min_allocation);
2499 size_t joined_youngest_desired (size_t new_allocation);
2502 size_t get_total_heap_size ();
2504 size_t get_total_committed_size();
2507 void get_memory_info (uint32_t* memory_load,
2508 uint64_t* available_physical=NULL,
2509 uint64_t* available_page_file=NULL);
2511 size_t generation_size (int gen_number);
2513 size_t get_total_survived_size();
2515 size_t get_current_allocated();
2517 size_t get_total_allocated();
2519 size_t current_generation_size (int gen_number);
2521 size_t generation_plan_size (int gen_number);
2523 void compute_promoted_allocation (int gen_number);
2525 size_t compute_in (int gen_number);
2527 void compute_new_dynamic_data (int gen_number);
2529 gc_history_per_heap* get_gc_data_per_heap();
2531 size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2533 size_t generation_fragmentation (generation* gen,
2534 generation* consing_gen,
2535 uint8_t* end);
2537 size_t generation_sizes (generation* gen);
2539 size_t committed_size();
2541 size_t approximate_new_allocation();
2543 size_t end_space_after_gc();
2545 BOOL decide_on_compacting (int condemned_gen_number,
2546 size_t fragmentation,
2547 BOOL& should_expand);
2549 BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2551 void reset_large_object (uint8_t* o);
2553 void sweep_large_objects ();
2555 void relocate_in_large_objects ();
2557 void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating);
2559 void descr_segment (heap_segment* seg);
2561 void descr_card_table ();
2563 void descr_generations (BOOL begin_gc_p);
2566 void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2568 /*------------ Multiple non isolated heaps ----------------*/
2569 #ifdef MULTIPLE_HEAPS
2571 BOOL create_thread_support (unsigned number_of_heaps);
2573 void destroy_thread_support ();
2575 bool create_gc_thread();
2577 void gc_thread_function();
2579 #ifdef PARALLEL_MARK_LIST_SORT
2581 void sort_mark_list();
2583 void merge_mark_lists();
2585 void append_to_mark_list(uint8_t **start, uint8_t **end);
2586 #else //PARALLEL_MARK_LIST_SORT
2588 void combine_mark_lists();
2589 #endif //PARALLEL_MARK_LIST_SORT
2591 #endif //MULTIPLE_HEAPS
2593 /*------------ End of Multiple non isolated heaps ---------*/
2595 #ifndef SEG_MAPPING_TABLE
2597 heap_segment* segment_of (uint8_t* add, ptrdiff_t & delta,
2598 BOOL verify_p = FALSE);
2599 #endif //SEG_MAPPING_TABLE
2601 #ifdef BACKGROUND_GC
2603 //this is called by the revisit_written_pages path.
2605 uint8_t* high_page (heap_segment* seg, BOOL concurrent_p);
2608 void revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p,
2609 heap_segment* seg, uint8_t*& last_page,
2610 uint8_t*& last_object, BOOL large_objects_p,
2611 size_t& num_marked_objects);
2613 void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2616 void concurrent_scan_dependent_handles (ScanContext *sc);
2622 void bgc_suspend_EE ();
2628 void background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags);
2631 void background_scan_dependent_handles (ScanContext *sc);
2636 // Restores BGC settings if necessary.
2638 void recover_bgc_settings();
2641 void save_bgc_data_per_heap();
2644 BOOL should_commit_mark_array();
2647 void clear_commit_flag();
2650 void clear_commit_flag_global();
2653 void verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr);
2656 void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr);
2659 BOOL commit_mark_array_by_range (uint8_t* begin,
2660 uint8_t* end,
2661 uint32_t* mark_array_addr);
2664 BOOL commit_mark_array_new_seg (gc_heap* hp,
2665 heap_segment* seg,
2666 uint32_t* new_card_table = 0,
2667 uint8_t* new_lowest_address = 0);
2670 BOOL commit_mark_array_with_check (heap_segment* seg, uint32_t* mark_array_addr);
2672 // commit the portion of the mark array that corresponds to
2673 // this segment (from beginning to reserved).
2674 // seg and heap_segment_reserved (seg) are guaranteed to be
2675 // page aligned.
2677 BOOL commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr);
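// A minimal sketch of the range -> mark-array-words -> commit computation this
// family of functions performs (the real code lives in gc.cpp; mark_word_of
// and virtual_commit are assumed stand-ins for the real helpers):
//
//   uint8_t* start        = heap_segment_mem (seg);
//   uint8_t* end          = heap_segment_reserved (seg);
//   uint8_t* commit_start = (uint8_t*)&mark_array_addr[mark_word_of (start)];
//   uint8_t* commit_end   = (uint8_t*)&mark_array_addr[mark_word_of (end)];
//   return virtual_commit (commit_start, commit_end - commit_start);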
2679 // During BGC init, we commit the mark array for all in range
2680 // segments whose mark array hasn't been committed or fully
2681 // committed. All rw segments are in range, only ro segments
2682 // can be partial in range.
2684 BOOL commit_mark_array_bgc_init (uint32_t* mark_array_addr);
2687 BOOL commit_new_mark_array (uint32_t* new_mark_array);
2689 // We need to commit all segments that intersect with the bgc
2690 // range. If a segment is only partially in range, we still
2691 // should commit the mark array for the whole segment as
2692 // we will set the mark array commit flag for this segment.
2694 BOOL commit_new_mark_array_global (uint32_t* new_mark_array);
2696 // We can't decommit the first and the last page in the mark array
2697 // if the beginning and ending don't happen to be page aligned.
2699 void decommit_mark_array_by_seg (heap_segment* seg);
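// A sketch of the page-alignment rule stated above: only pages fully covered
// by the segment's mark array range may be decommitted. align_on_page,
// align_lower_page and virtual_decommit are assumed helpers mirroring gc.cpp:
//
//   uint8_t* decommit_start = align_on_page (range_begin);
//   uint8_t* decommit_end   = align_lower_page (range_end);
//   if (decommit_start < decommit_end)
//       virtual_decommit (decommit_start, decommit_end - decommit_start);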
2702 void background_mark_phase();
2705 void background_drain_mark_list (int thread);
2708 void background_grow_c_mark_list();
2711 void background_promote_callback(Object** object, ScanContext* sc, uint32_t flags);
2714 void mark_absorb_new_alloc();
2720 BOOL prepare_bgc_thread(gc_heap* gh);
2722 BOOL create_bgc_thread(gc_heap* gh);
2724 BOOL create_bgc_threads_support (int number_of_heaps);
2726 BOOL create_bgc_thread_support();
2728 int check_for_ephemeral_alloc();
2730 void wait_to_proceed();
2732 void fire_alloc_wait_event_begin (alloc_wait_reason awr);
2734 void fire_alloc_wait_event_end (alloc_wait_reason awr);
2736 void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored);
2738 uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
2742 void kill_gc_thread();
2744 uint32_t bgc_thread_function();
2746 void do_background_gc();
2748 uint32_t __stdcall bgc_thread_stub (void* arg);
2750 #endif //BACKGROUND_GC
2755 VOLATILE(bool) internal_gc_done;
2757 #ifdef BACKGROUND_GC
2759 uint32_t cm_in_progress;
2761 // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
2762 // we do right before the bgc starts.
2764 BOOL dont_restart_ee_p;
2767 GCEvent bgc_start_event;
2768 #endif //BACKGROUND_GC
2770 // The variables in this block are known to the DAC and must come first
2771 // in the gc_heap class.
2773 // Keeps track of the highest address allocated by Alloc
2775 uint8_t* alloc_allocated;
2777 // The ephemeral heap segment
2779 heap_segment* ephemeral_heap_segment;
2781 // The finalize queue.
2783 CFinalize* finalize_queue;
2787 oom_history oom_info;
2789 // Interesting data, recorded per-heap.
2791 size_t interesting_data_per_heap[max_idp_count];
2794 size_t compact_reasons_per_heap[max_compact_reasons_count];
2797 size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
2800 size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
2803 uint8_t** internal_root_array;
2806 size_t internal_root_array_index;
2809 BOOL heap_analyze_success;
2811 // The generation table. Must always be last.
2813 generation generation_table [NUMBERGENERATIONS + 1];
2818 BOOL expanded_in_fgc;
2821 uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
2823 // Returns TRUE if the thread used to be in cooperative mode
2824 // before calling this function.
2826 BOOL enable_preemptive (Thread* current_thread);
2828 void disable_preemptive (Thread* current_thread, BOOL restore_cooperative);
2830 /* ------------------- per heap members --------------------------*/
2833 #ifndef MULTIPLE_HEAPS
2834 GCEvent gc_done_event;
2835 #else // MULTIPLE_HEAPS
2836 GCEvent gc_done_event;
2837 #endif // MULTIPLE_HEAPS
2840 VOLATILE(int32_t) gc_done_event_lock;
2843 VOLATILE(bool) gc_done_event_set;
2849 void reset_gc_done();
2852 void enter_gc_done_event_lock();
2855 void exit_gc_done_event_lock();
2858 uint8_t* ephemeral_low; //lowest ephemeral address
2861 uint8_t* ephemeral_high; //highest ephemeral address
2864 uint32_t* card_table;
2871 uint32_t* mark_array;
2874 #ifdef CARD_BUNDLE
2876 uint32_t* card_bundle_table;
2877 #endif //CARD_BUNDLE
2879 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2881 sorted_table* seg_table;
2882 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2885 VOLATILE(BOOL) gc_started;
2887 // The following 2 events are there to support the gen2
2888 // notification feature which is only enabled if concurrent
2889 // GC is disabled.
2891 GCEvent full_gc_approach_event;
2894 GCEvent full_gc_end_event;
2896 // Full GC Notification percentages.
2898 uint32_t fgn_maxgen_percent;
2901 uint32_t fgn_loh_percent;
2904 VOLATILE(bool) full_gc_approach_event_set;
2906 #ifdef BACKGROUND_GC
2908 BOOL fgn_last_gc_was_concurrent;
2909 #endif //BACKGROUND_GC
2912 size_t fgn_last_alloc;
2914 static uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
2916 static wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms);
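// A sketch of how a waiter consumes these events (assuming the
// wait_full_gc_status values declared in the GC interface headers):
//
//   wait_full_gc_status status = full_gc_wait (&full_gc_approach_event, time_out_ms);
//   if (status == wait_full_gc_success)
//   {
//       // a gen2 GC is approaching; the app can shed caches now.
//   }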
2919 uint8_t* demotion_low;
2922 uint8_t* demotion_high;
2928 uint8_t* last_gen1_pin_end;
2931 gen_to_condemn_tuning gen_to_condemn_reasons;
2934 size_t etw_allocation_running_amount[2];
2937 int gc_policy; //sweep, compact, expand
2939 #ifdef MULTIPLE_HEAPS
2941 bool gc_thread_no_affinitize_p;
2944 GCEvent gc_start_event;
2947 GCEvent ee_suspend_event;
2950 heap_segment* new_heap_segment;
2952 #define alloc_quantum_balance_units (16)
2955 size_t min_balance_threshold;
2956 #else //MULTIPLE_HEAPS
2959 size_t allocation_running_time;
2962 size_t allocation_running_amount;
2964 #endif //MULTIPLE_HEAPS
2967 gc_mechanisms settings;
2970 gc_history_global gc_data_global;
2973 size_t gc_last_ephemeral_decommit_time;
2976 size_t gc_gen0_desired_high;
2979 size_t gen0_big_free_spaces;
2981 #ifdef SHORT_PLUGS
2983 double short_plugs_pad_ratio;
2984 #endif //SHORT_PLUGS
2988 size_t youngest_gen_desired_th;
2992 uint32_t high_memory_load_th;
2995 uint64_t mem_one_percent;
2998 uint64_t total_physical_mem;
3001 uint64_t entry_available_physical_mem;
3004 size_t last_gc_index;
3007 size_t min_segment_size;
3010 uint8_t* lowest_address;
3013 uint8_t* highest_address;
3016 BOOL ephemeral_promotion;
3018 uint8_t* saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
3020 size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
3023 #ifdef MULTIPLE_HEAPS
3025 GCHeap* vm_heap;
3027 int heap_number;
3029 VOLATILE(int) alloc_context_count;
3030 #else //MULTIPLE_HEAPS
3031 #define vm_heap ((GCHeap*) g_theGCHeap)
3032 #define heap_number (0)
3033 #endif //MULTIPLE_HEAPS
3036 size_t time_bgc_last;
3039 uint8_t* gc_low; // lowest address being condemned
3042 uint8_t* gc_high; //highest address being condemned
3045 size_t mark_stack_tos;
3048 size_t mark_stack_bos;
3051 size_t mark_stack_array_length;
3054 mark* mark_stack_array;
3057 BOOL verify_pinned_queue_p;
3060 uint8_t* oldest_pinned_plug;
3062 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
3064 size_t num_pinned_objects;
3065 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
3067 #ifdef FEATURE_LOH_COMPACTION
3069 size_t loh_pinned_queue_tos;
3072 size_t loh_pinned_queue_bos;
3075 size_t loh_pinned_queue_length;
3078 int loh_pinned_queue_decay;
3081 mark* loh_pinned_queue;
3083 // This is for forced LOH compaction via the complus env var
3085 BOOL loh_compaction_always_p;
3087 // This is set by the user.
3089 gc_loh_compaction_mode loh_compaction_mode;
3091 // We may not compact LOH on every heap if we can't
3092 // grow the pinned queue. This is to indicate whether
3093 // this heap's LOH is compacted or not. So even if
3094 // settings.loh_compaction is TRUE this may not be TRUE.
3096 BOOL loh_compacted_p;
3097 #endif //FEATURE_LOH_COMPACTION
3099 #ifdef BACKGROUND_GC
3102 EEThreadId bgc_thread_id;
3104 #ifdef WRITE_WATCH
3106 uint8_t* background_written_addresses [array_size+2];
3107 #endif //WRITE_WATCH
3110 VOLATILE(c_gc_state) current_c_gc_state; //tells the large object allocator to
3111 //mark the object as new since the start of gc.
3114 gc_mechanisms saved_bgc_settings;
3117 gc_history_per_heap bgc_data_per_heap;
3120 BOOL bgc_thread_running; // gc thread is in its main loop
3123 BOOL keep_bgc_threads_p;
3125 // This event is used by BGC threads to do something on
3126 // one specific thread while other BGC threads have to
3127 // wait. This is different from a join 'cause you can't
3128 // specify which thread should be doing some task
3129 // while other threads have to wait.
3130 // For example, to make the BGC threads managed threads
3131 // we need to create them on the thread that called
3132 // SuspendEE which is heap 0.
3134 GCEvent bgc_threads_sync_event;
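// A sketch of the pattern described above (the real code is in gc.cpp): one
// designated thread does the work and signals, the rest just wait.
//
//   if (heap_number == 0)
//   {
//       do_the_one_time_task();   // hypothetical, e.g. creating the managed BGC threads
//       bgc_threads_sync_event.Set();
//   }
//   else
//   {
//       bgc_threads_sync_event.Wait (INFINITE, FALSE);
//   }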
3140 CLRCriticalSection bgc_threads_timeout_cs;
3143 GCEvent background_gc_done_event;
3146 GCEvent ee_proceed_event;
3149 GCEvent gc_lh_block_event;
3152 bool gc_can_use_concurrent;
3155 bool temp_disable_concurrent_p;
3158 BOOL do_ephemeral_gc_p;
3161 BOOL do_concurrent_p;
3164 VOLATILE(bgc_state) current_bgc_state;
3166 struct gc_history
3168 size_t gc_index;
3169 bgc_state current_bgc_state;
3170 uint32_t gc_time_ms;
3171 // This is in bytes per ms; consider breaking it
3172 // into the efficiency per phase.
3173 size_t gc_efficiency;
3174 uint8_t* eph_low;
3175 uint8_t* gen0_start;
3176 uint8_t* eph_high;
3177 uint8_t* bgc_highest;
3178 uint8_t* bgc_lowest;
3179 uint8_t* fgc_highest;
3180 uint8_t* fgc_lowest;
3181 uint8_t* g_highest;
3182 uint8_t* g_lowest;
3185 #define max_history_count 64
3188 int gchist_index_per_heap;
3191 gc_history gchist_per_heap[max_history_count];
3197 gc_mechanisms_store gchist[max_history_count];
3200 void add_to_history_per_heap();
3203 void add_to_history();
3206 size_t total_promoted_bytes;
3209 size_t bgc_overflow_count;
3212 size_t bgc_begin_loh_size;
3214 size_t end_loh_size;
3216 // We need to throttle the LOH allocations during BGC since we can't
3217 // collect LOH when BGC is in progress.
3218 // We allow the LOH heap size to double during a BGC. So for every
3219 // 10% increase we will have the LOH allocating thread sleep for one more
3220 // ms. So if we are already 30% over the original heap size, the thread will
3221 // sleep for 3 ms.
3223 uint32_t bgc_alloc_spin_loh;
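// A minimal sketch of the throttling rule stated above: one extra ms of sleep
// per 10% of LOH growth since the BGC began. The exact policy lives in
// gc.cpp; this only illustrates the arithmetic using the fields nearby.
//
//   if (bgc_begin_loh_size != 0)
//   {
//       size_t growth_pct = (bgc_loh_size_increased * 100) / bgc_begin_loh_size;
//       uint32_t spin_ms  = (uint32_t)(growth_pct / 10);  // 30% over => 3 ms
//       if (spin_ms)
//           GCToOSInterface::Sleep (spin_ms);
//   }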
3225 // This includes what we allocate at the end of segment - allocating
3226 // in free list doesn't increase the heap size.
3228 size_t bgc_loh_size_increased;
3231 size_t bgc_loh_allocated_in_free;
3234 size_t background_soh_alloc_count;
3237 size_t background_loh_alloc_count;
3240 uint8_t** background_mark_stack_tos;
3243 uint8_t** background_mark_stack_array;
3246 size_t background_mark_stack_array_length;
3249 uint8_t* background_min_overflow_address;
3252 uint8_t* background_max_overflow_address;
3254 // We can't process the soh range concurrently so we
3255 // wait till final mark to process it.
3257 BOOL processed_soh_overflow_p;
3260 uint8_t* background_min_soh_overflow_address;
3263 uint8_t* background_max_soh_overflow_address;
3266 heap_segment* saved_overflow_ephemeral_seg;
3269 heap_segment* saved_sweep_ephemeral_seg;
3272 uint8_t* saved_sweep_ephemeral_start;
3275 uint8_t* background_saved_lowest_address;
3278 uint8_t* background_saved_highest_address;
3280 // This is used for synchronization between the bgc thread
3281 // for this heap and the user threads allocating on this
3284 exclusive_sync* bgc_alloc_lock;
3286 #ifdef SNOOP_STATS
3288 snoop_stats_data snoop_stat;
3289 #endif //SNOOP_STATS
3293 uint8_t** c_mark_list;
3296 size_t c_mark_list_length;
3299 size_t c_mark_list_index;
3300 #endif //BACKGROUND_GC
3304 uint8_t** mark_list;
3307 size_t mark_list_size;
3310 uint8_t** mark_list_end;
3313 uint8_t** mark_list_index;
3316 uint8_t** g_mark_list;
3317 #ifdef PARALLEL_MARK_LIST_SORT
3319 uint8_t** g_mark_list_copy;
3321 uint8_t*** mark_list_piece_start;
3322 uint8_t*** mark_list_piece_end;
3323 #endif //PARALLEL_MARK_LIST_SORT
3327 uint8_t* min_overflow_address;
3330 uint8_t* max_overflow_address;
3333 uint8_t* shigh; //keeps track of the highest marked object
3336 uint8_t* slow; //keeps track of the lowest marked object
3339 size_t allocation_quantum;
3342 size_t alloc_contexts_used;
3345 no_gc_region_info current_no_gc_region_info;
3348 size_t soh_allocation_no_gc;
3351 size_t loh_allocation_no_gc;
3357 heap_segment* saved_loh_segment_no_gc;
3360 BOOL proceed_with_gc_p;
3362 #define youngest_generation (generation_of (0))
3363 #define large_object_generation (generation_of (max_generation+1))
3365 // The more_space_lock and gc_lock are used for 3 purposes:
3367 // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock)
3368 // 2) to synchronize allocations of large objects (more_space_lock)
3369 // 3) to synchronize the GC itself (gc_lock)
3372 GCSpinLock gc_lock; //lock while doing GC
3375 GCSpinLock more_space_lock; //lock while allocating more space
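// A sketch of purposes 1-3 above (the real paths are in gc.cpp;
// enter_spin_lock/leave_spin_lock are the assumed lock helpers):
//
//   enter_spin_lock (&more_space_lock);   // (1)/(2): serialize "get more space"
//   ... allocate, possibly triggering a GC that runs under gc_lock (3) ...
//   leave_spin_lock (&more_space_lock);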
3377 #ifdef SYNCHRONIZATION_STATS
3380 unsigned int good_suspension;
3383 unsigned int bad_suspension;
3385 // Number of times when msl_acquire is > 200 cycles.
3387 unsigned int num_high_msl_acquire;
3389 // Number of times when msl_acquire is < 200 cycles.
3391 unsigned int num_low_msl_acquire;
3393 // Number of times the more_space_lock is acquired.
3395 unsigned int num_msl_acquired;
3397 // Total cycles it takes to acquire the more_space_lock.
3399 uint64_t total_msl_acquire;
3402 void init_heap_sync_stats()
3404 good_suspension = 0;
3405 bad_suspension = 0;
3406 num_msl_acquired = 0;
3407 total_msl_acquire = 0;
3408 num_high_msl_acquire = 0;
3409 num_low_msl_acquire = 0;
3410 more_space_lock.init();
3415 void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3417 printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3418 heap_num,
3419 alloc_contexts_used,
3420 good_suspension,
3421 bad_suspension,
3422 (unsigned int)(total_msl_acquire / gc_count_during_log),
3423 num_high_msl_acquire / gc_count_during_log,
3424 num_low_msl_acquire / gc_count_during_log,
3425 num_msl_acquired / gc_count_during_log,
3426 more_space_lock.num_switch_thread / gc_count_during_log,
3427 more_space_lock.num_wait_longer / gc_count_during_log,
3428 more_space_lock.num_switch_thread_w / gc_count_during_log,
3429 more_space_lock.num_disable_preemptive_w / gc_count_during_log);
3432 #endif //SYNCHRONIZATION_STATS
3434 #define NUM_LOH_ALIST (7)
3435 #define BASE_LOH_ALIST (64*1024)
3437 alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
3439 #define NUM_GEN2_ALIST (12)
3440 #ifdef BIT64
3441 #define BASE_GEN2_ALIST (1*256)
3442 #else
3443 #define BASE_GEN2_ALIST (1*128)
3444 #endif // BIT64
3446 alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
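// Note: each alloc_list array is a set of size-banded buckets whose band
// doubles from the BASE_* size, so a free item of size sz lands in roughly
// bucket log2 (sz / BASE). A hypothetical index computation:
//
//   unsigned bucket = 0;
//   size_t band = BASE_GEN2_ALIST;
//   while ((bucket < (NUM_GEN2_ALIST - 1)) && (sz >= band))
//   {
//       band <<= 1;
//       bucket++;
//   }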
3448 //------------------------------------------
3451 dynamic_data dynamic_data_table [NUMBERGENERATIONS+1];
3454 gc_history_per_heap gc_data_per_heap;
3457 size_t maxgen_pinned_compact_before_advance;
3461 BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
3462 // if elevate_p is FALSE, it means we are determining fragmentation for a generation
3463 // to see if we should condemn this gen; otherwise it means we are determining if
3464 // we should elevate to doing max_gen from an ephemeral gen.
3466 BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
3469 BOOL dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
3471 BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
3473 BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
3476 int generation_skip_ratio;//in %
3479 BOOL gen0_bricks_cleared;
3480 #ifdef FFIND_OBJECT
3482 int gen0_must_clear_bricks;
3483 #endif //FFIND_OBJECT
3486 size_t full_gc_counts[gc_type_max];
3488 // the # of bytes allocated since the last full compacting GC.
3490 uint64_t loh_alloc_since_cg;
3493 BOOL elevation_requested;
3495 // if this is TRUE, we should always guarantee that we do a
3496 // full compacting GC before we OOM.
3498 BOOL last_gc_before_oom;
3501 BOOL should_expand_in_full_gc;
3503 #ifdef BACKGROUND_GC
3505 size_t ephemeral_fgc_counts[max_generation];
3508 BOOL alloc_wait_event_p;
3511 uint8_t* next_sweep_obj;
3514 uint8_t* current_sweep_pos;
3516 #endif //BACKGROUND_GC
3519 fgm_history fgm_result;
3522 size_t eph_gen_starts_size;
3524 #ifdef GC_CONFIG_DRIVEN
3529 size_t time_since_init;
3531 // 0 stores compacting GCs;
3532 // 1 stores sweeping GCs;
3534 size_t compact_or_sweep_gcs[2];
3537 size_t interesting_data_per_gc[max_idp_count];
3539 #endif //GC_CONFIG_DRIVEN
3542 BOOL ro_segments_in_range;
3544 #ifdef BACKGROUND_GC
3546 heap_segment* freeable_small_heap_segment;
3547 #endif //BACKGROUND_GC
3550 heap_segment* freeable_large_heap_segment;
3553 heap_segment* segment_standby_list;
3556 size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
3559 size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
3562 size_t ordered_plug_indices[MAX_NUM_BUCKETS];
3565 size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
3568 BOOL ordered_plug_indices_init;
3574 uint8_t* bestfit_first_pin;
3577 BOOL commit_end_of_seg;
3580 size_t max_free_space_items; // dynamically adjusted.
3583 size_t free_space_buckets;
3586 size_t free_space_items;
3588 // -1 means we are using all the free
3589 // spaces we have (not including
3590 // end of seg space).
3592 int trimmed_free_space_index;
3595 size_t total_ephemeral_plugs;
3598 seg_free_spaces* bestfit_seg;
3600 // Note: we know this from the plan phase.
3601 // total_ephemeral_plugs actually has the same value
3602 // but while we are calculating its value we also store
3603 // info on how big the plugs are for best fit which we
3604 // don't do in plan phase.
3605 // TODO: get rid of total_ephemeral_plugs.
3607 size_t total_ephemeral_size;
3612 #ifdef HEAP_ANALYZE
3614 BOOL heap_analyze_enabled;
3617 size_t internal_root_array_length;
3619 // next two fields are used to optimize the search for the object
3620 // enclosing the current reference handled by ha_mark_object_simple.
3622 uint8_t* current_obj;
3625 size_t current_obj_size;
3627 #endif //HEAP_ANALYZE
3629 /* ----------------------- global members ----------------------- */
3633 int condemned_generation_num;
3636 BOOL blocking_collection;
3638 #ifdef MULTIPLE_HEAPS
3647 #ifdef BACKGROUND_GC
3649 size_t* g_bpromoted;
3650 #endif //BACKGROUND_GC
3653 int* g_mark_stack_busy;
3658 #ifdef BACKGROUND_GC
3661 #endif //BACKGROUND_GC
3662 #endif //MULTIPLE_HEAPS
3665 size_t reserved_memory;
3667 size_t reserved_memory_limit;
3669 BOOL g_low_memory_status;
3673 void update_collection_counts ();
3677 #define ASSERT_OFFSETS_MATCH(field) \
3678 static_assert_no_msg(offsetof(dac_gc_heap, field) == offsetof(gc_heap, field))
3680 #ifdef MULTIPLE_HEAPS
3681 ASSERT_OFFSETS_MATCH(alloc_allocated);
3682 ASSERT_OFFSETS_MATCH(ephemeral_heap_segment);
3683 ASSERT_OFFSETS_MATCH(finalize_queue);
3684 ASSERT_OFFSETS_MATCH(oom_info);
3685 ASSERT_OFFSETS_MATCH(interesting_data_per_heap);
3686 ASSERT_OFFSETS_MATCH(compact_reasons_per_heap);
3687 ASSERT_OFFSETS_MATCH(expand_mechanisms_per_heap);
3688 ASSERT_OFFSETS_MATCH(interesting_mechanism_bits_per_heap);
3689 ASSERT_OFFSETS_MATCH(internal_root_array);
3690 ASSERT_OFFSETS_MATCH(internal_root_array_index);
3691 ASSERT_OFFSETS_MATCH(heap_analyze_success);
3692 ASSERT_OFFSETS_MATCH(generation_table);
3693 #endif // MULTIPLE_HEAPS
3695 #ifdef FEATURE_PREMORTEM_FINALIZATION
3698 #ifdef DACCESS_COMPILE
3699 friend class ::ClrDataAccess;
3700 #endif // DACCESS_COMPILE
3702 friend class CFinalizeStaticAsserts;
3706 // to add a segment, adjust ExtraSegCount and add a constant for the new segment
3707 static const int ExtraSegCount = 2;
3708 static const int FinalizerListSeg = NUMBERGENERATIONS+1;
3709 static const int CriticalFinalizerListSeg = NUMBERGENERATIONS;
3710 //Does not correspond to a segment
3711 static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
3713 PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
3714 PTR_PTR_Object m_Array;
3715 PTR_PTR_Object m_EndArray;
3716 size_t m_PromotedCount;
3718 VOLATILE(int32_t) lock;
3720 EEThreadId lockowner_threadid;
3724 void MoveItem (Object** fromIndex,
3725 unsigned int fromSeg,
3726 unsigned int toSeg);
3728 inline PTR_PTR_Object& SegQueue (unsigned int Seg)
3730 return (Seg ? m_FillPointers [Seg-1] : m_Array);
3732 inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
3734 return m_FillPointers [Seg];
3737 BOOL IsSegEmpty ( unsigned int i)
3739 ASSERT ( (int)i < FreeList);
3740 return (SegQueueLimit(i) == SegQueue (i));
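// Note on the layout the three accessors above rely on: the queue is one
// contiguous array (m_Array..m_EndArray) carved into consecutive segments;
// m_FillPointers[i] is the limit of segment i, so segment i+1 starts where
// segment i ends, and a segment is empty exactly when its start reaches its
// limit.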
3744 BOOL FinalizeSegForAppDomain (AppDomain *pDomain,
3745 BOOL fRunFinalizers,
3746 unsigned int Seg);
3751 void EnterFinalizeLock();
3752 void LeaveFinalizeLock();
3753 bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
3754 Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
3755 BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
3756 void RelocateFinalizationData (int gen, gc_heap* hp);
3757 void WalkFReachableObjects (fq_walk_fn fn);
3758 void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
3759 void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
3760 size_t GetPromotedCount();
3762 //Methods used by the shutdown code to call every finalizer
3763 void SetSegForShutDown(BOOL fHasLock);
3764 size_t GetNumberFinalizableObjects();
3765 void DiscardNonCriticalObjects();
3767 //Methods used by the app domain unloading call to finalize objects in an app domain
3768 bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers);
3770 void CheckFinalizerObjects();
3774 class CFinalizeStaticAsserts {
3775 static_assert(dac_finalize_queue::ExtraSegCount == CFinalize::ExtraSegCount, "ExtraSegCount mismatch");
3776 static_assert(offsetof(dac_finalize_queue, m_FillPointers) == offsetof(CFinalize, m_FillPointers), "CFinalize layout mismatch");
3780 #endif // FEATURE_PREMORTEM_FINALIZATION
3783 size_t& dd_begin_data_size (dynamic_data* inst)
3785 return inst->begin_data_size;
3788 size_t& dd_survived_size (dynamic_data* inst)
3790 return inst->survived_size;
3792 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
3794 size_t& dd_num_npinned_plugs(dynamic_data* inst)
3796 return inst->num_npinned_plugs;
3798 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
3800 size_t& dd_pinned_survived_size (dynamic_data* inst)
3802 return inst->pinned_survived_size;
3805 size_t& dd_added_pinned_size (dynamic_data* inst)
3807 return inst->added_pinned_size;
3810 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
3812 return inst->artificial_pinned_survived_size;
3814 #ifdef SHORT_PLUGS
3816 size_t& dd_padding_size (dynamic_data* inst)
3818 return inst->padding_size;
3820 #endif //SHORT_PLUGS
3822 size_t& dd_current_size (dynamic_data* inst)
3824 return inst->current_size;
3827 float& dd_surv (dynamic_data* inst)
3829 return inst->surv;
3832 size_t& dd_freach_previous_promotion (dynamic_data* inst)
3834 return inst->freach_previous_promotion;
3837 size_t& dd_desired_allocation (dynamic_data* inst)
3839 return inst->desired_allocation;
3842 size_t& dd_collection_count (dynamic_data* inst)
3844 return inst->collection_count;
3847 size_t& dd_promoted_size (dynamic_data* inst)
3849 return inst->promoted_size;
3852 float& dd_limit (dynamic_data* inst)
3854 return inst->limit;
3857 float& dd_max_limit (dynamic_data* inst)
3859 return inst->max_limit;
3862 size_t& dd_min_gc_size (dynamic_data* inst)
3864 return inst->min_gc_size;
3867 size_t& dd_max_size (dynamic_data* inst)
3869 return inst->max_size;
3872 size_t& dd_min_size (dynamic_data* inst)
3874 return inst->min_size;
3877 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
3879 return inst->new_allocation;
3882 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
3884 return inst->gc_new_allocation;
3887 size_t& dd_default_new_allocation (dynamic_data* inst)
3889 return inst->default_new_allocation;
3892 size_t& dd_fragmentation_limit (dynamic_data* inst)
3894 return inst->fragmentation_limit;
3897 float& dd_fragmentation_burden_limit (dynamic_data* inst)
3899 return inst->fragmentation_burden_limit;
3902 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
3904 return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
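// Example: with dd_fragmentation_burden_limit == 0.25 the "virtual" limit
// above is min (0.5, 0.75) == 0.5; the 0.75 cap only takes effect once the
// per-generation limit exceeds 0.375.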
3907 size_t& dd_fragmentation (dynamic_data* inst)
3909 return inst->fragmentation;
3913 size_t& dd_gc_clock (dynamic_data* inst)
3915 return inst->gc_clock;
3918 size_t& dd_time_clock (dynamic_data* inst)
3920 return inst->time_clock;
3924 size_t& dd_gc_elapsed_time (dynamic_data* inst)
3926 return inst->gc_elapsed_time;
3930 float& dd_gc_speed (dynamic_data* inst)
3932 return inst->gc_speed;
3936 alloc_context* generation_alloc_context (generation* inst)
3938 return &(inst->allocation_context);
3942 uint8_t*& generation_allocation_start (generation* inst)
3944 return inst->allocation_start;
3947 uint8_t*& generation_allocation_pointer (generation* inst)
3949 return inst->allocation_context.alloc_ptr;
3952 uint8_t*& generation_allocation_limit (generation* inst)
3954 return inst->allocation_context.alloc_limit;
3957 allocator* generation_allocator (generation* inst)
3959 return &inst->free_list_allocator;
3963 PTR_heap_segment& generation_start_segment (generation* inst)
3965 return inst->start_segment;
3968 heap_segment*& generation_allocation_segment (generation* inst)
3970 return inst->allocation_segment;
3973 uint8_t*& generation_plan_allocation_start (generation* inst)
3975 return inst->plan_allocation_start;
3978 size_t& generation_plan_allocation_start_size (generation* inst)
3980 return inst->plan_allocation_start_size;
3983 uint8_t*& generation_allocation_context_start_region (generation* inst)
3985 return inst->allocation_context_start_region;
3988 size_t& generation_free_list_space (generation* inst)
3990 return inst->free_list_space;
3993 size_t& generation_free_obj_space (generation* inst)
3995 return inst->free_obj_space;
3998 size_t& generation_allocation_size (generation* inst)
4000 return inst->allocation_size;
4004 size_t& generation_pinned_allocated (generation* inst)
4006 return inst->pinned_allocated;
4009 size_t& generation_pinned_allocation_sweep_size (generation* inst)
4011 return inst->pinned_allocation_sweep_size;
4014 size_t& generation_pinned_allocation_compact_size (generation* inst)
4016 return inst->pinned_allocation_compact_size;
4019 size_t& generation_free_list_allocated (generation* inst)
4021 return inst->free_list_allocated;
4024 size_t& generation_end_seg_allocated (generation* inst)
4026 return inst->end_seg_allocated;
4029 BOOL& generation_allocate_end_seg_p (generation* inst)
4031 return inst->allocate_end_seg_p;
4034 size_t& generation_condemned_allocated (generation* inst)
4036 return inst->condemned_allocated;
4038 #ifdef FREE_USAGE_STATS
4040 size_t& generation_pinned_free_obj_space (generation* inst)
4042 return inst->pinned_free_obj_space;
4045 size_t& generation_allocated_in_pinned_free (generation* inst)
4047 return inst->allocated_in_pinned_free;
4050 size_t& generation_allocated_since_last_pin (generation* inst)
4052 return inst->allocated_since_last_pin;
4054 #endif //FREE_USAGE_STATS
4056 float generation_allocator_efficiency (generation* inst)
4058 if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4060 return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4062 else
4063 return 0;
4066 size_t generation_unusable_fragmentation (generation* inst)
4068 return (size_t)(generation_free_obj_space (inst) +
4069 (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
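// Example: with 1 MB of free_obj_space, 4 MB of free_list_space and an
// allocator efficiency of 0.75, the unusable estimate above is
// 1 MB + (1 - 0.75) * 4 MB == 2 MB.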
4072 #define plug_skew sizeof(ObjHeader)
4073 // We always use USE_PADDING_TAIL when fitting so items on the free list should be
4074 // twice the min_obj_size.
4075 #define min_free_list (2*min_obj_size)
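// Example: on 64-bit, plug_skew is the 8-byte ObjHeader and min_obj_size is
// 3 pointers (24 bytes), so min_free_list works out to 48 bytes.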
4076 struct plug
4078 uint8_t * skew[plug_skew / sizeof(uint8_t *)];
4081 struct pair
4083 short left;
4084 short right;
4088 //Note that these encode the fact that plug_skew is a multiple of uint8_t*.
4089 // Each new field is prepended to the prior struct.
4091 struct plug_and_pair
4093 pair m_pair;
4094 plug m_plug;
4097 struct plug_and_reloc
4099 ptrdiff_t reloc;
4100 pair m_pair;
4101 plug m_plug;
4104 struct plug_and_gap
4106 ptrdiff_t gap;
4107 ptrdiff_t reloc;
4108 union
4110 pair m_pair;
4111 int lr; //for clearing the entire pair in one instruction
4113 plug m_plug;
4116 struct gap_reloc_pair
4118 size_t gap;
4119 size_t reloc;
4120 pair m_pair;
4123 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4125 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4127 plug_and_gap plugandgap;
4130 struct loh_obj_and_pad
4132 ptrdiff_t reloc;
4133 plug m_plug;
4136 struct loh_padding_obj
4138 uint8_t* mt;
4139 size_t len;
4140 ptrdiff_t reloc;
4141 plug m_plug;
4143 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4146 #define heap_segment_flags_readonly 1
4147 #define heap_segment_flags_inrange 2
4148 #define heap_segment_flags_unmappable 4
4149 #define heap_segment_flags_loh 8
4150 #ifdef BACKGROUND_GC
4151 #define heap_segment_flags_swept 16
4152 #define heap_segment_flags_decommitted 32
4153 #define heap_segment_flags_ma_committed 64
4154 // for segments whose mark array is only partially committed.
4155 #define heap_segment_flags_ma_pcommitted 128
4156 #endif //BACKGROUND_GC
4158 //need to be careful to keep enough pad items to fit a relocation node
4159 //padded to QuadWord before the plug_skew
4160 class heap_segment
4163 uint8_t* allocated;
4164 uint8_t* committed;
4165 uint8_t* reserved;
4166 uint8_t* used;
4167 uint8_t* mem;
4168 size_t flags;
4170 PTR_heap_segment next;
4171 uint8_t* background_allocated;
4172 #ifdef MULTIPLE_HEAPS
4173 gc_heap* heap;
4174 #endif //MULTIPLE_HEAPS
4175 uint8_t* plan_allocated;
4176 uint8_t* saved_bg_allocated;
4179 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4180 #pragma warning(disable:4324) // structure was padded due to __declspec(align())
4182 aligned_plug_and_gap padandplug;
4184 #pragma warning(default:4324) // structure was padded due to __declspec(align())
4188 static_assert(offsetof(dac_heap_segment, allocated) == offsetof(heap_segment, allocated), "DAC heap segment layout mismatch");
4189 static_assert(offsetof(dac_heap_segment, committed) == offsetof(heap_segment, committed), "DAC heap segment layout mismatch");
4190 static_assert(offsetof(dac_heap_segment, reserved) == offsetof(heap_segment, reserved), "DAC heap segment layout mismatch");
4191 static_assert(offsetof(dac_heap_segment, used) == offsetof(heap_segment, used), "DAC heap segment layout mismatch");
4192 static_assert(offsetof(dac_heap_segment, mem) == offsetof(heap_segment, mem), "DAC heap segment layout mismatch");
4193 static_assert(offsetof(dac_heap_segment, flags) == offsetof(heap_segment, flags), "DAC heap segment layout mismatch");
4194 static_assert(offsetof(dac_heap_segment, next) == offsetof(heap_segment, next), "DAC heap segment layout mismatch");
4195 static_assert(offsetof(dac_heap_segment, background_allocated) == offsetof(heap_segment, background_allocated), "DAC heap segment layout mismatch");
4196 #ifdef MULTIPLE_HEAPS
4197 static_assert(offsetof(dac_heap_segment, heap) == offsetof(heap_segment, heap), "DAC heap segment layout mismatch");
4198 #endif // MULTIPLE_HEAPS
4201 uint8_t*& heap_segment_reserved (heap_segment* inst)
4203 return inst->reserved;
4206 uint8_t*& heap_segment_committed (heap_segment* inst)
4208 return inst->committed;
4211 uint8_t*& heap_segment_used (heap_segment* inst)
4213 return inst->used;
4216 uint8_t*& heap_segment_allocated (heap_segment* inst)
4218 return inst->allocated;
4222 BOOL heap_segment_read_only_p (heap_segment* inst)
4224 return ((inst->flags & heap_segment_flags_readonly) != 0);
4228 BOOL heap_segment_in_range_p (heap_segment* inst)
4230 return (!(inst->flags & heap_segment_flags_readonly) ||
4231 ((inst->flags & heap_segment_flags_inrange) != 0));
4235 BOOL heap_segment_unmappable_p (heap_segment* inst)
4237 return (!(inst->flags & heap_segment_flags_readonly) ||
4238 ((inst->flags & heap_segment_flags_unmappable) != 0));
4242 BOOL heap_segment_loh_p (heap_segment * inst)
4244 return !!(inst->flags & heap_segment_flags_loh);
4247 #ifdef BACKGROUND_GC
4249 BOOL heap_segment_decommitted_p (heap_segment * inst)
4251 return !!(inst->flags & heap_segment_flags_decommitted);
4253 #endif //BACKGROUND_GC
4256 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4258 return inst->next;
4261 uint8_t*& heap_segment_mem (heap_segment* inst)
4263 return inst->mem;
4266 uint8_t*& heap_segment_plan_allocated (heap_segment* inst)
4268 return inst->plan_allocated;
4271 #ifdef BACKGROUND_GC
4273 uint8_t*& heap_segment_background_allocated (heap_segment* inst)
4275 return inst->background_allocated;
4278 uint8_t*& heap_segment_saved_bg_allocated (heap_segment* inst)
4280 return inst->saved_bg_allocated;
4282 #endif //BACKGROUND_GC
4284 #ifdef MULTIPLE_HEAPS
4286 gc_heap*& heap_segment_heap (heap_segment* inst)
4288 return inst->heap;
4290 #endif //MULTIPLE_HEAPS
4293 generation* gc_heap::generation_of (int n)
4295 assert (((n <= max_generation+1) && (n >= 0)));
4296 return &generation_table [ n ];
4300 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4302 return &dynamic_data_table [ gen_number ];
4305 #define card_word_width ((size_t)32)
4308 // The value of card_size is determined empirically according to the average size of an object
4309 // In the code we also rely on the assumption that one card_table entry (uint32_t) covers an entire os page
4311 #if defined (BIT64)
4312 #define card_size ((size_t)(2*OS_PAGE_SIZE/card_word_width))
4313 #else
4314 #define card_size ((size_t)(OS_PAGE_SIZE/card_word_width))
4315 #endif //BIT64
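// Worked example: with a 4096-byte OS page and card_word_width of 32,
// card_size is 128 bytes (256 on 64-bit), so one 32-bit card word spans one
// OS page (two on 64-bit) -- the assumption the comment above relies on.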
4317 // Returns the index of the card word a card is in
4319 size_t card_word (size_t card)
4321 return card / card_word_width;
4324 // Returns the index of a card within its card word
4326 unsigned card_bit (size_t card)
4328 return (unsigned)(card % card_word_width);
4332 size_t gcard_of (uint8_t* object)
4334 return (size_t)(object) / card_size;
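// Example using only the helpers defined above: dirtying the card that covers
// an object o. This mirrors, conceptually, what the write barrier does against
// the global card table; card_table_arg is just a stand-in parameter here.
inline void gset_card_sketch (uint32_t* card_table_arg, uint8_t* o)
{
    size_t card = gcard_of (o);                                   // card index for o
    card_table_arg [card_word (card)] |= (1u << card_bit (card)); // set its bit
}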