2 // Copyright (c) Microsoft. All rights reserved.
3 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
10 #pragma optimize( "t", on )
13 #define inline __forceinline
21 inline void FATAL_GC_ERROR()
24 _ASSERTE(!"Fatal Error in GC.");
25 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
29 #pragma inline_depth(20)
32 /* the following section defines the optional features */
34 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
35 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
36 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much
37 // work to make FEATURE_STRUCTALIGN not apply to LOH so they can both be defined.
39 #define FEATURE_LOH_COMPACTION
41 #ifdef FEATURE_64BIT_ALIGNMENT
42 // We need the following feature as part of keeping 64-bit types aligned in the GC heap.
43 #define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during relocation
45 #endif //FEATURE_64BIT_ALIGNMENT
47 #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
49 #define DESIRED_PLUG_LENGTH (1000)
52 #define FEATURE_PREMORTEM_FINALIZATION
55 #ifndef FEATURE_REDHAWK
57 #define COLLECTIBLE_CLASS
58 #endif // !FEATURE_REDHAWK
61 #define initial_internal_roots (1024*16)
62 #endif // HEAP_ANALYZE
64 #define MARK_LIST //used sorted list to speed up plan phase
66 #define BACKGROUND_GC //concurrent background GC (requires WRITE_WATCH)
69 #define MH_SC_MARK //scalable marking
70 //#define SNOOP_STATS //diagnostic
71 #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
74 //This is used to mark some type volatile only when the scalable marking is used.
75 #if defined (SERVER_GC) && defined (MH_SC_MARK)
76 #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
77 #else //SERVER_GC&&MH_SC_MARK
78 #define SERVER_SC_MARK_VOLATILE(x) x
79 #endif //SERVER_GC&&MH_SC_MARK
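// Illustrative sketch (hypothetical field, not something declared in this
// header): a member written as
//     SERVER_SC_MARK_VOLATILE(BYTE*) next_entry;
// is volatile-qualified only when both SERVER_GC and MH_SC_MARK are defined,
// and is a plain BYTE* otherwise, so only the server scalable-marking build
// pays the cost of volatile accesses.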
81 //#define MULTIPLE_HEAPS //Allow multiple heaps for servers
83 #define INTERIOR_POINTERS //Allow interior pointers in the code manager
85 #define CARD_BUNDLE //enable card bundle feature.(requires WRITE_WATCH)
87 // If this is defined we use a map for segments in order to find the heap for
88 // a segment fast. But it does use more memory as we have to cover the whole
89 // heap range and for each entry we allocate a struct of 5 ptr-size words
90 // (3 for WKS as there's only one heap).
91 #define SEG_MAPPING_TABLE
93 // If allocating the heap mapping table for the available VA consumes too
94 // much memory, you can enable this to allocate only the portion that
95 // corresponds to rw segments and grow it when needed in grow_brick_card_table.
96 // However in heap_of you will need to always compare the address with
97 // g_lowest/highest before you can look at the heap mapping table.
98 #define GROWABLE_SEG_MAPPING_TABLE
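// Illustrative sketch of the check described above (assumed global names
// g_lowest_address/g_highest_address; not the actual heap_of body):
//     if ((o < g_lowest_address) || (o >= g_highest_address))
//         return the default heap;   // address is outside any managed segment
//     // only now is it safe to index the (possibly grown) heap mapping table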
101 #define MARK_ARRAY //Mark bit in an array
102 #endif //BACKGROUND_GC
104 #if defined(BACKGROUND_GC) || defined (CARD_BUNDLE)
105 #define WRITE_WATCH //Write Watch feature
106 #endif //BACKGROUND_GC || CARD_BUNDLE
109 #define array_size 100
112 //#define SHORT_PLUGS //keep plug short
114 #define FFIND_OBJECT //faster find_object, slower allocation
115 #define FFIND_DECAY 7 //Number of GC for which fast find will be active
117 //#define NO_WRITE_BARRIER //no write barrier, use Write Watch feature
119 //#define DEBUG_WRITE_WATCH //Additional debug for write watch
121 //#define STRESS_PINNING //Stress pinning by pinning randomly
123 //#define TRACE_GC //debug trace gc operation
124 //#define SIMPLE_DPRINTF
126 //#define CATCH_GC //catches exception during GC
128 //#define TIME_GC //time allocation and garbage collection
129 //#define TIME_WRITE_WATCH //time GetWriteWatch and ResetWriteWatch calls
130 //#define COUNT_CYCLES //Use cycle counter for timing
131 //#define JOIN_STATS //amount of time spent in the join
132 //also, see TIME_SUSPEND in switches.h.
134 //#define SYNCHRONIZATION_STATS
135 //#define SEG_REUSE_STATS
137 #if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
138 #define BEGIN_TIMING(x) \
139 LARGE_INTEGER x##_start; \
140 QueryPerformanceCounter (&x##_start)
142 #define END_TIMING(x) \
143 LARGE_INTEGER x##_end; \
144 QueryPerformanceCounter (&x##_end); \
145 x += x##_end.QuadPart - x##_start.QuadPart
148 #define BEGIN_TIMING(x)
149 #define END_TIMING(x)
150 #define BEGIN_TIMING_CYCLES(x)
151 #define END_TIMING_CYCLES(x)
152 #endif //SYNCHRONIZATION_STATS || STAGE_STATS
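// Usage sketch for the timing macros above (illustrative; 'suspend_time' is a
// hypothetical accumulator). With stats enabled this brackets the work with
// QueryPerformanceCounter and accumulates the elapsed ticks; otherwise it
// compiles away to nothing:
//     LONGLONG suspend_time = 0;
//     BEGIN_TIMING (suspend_time);
//     // ... work being timed ...
//     END_TIMING (suspend_time);    // suspend_time += elapsed QPC ticks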
154 #define NO_CATCH_HANDLERS //to debug gc1, remove the catch handlers
156 /* End of optional features */
162 #define NUMBERGENERATIONS 4 //Max number of generations
164 // For the bestfit algorithm when we relocate ephemeral generations into an
165 // existing gen2 segment.
166 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
167 #define MIN_INDEX_POWER2 6
172 #define MAX_INDEX_POWER2 30
174 #define MAX_INDEX_POWER2 26
180 #define MAX_INDEX_POWER2 28
182 #define MAX_INDEX_POWER2 24
187 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
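// Worked example: with MIN_INDEX_POWER2 of 6 and a MAX_INDEX_POWER2 of 30,
// MAX_NUM_BUCKETS is 30 - 6 + 1 = 25, matching the "25 sizes total"
// (2^6..2^30) noted above; the smaller MAX_INDEX_POWER2 settings simply drop
// the largest buckets.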
189 #define MAX_NUM_FREE_SPACES 200
190 #define MIN_NUM_FREE_SPACES 5
192 //Please leave these definitions intact.
194 #define CLREvent CLREventStatic
196 #ifdef CreateFileMapping
198 #undef CreateFileMapping
200 #endif //CreateFileMapping
202 #define CreateFileMapping WszCreateFileMapping
205 #ifdef InitializeCriticalSection
206 #undef InitializeCriticalSection
207 #endif //ifdef InitializeCriticalSection
208 #define InitializeCriticalSection UnsafeInitializeCriticalSection
210 #ifdef DeleteCriticalSection
211 #undef DeleteCriticalSection
212 #endif //ifdef DeleteCriticalSection
213 #define DeleteCriticalSection UnsafeDeleteCriticalSection
215 #ifdef EnterCriticalSection
216 #undef EnterCriticalSection
217 #endif //ifdef EnterCriticalSection
218 #define EnterCriticalSection UnsafeEEEnterCriticalSection
220 #ifdef LeaveCriticalSection
221 #undef LeaveCriticalSection
222 #endif //ifdef LeaveCriticalSection
223 #define LeaveCriticalSection UnsafeEELeaveCriticalSection
225 #ifdef TryEnterCriticalSection
226 #undef TryEnterCriticalSection
227 #endif //ifdef TryEnterCriticalSection
228 #define TryEnterCriticalSection UnsafeEETryEnterCriticalSection
230 #ifdef CreateSemaphore
231 #undef CreateSemaphore
232 #endif //CreateSemaphore
233 #define CreateSemaphore UnsafeCreateSemaphore
237 #endif //ifdef CreateEvent
238 #define CreateEvent UnsafeCreateEvent
242 #endif //ifdef VirtualAlloc
243 #define VirtualAlloc ClrVirtualAlloc
247 #endif //ifdef VirtualFree
248 #define VirtualFree ClrVirtualFree
252 #endif //ifdef VirtualQuery
253 #define VirtualQuery ClrVirtualQuery
255 #ifdef VirtualProtect
256 #undef VirtualProtect
257 #endif //ifdef VirtualProtect
258 #define VirtualProtect ClrVirtualProtect
264 #ifdef FEATURE_STRUCTALIGN
265 #define REQD_ALIGN_DCL ,int requiredAlignment
266 #define REQD_ALIGN_ARG ,requiredAlignment
267 #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
268 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
269 #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
270 #else // FEATURE_STRUCTALIGN
271 #define REQD_ALIGN_DCL
272 #define REQD_ALIGN_ARG
273 #define REQD_ALIGN_AND_OFFSET_DCL
274 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
275 #define REQD_ALIGN_AND_OFFSET_ARG
276 #endif // FEATURE_STRUCTALIGN
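// Illustrative expansion (hypothetical function name): a declaration written as
//     BYTE* allocate_foo (generation* gen, size_t size REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
// becomes, under FEATURE_STRUCTALIGN,
//     BYTE* allocate_foo (generation* gen, size_t size, int requiredAlignment=DATA_ALIGNMENT, size_t alignmentOffset=0);
// and keeps only the original parameters otherwise; call sites forward the
// extra arguments with REQD_ALIGN_AND_OFFSET_ARG.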
278 #ifdef MULTIPLE_HEAPS
279 #define THREAD_NUMBER_DCL ,int thread
280 #define THREAD_NUMBER_ARG ,thread
281 #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
282 #define THREAD_FROM_HEAP int thread = heap_number;
283 #define HEAP_FROM_THREAD gc_heap* hpt = gc_heap::g_heaps[thread];
285 #define THREAD_NUMBER_DCL
286 #define THREAD_NUMBER_ARG
287 #define THREAD_NUMBER_FROM_CONTEXT
288 #define THREAD_FROM_HEAP
289 #define HEAP_FROM_THREAD gc_heap* hpt = 0;
290 #endif //MULTIPLE_HEAPS
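// Illustrative expansion: a member declared as
//     void mark_object_simple (BYTE** o THREAD_NUMBER_DCL);
// gains an extra ", int thread" parameter only under MULTIPLE_HEAPS, and inside
// a per-heap method the pair
//     THREAD_FROM_HEAP;
//     HEAP_FROM_THREAD;
// yields "int thread = heap_number;" plus the gc_heap* hpt for that thread
// (hpt is simply 0 in the workstation build).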
292 //These constants are ordered
293 const int policy_sweep = 0;
294 const int policy_compact = 1;
295 const int policy_expand = 2;
300 extern int print_level;
301 extern BOOL trace_gc;
302 extern int gc_trace_fac;
307 static hlet* bindings;
312 hlet (int& place, int value)
328 #define let(p,v) hlet __x = hlet (p, v);
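// Illustrative use (assumption: hlet's destructor restores the previous value
// when the temporary __x goes out of scope):
//     let (print_level, 10);   // trace at level 10 for the enclosing scope only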
338 #define SEG_REUSE_LOG_0 7
339 #define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
340 #define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
341 #define BGC_LOG (DT_LOG_0 + 1)
342 #define GTC_LOG (DT_LOG_0 + 2)
343 #define GC_TABLE_LOG (DT_LOG_0 + 3)
344 #define JOIN_LOG (DT_LOG_0 + 4)
345 #define SPINLOCK_LOG (DT_LOG_0 + 5)
346 #define SNOOP_LOG (DT_LOG_0 + 6)
348 #ifndef DACCESS_COMPILE
350 #ifdef SIMPLE_DPRINTF
352 //#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
353 void LogValist(const char *fmt, va_list args);
354 void GCLog (const char *fmt, ... );
355 //#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
356 //#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
357 //#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
358 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
359 //#define dprintf(l,x) {if (l==GTC_LOG) {GCLog x;}}
360 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == 1234)) ) {GCLog x;}}
361 //#define dprintf(l,x) {if ((l <= 1) || (l == 2222)) {GCLog x;}}
362 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
363 //#define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG) ||(l == DT_LOG_0)) {GCLog x;}}
364 //#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
365 //#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
366 //#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
368 #else //SIMPLE_DPRINTF
370 // The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
371 // reg key GCTraceFacility is set. The stress log can only take a format string and 4 numbers or strings.
373 #define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \
374 if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \
375 else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \
376 else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}}
378 #endif //SIMPLE_DPRINTF
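// Usage note (illustrative): the second dprintf argument is a parenthesized
// printf-style argument list, because the macro pastes it directly after
// GCLog/printf, e.g.
//     dprintf (2, ("h%d: new seg %Ix", heap_number, (size_t)new_seg));
// With the SIMPLE_DPRINTF definition chosen above, only levels <= 1 and
// GTC_LOG are emitted; the non-SIMPLE_DPRINTF variant filters on trace_gc,
// print_level and gc_trace_fac instead.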
380 #else //DACCESS_COMPILE
382 #endif //DACCESS_COMPILE
387 #ifndef FEATURE_REDHAWK
389 #define assert _ASSERTE
391 #define ASSERT _ASSERTE
392 #endif // FEATURE_REDHAWK
396 struct GCDebugSpinLock {
397 VOLATILE(LONG) lock; // -1 if free, 0 if held
398 VOLATILE(Thread *) holding_thread; // -1 if no thread holds the lock.
399 VOLATILE(BOOL) released_by_gc_p; // a GC thread released the lock.
402 : lock(-1), holding_thread((Thread*) -1)
407 typedef GCDebugSpinLock GCSpinLock;
409 #elif defined (SYNCHRONIZATION_STATS)
411 struct GCSpinLockInstru {
413 // number of times we went into SwitchToThread in enter_spin_lock.
414 unsigned int num_switch_thread;
415 // number of times we went into WaitLonger.
416 unsigned int num_wait_longer;
417 // number of times we went to calling SwitchToThread in WaitLonger.
418 unsigned int num_switch_thread_w;
419 // number of times we went to calling DisablePreemptiveGC in WaitLonger.
420 unsigned int num_disable_preemptive_w;
423 : lock(-1), num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
429 num_switch_thread = 0;
431 num_switch_thread_w = 0;
432 num_disable_preemptive_w = 0;
436 typedef GCSpinLockInstru GCSpinLock;
440 struct GCDebugSpinLock {
441 VOLATILE(LONG) lock; // -1 if free, 0 if held
448 typedef GCDebugSpinLock GCSpinLock;
458 class seg_free_spaces;
462 class exclusive_sync;
463 class recursive_gc_sync;
464 #endif //BACKGROUND_GC
466 // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
467 // make sure you change that one if you change this one!
470 pause_batch = 0, //We are not concerned about pause length
471 pause_interactive = 1, //We are running an interactive app
472 pause_low_latency = 2, //short pauses are essential
473 //avoid long pauses from blocking full GCs unless running out of memory
474 pause_sustained_low_latency = 3,
478 enum gc_loh_compaction_mode
480 loh_compaction_default = 1, // the default mode, don't compact LOH.
481 loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
482 loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
485 enum set_pause_mode_status
487 set_pause_mode_success = 0,
488 set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
493 tuning_deciding_condemned_gen,
494 tuning_deciding_full_gc,
495 tuning_deciding_compaction,
496 tuning_deciding_expansion,
497 tuning_deciding_promote_ephemeral
500 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
501 static const char * const str_bgc_state[] =
515 #endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
517 enum allocation_state
520 a_state_can_allocate,
521 a_state_cant_allocate,
523 a_state_try_fit_new_seg,
524 a_state_try_fit_new_seg_after_cg,
525 a_state_try_fit_no_seg,
526 a_state_try_fit_after_cg,
527 a_state_try_fit_after_bgc,
528 a_state_try_free_full_seg_in_bgc,
529 a_state_try_free_after_bgc,
532 a_state_acquire_seg_after_cg,
533 a_state_acquire_seg_after_bgc,
534 a_state_check_and_wait_for_bgc,
535 a_state_trigger_full_compact_gc,
536 a_state_trigger_ephemeral_gc,
537 a_state_trigger_2nd_ephemeral_gc,
538 a_state_check_retry_seg,
544 gc_type_compacting = 0,
545 gc_type_blocking = 1,
547 gc_type_background = 2,
548 #endif //BACKGROUND_GC
553 //encapsulates the mechanism for the current gc
557 VOLATILE(SIZE_T) gc_index; // starts from 1 for the first GC, like dd_collection_count
558 int condemned_generation;
566 int gen0_reduction_count;
567 BOOL should_lock_elevation;
568 int elevation_locked_count;
571 gc_pause_mode pause_mode;
572 BOOL found_finalizers;
577 BOOL allocations_allowed;
578 #endif //BACKGROUND_GC
582 #endif // STRESS_HEAP
585 DWORD entry_memory_load;
588 void init_mechanisms(); //for each GC
589 void first_init(); // for the life of the EE
591 void record (gc_history_global* history);
594 // This is a compact version of gc_mechanisms that we save in the history.
595 class gc_mechanisms_store
606 bool should_lock_elevation;
607 int condemned_generation : 8;
608 int gen0_reduction_count : 8;
609 int elevation_locked_count : 8;
610 gc_reason reason : 8;
611 gc_pause_mode pause_mode : 8;
613 bgc_state b_state : 8;
614 #endif //BACKGROUND_GC
615 bool found_finalizers;
619 #endif //BACKGROUND_GC
623 #endif // STRESS_HEAP
626 DWORD entry_memory_load;
629 void store (gc_mechanisms* gm)
631 gc_index = gm->gc_index;
632 condemned_generation = gm->condemned_generation;
633 promotion = (gm->promotion != 0);
634 compaction = (gm->compaction != 0);
635 loh_compaction = (gm->loh_compaction != 0);
636 heap_expansion = (gm->heap_expansion != 0);
637 concurrent = (gm->concurrent != 0);
638 demotion = (gm->demotion != 0);
639 card_bundles = (gm->card_bundles != 0);
640 gen0_reduction_count = gm->gen0_reduction_count;
641 should_lock_elevation = (gm->should_lock_elevation != 0);
642 elevation_locked_count = gm->elevation_locked_count;
644 pause_mode = gm->pause_mode;
645 found_finalizers = (gm->found_finalizers != 0);
648 background_p = (gm->background_p != 0);
649 b_state = gm->b_state;
650 #endif //BACKGROUND_GC
653 stress_induced = (gm->stress_induced != 0);
654 #endif // STRESS_HEAP
657 entry_memory_load = gm->entry_memory_load;
664 // GC specific statistics, tracking counts and timings for GCs occurring in the system.
665 // This writes the statistics to a file every 60 seconds, if a file is specified in
669 : public StatisticsBase
671 // initialized to the contents of COMPLUS_GcMixLog, or NULL, if not present
672 static WCHAR* logFileName;
673 static FILE* logFile;
675 // number of times we executed a background GC, a foreground GC, or a non-concurrent GC.
677 int cntBGC, cntFGC, cntNGC;
679 // min, max, and total time spent performing BGCs, FGCs, NGCs
680 // (BGC time includes everything between the moment the BGC starts until
681 // it completes, i.e. the times of all FGCs occurring concurrently)
682 MinMaxTot bgc, fgc, ngc;
684 // number of times we executed a compacting GC (sweeping counts can be derived)
685 int cntCompactNGC, cntCompactFGC;
688 int cntReasons[reason_max];
690 // count of condemned generation, by NGC and FGC:
691 int cntNGCGen[max_generation+1];
692 int cntFGCGen[max_generation];
694 ///////////////////////////////////////////////////////////////////////////////////////////////
695 // Internal mechanism:
697 virtual void Initialize();
698 virtual void DisplayAndUpdate();
702 static BOOL Enabled()
703 { return logFileName != NULL; }
705 void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
708 extern GCStatistics g_GCStatistics;
709 extern GCStatistics g_LastGCStatistics;
714 typedef DPTR(class heap_segment) PTR_heap_segment;
715 typedef DPTR(class gc_heap) PTR_gc_heap;
716 typedef DPTR(PTR_gc_heap) PTR_PTR_gc_heap;
717 #ifdef FEATURE_PREMORTEM_FINALIZATION
718 typedef DPTR(class CFinalize) PTR_CFinalize;
719 #endif // FEATURE_PREMORTEM_FINALIZATION
721 //-------------------------------------
722 //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size
723 //and doubling each time. The last bucket (index == num_buckets-1) is for largest sizes with no limit
725 #define MAX_BUCKET_COUNT (13)//Max number of buckets for the small generations.
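// Illustrative bucket layout (hypothetical first_bucket_size of 256): bucket 0
// holds free items smaller than 256 bytes, bucket 1 items smaller than 512,
// and so on doubling each time; the final bucket takes anything larger, with
// no upper bound.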
731 BYTE*& alloc_list_head () { return head;}
732 BYTE*& alloc_list_tail () { return tail;}
744 size_t frst_bucket_size;
745 alloc_list first_bucket;
747 alloc_list& alloc_list_of (unsigned int bn);
750 allocator (unsigned int num_b, size_t fbs, alloc_list* b);
754 frst_bucket_size = SIZE_T_MAX;
756 unsigned int number_of_buckets() {return (unsigned int)num_buckets;}
758 size_t first_bucket_size() {return frst_bucket_size;}
759 BYTE*& alloc_list_head_of (unsigned int bn)
761 return alloc_list_of (bn).alloc_list_head();
763 BYTE*& alloc_list_tail_of (unsigned int bn)
765 return alloc_list_of (bn).alloc_list_tail();
768 BOOL discard_if_no_fit_p()
770 return (num_buckets == 1);
773 // This is when we know there's nothing to repair because this free
774 // list has never gone through plan phase. Right now it's only used
775 // by the background ephemeral sweep when we copy the local free list
776 // to gen0's free list.
778 // We copy head and tail manually (vs together like copy_to_alloc_list)
779 // since we need to copy tail first because when we get the free items off
780 // of each bucket we check head first. We also need to copy the
781 // smaller buckets first so when gen0 allocation needs to thread
782 // smaller items back, that bucket is guaranteed to have been fully copied.
784 void copy_with_no_repair (allocator* allocator_to_copy)
786 assert (num_buckets == allocator_to_copy->number_of_buckets());
787 for (unsigned int i = 0; i < num_buckets; i++)
789 alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
790 alloc_list_tail_of(i) = al->alloc_list_tail();
791 alloc_list_head_of(i) = al->alloc_list_head();
795 void unlink_item (unsigned int bucket_number, BYTE* item, BYTE* previous_item, BOOL use_undo_p);
796 void thread_item (BYTE* item, size_t size);
797 void thread_item_front (BYTE* item, size_t size);
798 void thread_free_item (BYTE* free_item, BYTE*& head, BYTE*& tail);
799 void copy_to_alloc_list (alloc_list* toalist);
800 void copy_from_alloc_list (alloc_list* fromalist);
801 void commit_alloc_list_changes();
804 #define NUM_GEN_POWER2 (20)
805 #define BASE_GEN_SIZE (1*512)
807 // group the frequently used ones together (need instrumentation on accessors)
811 // Don't move these first two fields without adjusting the references
812 // from the __asm in jitinterface.cpp.
813 alloc_context allocation_context;
814 heap_segment* allocation_segment;
815 PTR_heap_segment start_segment;
816 BYTE* allocation_context_start_region;
817 BYTE* allocation_start;
818 allocator free_list_allocator;
819 size_t free_list_allocated;
820 size_t end_seg_allocated;
821 BOOL allocate_end_seg_p;
822 size_t condemned_allocated;
823 size_t free_list_space;
824 size_t free_obj_space;
825 size_t allocation_size;
826 BYTE* plan_allocation_start;
827 size_t plan_allocation_start_size;
829 // these are the pinned plugs that got allocated into this gen.
830 size_t pinned_allocated;
831 size_t pinned_allocation_compact_size;
832 size_t pinned_allocation_sweep_size;
835 #ifdef FREE_USAGE_STATS
836 size_t gen_free_spaces[NUM_GEN_POWER2];
837 // these are non pinned plugs only
838 size_t gen_plugs[NUM_GEN_POWER2];
839 size_t gen_current_pinned_free_spaces[NUM_GEN_POWER2];
840 size_t pinned_free_obj_space;
841 // this is what got allocated into the pinned free spaces.
842 size_t allocated_in_pinned_free;
843 size_t allocated_since_last_pin;
844 #endif //FREE_USAGE_STATS
847 // The dynamic data fields are grouped into 3 categories:
849 // calculated logical data (like desired_allocation)
850 // physical data (like fragmentation)
851 // const data (like min_gc_size), initialized at the beginning
855 ptrdiff_t new_allocation;
856 ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
858 size_t desired_allocation;
860 // # of bytes taken by objects (ie, not free space) at the beginning
862 size_t begin_data_size;
863 // # of bytes taken by survived objects after mark.
864 size_t survived_size;
865 // # of bytes taken by survived pinned plugs after mark.
866 size_t pinned_survived_size;
867 size_t artificial_pinned_survived_size;
868 size_t added_pinned_size;
873 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
874 // # of plugs that are not pinned plugs.
875 size_t num_npinned_plugs;
876 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
877 //total object size after a GC, ie, doesn't include fragmentation
879 size_t collection_count;
880 size_t promoted_size;
881 size_t freach_previous_promotion;
882 size_t fragmentation; //fragmentation when we don't compact
883 size_t gc_clock; //gc# when last GC happened
884 size_t time_clock; //time when last gc started
885 size_t gc_elapsed_time; // Time it took for the gc to complete
886 float gc_speed; // speed in bytes/msec for the gc to complete
888 // min_size is always the same as min_gc_size.
892 size_t default_new_allocation;
893 size_t fragmentation_limit;
894 float fragmentation_burden_limit;
899 #define ro_in_entry 0x1
901 #ifdef SEG_MAPPING_TABLE
902 // Note that I am storing both h0 and seg0, even though in Server GC you can get to
903 // the heap* from the segment info. This is because heap_of needs to be really fast
904 // and we would not want yet another indirection.
907 // if an address is > boundary it belongs to h1; else h0.
908 // since we init h0 and h1 to 0, if we get 0 it means that
909 // address doesn't exist on managed segments. And heap_of
910 // would just return heap0 which is what it does now.
912 #ifdef MULTIPLE_HEAPS
915 #endif //MULTIPLE_HEAPS
916 // You could have an address that's in between 2 segments and
917 // this would return a seg, the caller then will use
918 // in_range_for_segment to determine if it's on that seg.
919 heap_segment* seg0; // this is what the seg for h0 is.
920 heap_segment* seg1; // this is what the seg for h1 is.
921 // Note that when frozen objects are used we mask seg1
922 // with 0x1 to indicate that there is a ro segment for this entry.
925 #endif //SEG_MAPPING_TABLE
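// Illustrative lookup against one seg_mapping entry (field names assumed from
// the comments above; not the actual heap_of/seg_mapping_table code):
//     gc_heap*      hp  = (addr > entry->boundary) ? entry->h1   : entry->h0;
//     heap_segment* seg = (addr > entry->boundary) ? entry->seg1 : entry->seg0;
//     // a 0 heap means the address is not on a managed segment, and the caller
//     // still validates seg with in_range_for_segment.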
928 //Alignment constant for allocation
929 #define ALIGNCONST (DATA_ALIGNMENT-1)
932 size_t Align (size_t nbytes, int alignment=ALIGNCONST)
934 return (nbytes + alignment) & ~alignment;
937 //return alignment constant for small object heap vs large object heap
939 int get_alignment_constant (BOOL small_object_p)
941 #ifdef FEATURE_STRUCTALIGN
942 // If any objects on the large object heap require 8-byte alignment,
943 // the compiler will tell us so. Let's not guess an alignment here.
945 #else // FEATURE_STRUCTALIGN
946 return small_object_p ? ALIGNCONST : 7;
947 #endif // FEATURE_STRUCTALIGN
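// Worked example (assuming DATA_ALIGNMENT is 8): ALIGNCONST is 7, so
// Align (13) == (13 + 7) & ~7 == 16. Without FEATURE_STRUCTALIGN,
// get_alignment_constant (FALSE) returns 7 so large objects are always kept
// 8-byte aligned regardless of DATA_ALIGNMENT.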
952 size_t desired_allocation;
953 size_t new_allocation;
957 enum alloc_wait_reason
959 // When we don't care about firing an event for
963 // when we detect we are in low memory
966 // when we detect the ephemeral segment is too full
967 awr_low_ephemeral = 1,
969 // we've given out too much budget for gen0.
972 // we've given out too much budget for loh.
975 // this event is really obsolete - it's for pre-XP
976 // OSs where low mem notification is not supported.
977 awr_alloc_loh_low_mem = 4,
979 // we ran out of VM space to reserve on loh.
982 // ran out of space when allocating a small object
983 awr_gen0_oos_bgc = 6,
985 // ran out of space when allocating a large object
988 // waiting for BGC to let FGC happen
989 awr_fgc_wait_for_bgc = 8,
991 // wait for bgc to finish to get loh seg.
994 // we don't allow loh allocation during bgc planning.
995 awr_loh_alloc_during_plan = 10,
997 // we don't allow too much loh allocation during bgc.
998 awr_loh_alloc_during_bgc = 11
1001 struct alloc_thread_wait_data
1018 mt_alloc_small_cant,
1019 mt_alloc_large_cant,
1024 enum msl_enter_state
1030 struct spinlock_info
1032 msl_enter_state enter_state;
1033 msl_take_state take_state;
1037 const unsigned HS_CACHE_LINE_SIZE = 128;
1040 struct snoop_stats_data
1044 // total number of objects that we called
1046 size_t objects_checked_count;
1047 // total number of times we called gc_mark
1048 // on a 0 reference.
1049 size_t zero_ref_count;
1050 // total objects actually marked.
1051 size_t objects_marked_count;
1052 // number of objects written to the mark stack because
1054 size_t stolen_stack_count;
1055 // number of objects pushed onto the mark stack because
1056 // of the partial mark code path.
1057 size_t partial_stack_count;
1058 // number of objects pushed onto the mark stack because
1059 // of the non partial mark code path.
1060 size_t normal_stack_count;
1061 // number of references marked without mark stack.
1062 size_t non_stack_count;
1064 // number of times we detect next heap's mark stack
1066 size_t stack_idle_count;
1068 // number of times we do switch to thread.
1069 size_t switch_to_thread_count;
1071 // number of times we are checking if the next heap's
1072 // mark stack is busy.
1073 size_t check_level_count;
1074 // number of times next stack is busy and level is
1077 // how many interlocked exchange operations we did
1078 size_t interlocked_count;
1079 // number of times parent objects were stolen
1080 size_t partial_mark_parent_count;
1081 // number of times we look at a normal stolen entry,
1082 // or the beginning/ending PM pair.
1083 size_t stolen_or_pm_count;
1084 // number of times we see 2 for the entry.
1085 size_t stolen_entry_count;
1086 // number of times we see a PM entry that's not ready.
1087 size_t pm_not_ready_count;
1088 // number of stolen normal marked objects and partial mark children.
1089 size_t normal_count;
1090 // number of times the bottom of mark stack was cleared.
1091 size_t stack_bottom_clear_count;
1093 #endif //SNOOP_STATS
1095 struct no_gc_region_info
1097 size_t soh_allocation_size;
1098 size_t loh_allocation_size;
1101 size_t num_gcs_induced;
1102 start_no_gc_region_status start_status;
1103 gc_pause_mode saved_pause_mode;
1104 size_t saved_gen0_min_size;
1105 size_t saved_gen3_min_size;
1109 //class definition of the internal class
1110 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1111 extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1112 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1115 friend struct ::_DacGlobals;
1116 #ifdef DACCESS_COMPILE
1117 friend class ::ClrDataAccess;
1118 friend class ::DacHeapWalker;
1119 #endif //DACCESS_COMPILE
1121 friend class GCHeap;
1122 #ifdef FEATURE_PREMORTEM_FINALIZATION
1123 friend class CFinalize;
1124 #endif // FEATURE_PREMORTEM_FINALIZATION
1125 friend struct ::alloc_context;
1126 friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, DWORD dwFlags);
1127 friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1128 friend class t_join;
1129 friend class gc_mechanisms;
1130 friend class seg_free_spaces;
1132 #ifdef BACKGROUND_GC
1133 friend class exclusive_sync;
1134 friend class recursive_gc_sync;
1135 #endif //BACKGROUND_GC
1137 #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1138 friend void checkGCWriteBarrier();
1139 friend void initGCShadow();
1140 #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1142 #ifdef MULTIPLE_HEAPS
1143 typedef void (gc_heap::* card_fn) (BYTE**, int);
1144 #define call_fn(fn) (this->*fn)
1147 typedef void (* card_fn) (BYTE**);
1148 #define call_fn(fn) (*fn)
1149 #define __this (gc_heap*)0
1156 void print_free_list (int gen, heap_segment* seg);
1159 #ifdef SYNCHRONIZATION_STATS
1162 void init_sync_stats()
1164 #ifdef MULTIPLE_HEAPS
1165 for (int i = 0; i < gc_heap::n_heaps; i++)
1167 gc_heap::g_heaps[i]->init_heap_sync_stats();
1169 #else //MULTIPLE_HEAPS
1170 init_heap_sync_stats();
1171 #endif //MULTIPLE_HEAPS
1175 void print_sync_stats(unsigned int gc_count_during_log)
1177 // bad/good gl acquire is cumulative over the log interval (because the numbers are too small)
1178 // min/max msl_acquire is the min/max during the log interval, not each GC.
1179 // Threads is the number of allocation threads for the last GC.
1180 // num of msl acquired, avg_msl, high and low are all for each GC.
1181 printf("%2s%2s%10s%10s%12s%6s%4s%8s( st, wl, stw, dpw)\n",
1182 "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
1184 #ifdef MULTIPLE_HEAPS
1185 for (int i = 0; i < gc_heap::n_heaps; i++)
1187 gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
1189 #else //MULTIPLE_HEAPS
1190 print_heap_sync_stats(0, gc_count_during_log);
1191 #endif //MULTIPLE_HEAPS
1194 #endif //SYNCHRONIZATION_STATS
1197 void verify_soh_segment_list();
1199 void verify_mark_array_cleared (heap_segment* seg);
1201 void verify_mark_array_cleared();
1203 void verify_seg_end_mark_array_cleared();
1205 void verify_partial();
1209 void verify_free_lists();
1211 void verify_heap (BOOL begin_gc_p);
1212 #endif //VERIFY_HEAP
1215 void fire_pevents();
1217 #ifdef FEATURE_BASICFREEZE
1218 static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
1222 heap_segment* make_heap_segment (BYTE* new_pages,
1226 l_heap* make_large_heap (BYTE* new_pages, size_t size, BOOL managed);
1229 gc_heap* make_gc_heap(
1230 #if defined (MULTIPLE_HEAPS)
1233 #endif //MULTIPLE_HEAPS
1237 void destroy_gc_heap(gc_heap* heap);
1240 HRESULT initialize_gc (size_t segment_size,
1242 #ifdef MULTIPLE_HEAPS
1243 , unsigned number_of_heaps
1244 #endif //MULTIPLE_HEAPS
1251 CObjectHeader* allocate (size_t jsize,
1252 alloc_context* acontext);
1254 #ifdef MULTIPLE_HEAPS
1255 static void balance_heaps (alloc_context* acontext);
1257 gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
1259 DWORD __stdcall gc_thread_stub (void* arg);
1260 #endif //MULTIPLE_HEAPS
1262 CObjectHeader* try_fast_alloc (size_t jsize);
1264 // For LOH allocations we only update the alloc_bytes_loh in allocation
1265 // context - we don't actually use the ptr/limit from it so I am
1266 // making this explicit by not passing in the alloc_context.
1268 CObjectHeader* allocate_large_object (size_t size, __int64& alloc_bytes);
1270 #ifdef FEATURE_STRUCTALIGN
1272 BYTE* pad_for_alignment_large (BYTE* newAlloc, int requiredAlignment, size_t size);
1273 #endif // FEATURE_STRUCTALIGN
1282 BOOL expand_soh_with_minimal_gc();
1284 // EE is always suspended when this method is called.
1285 // returning FALSE means we actually didn't do a GC. This happens
1286 // when we figured that we needed to do a BGC.
1288 int garbage_collect (int n);
1291 DWORD* make_card_table (BYTE* start, BYTE* end);
1294 void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
1297 int grow_brick_card_tables (BYTE* start,
1300 heap_segment* new_seg,
1305 BOOL is_mark_set (BYTE* o);
1310 void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1312 struct walk_relocate_args
1316 mark* pinned_plug_entry;
1320 void walk_plug (BYTE* plug, size_t size, BOOL check_last_object_p,
1321 walk_relocate_args* args, size_t profiling_context);
1324 void walk_relocation (int condemned_gen_number,
1325 BYTE* first_condemned_address, size_t profiling_context);
1328 void walk_relocation_in_brick (BYTE* tree, walk_relocate_args* args, size_t profiling_context);
1330 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1332 void walk_relocation_for_bgc(size_t profiling_context);
1335 void make_free_lists_for_profiler_for_bgc();
1336 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1339 int generation_to_condemn (int n,
1340 BOOL* blocking_collection_p,
1341 BOOL* elevation_requested_p,
1345 int joined_generation_to_condemn (BOOL should_evaluate_elevation, int n_initial, BOOL* blocking_collection
1346 STRESS_HEAP_ARG(int n_original));
1349 size_t min_reclaim_fragmentation_threshold(ULONGLONG total_mem, DWORD num_heaps);
1352 ULONGLONG min_high_fragmentation_threshold(ULONGLONG available_mem, DWORD num_heaps);
1355 void concurrent_print_time_delta (const char* msg);
1357 void free_list_info (int gen_num, const char* msg);
1359 // in svr GC on entry and exit of this method, the GC threads are not
1365 void save_data_for_no_gc();
1368 void restore_data_for_no_gc();
1371 void update_collection_counts_for_no_gc();
1374 BOOL should_proceed_with_gc();
1377 void record_gcs_during_no_gc();
1380 BOOL find_loh_free_for_no_gc();
1383 BOOL find_loh_space_for_no_gc();
1386 BOOL commit_loh_for_no_gc (heap_segment* seg);
1389 start_no_gc_region_status prepare_for_no_gc_region (ULONGLONG total_size,
1390 BOOL loh_size_known,
1392 BOOL disallow_full_blocking);
1395 BOOL loh_allocated_for_no_gc();
1398 void release_no_gc_loh_segments();
1401 void thread_no_gc_loh_segments();
1404 void allocate_for_no_gc_after_gc();
1407 void set_loh_allocations_for_no_gc();
1410 void set_soh_allocations_for_no_gc();
1413 void prepare_for_no_gc_after_gc();
1416 void set_allocations_for_no_gc();
1419 BOOL should_proceed_for_no_gc();
1422 start_no_gc_region_status get_start_no_gc_region_status();
1425 end_no_gc_region_status end_no_gc_region();
1428 void handle_failure_for_no_gc();
1431 void fire_etw_allocation_event (size_t allocation_amount, int gen_number, BYTE* object_address);
1434 void fire_etw_pin_object_event (BYTE* object, BYTE** ppObject);
1437 size_t limit_from_size (size_t size, size_t room, int gen_number,
1440 int try_allocate_more_space (alloc_context* acontext, size_t jsize,
1441 int alloc_generation_number);
1443 BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
1444 int alloc_generation_number);
1447 size_t get_full_compact_gc_count();
1450 BOOL short_on_end_of_seg (int gen_number,
1455 BOOL a_fit_free_list_p (int gen_number,
1457 alloc_context* acontext,
1460 #ifdef BACKGROUND_GC
1462 void wait_for_background (alloc_wait_reason awr);
1465 void wait_for_bgc_high_memory (alloc_wait_reason awr);
1468 void bgc_loh_alloc_clr (BYTE* alloc_start,
1470 alloc_context* acontext,
1475 #endif //BACKGROUND_GC
1477 #ifdef BACKGROUND_GC
1479 void wait_for_background_planning (alloc_wait_reason awr);
1482 BOOL bgc_loh_should_allocate();
1483 #endif //BACKGROUND_GC
1485 #define max_saved_spinlock_info 48
1487 #ifdef SPINLOCK_HISTORY
1489 int spinlock_info_index;
1492 spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
1493 #endif //SPINLOCK_HISTORY
1496 void add_saved_spinlock_info (
1497 msl_enter_state enter_state,
1498 msl_take_state take_state);
1501 BOOL a_fit_free_list_large_p (size_t size,
1502 alloc_context* acontext,
1506 BOOL a_fit_segment_end_p (int gen_number,
1509 alloc_context* acontext,
1511 BOOL* commit_failed_p);
1513 BOOL loh_a_fit_segment_end_p (int gen_number,
1515 alloc_context* acontext,
1517 BOOL* commit_failed_p,
1520 BOOL loh_get_new_seg (generation* gen,
1523 BOOL* commit_failed_p,
1527 size_t get_large_seg_size (size_t size);
1530 BOOL retry_full_compact_gc (size_t size);
1533 BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
1534 BOOL* did_full_compact_gc);
1537 BOOL trigger_full_compact_gc (gc_reason gr,
1541 BOOL trigger_ephemeral_gc (gc_reason gr);
1544 BOOL soh_try_fit (int gen_number,
1546 alloc_context* acontext,
1548 BOOL* commit_failed_p,
1549 BOOL* short_seg_end_p);
1551 BOOL loh_try_fit (int gen_number,
1553 alloc_context* acontext,
1555 BOOL* commit_failed_p,
1559 BOOL allocate_small (int gen_number,
1561 alloc_context* acontext,
1567 c_gc_state_planning,
1571 #ifdef RECORD_LOH_STATE
1572 #define max_saved_loh_states 12
1574 int loh_state_index;
1576 struct loh_state_info
1578 allocation_state alloc_state;
1583 loh_state_info last_loh_states[max_saved_loh_states];
1585 void add_saved_loh_state (allocation_state loh_state_to_save, DWORD thread_id);
1586 #endif //RECORD_LOH_STATE
1588 BOOL allocate_large (int gen_number,
1590 alloc_context* acontext,
1594 int init_semi_shared();
1596 int init_gc_heap (int heap_number);
1598 void self_destroy();
1600 void destroy_semi_shared();
1602 void repair_allocation_contexts (BOOL repair_p);
1604 void fix_allocation_contexts (BOOL for_gc_p);
1606 void fix_youngest_allocation_area (BOOL for_gc_p);
1608 void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
1611 void fix_large_allocation_area (BOOL for_gc_p);
1613 void fix_older_allocation_area (generation* older_gen);
1615 void set_allocation_heap_segment (generation* gen);
1617 void reset_allocation_pointers (generation* gen, BYTE* start);
1619 int object_gennum (BYTE* o);
1621 int object_gennum_plan (BYTE* o);
1623 void init_heap_segment (heap_segment* seg);
1625 void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
1626 #ifdef FEATURE_BASICFREEZE
1628 BOOL insert_ro_segment (heap_segment* seg);
1630 void remove_ro_segment (heap_segment* seg);
1631 #endif //FEATURE_BASICFREEZE
1633 BOOL set_ro_segment_in_range (heap_segment* seg);
1635 BOOL unprotect_segment (heap_segment* seg);
1637 heap_segment* soh_get_segment_to_expand();
1639 heap_segment* get_segment (size_t size, BOOL loh_p);
1641 void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
1643 void seg_mapping_table_remove_segment (heap_segment* seg);
1645 heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc);
1647 void thread_loh_segment (heap_segment* new_seg);
1649 heap_segment* get_segment_for_loh (size_t size
1650 #ifdef MULTIPLE_HEAPS
1652 #endif //MULTIPLE_HEAPS
1655 void reset_heap_segment_pages (heap_segment* seg);
1657 void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1659 void decommit_heap_segment (heap_segment* seg);
1661 void clear_gen0_bricks();
1662 #ifdef BACKGROUND_GC
1664 void rearrange_small_heap_segments();
1665 #endif //BACKGROUND_GC
1667 void rearrange_large_heap_segments();
1669 void rearrange_heap_segments(BOOL compacting);
1671 void switch_one_quantum();
1673 void reset_ww_by_chunk (BYTE* start_address, size_t total_reset_size);
1675 void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1677 void reset_write_watch (BOOL concurrent_p);
1679 void adjust_ephemeral_limits ();
1681 void make_generation (generation& gen, heap_segment* seg,
1682 BYTE* start, BYTE* pointer);
1685 #define USE_PADDING_FRONT 1
1686 #define USE_PADDING_TAIL 2
1689 BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, BYTE* alloc_pointer, BYTE* alloc_limit,
1690 BYTE* old_loc=0, int use_padding=USE_PADDING_TAIL);
1692 BOOL a_size_fit_p (size_t size, BYTE* alloc_pointer, BYTE* alloc_limit,
1696 void handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
1697 BYTE* allocated, BYTE* reserved);
1700 size_t card_of ( BYTE* object);
1702 BYTE* brick_address (size_t brick);
1704 size_t brick_of (BYTE* add);
1706 BYTE* card_address (size_t card);
1708 size_t card_to_brick (size_t card);
1710 void clear_card (size_t card);
1712 void set_card (size_t card);
1714 BOOL card_set_p (size_t card);
1716 void card_table_set_bit (BYTE* location);
1720 void update_card_table_bundle();
1722 void reset_card_table_write_watch();
1724 void card_bundle_clear(size_t cardb);
1726 void card_bundles_set (size_t start_cardb, size_t end_cardb);
1728 BOOL card_bundle_set_p (size_t cardb);
1730 BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1732 void enable_card_bundles();
1734 BOOL card_bundles_enabled();
1736 #endif //CARD_BUNDLE
1739 BOOL find_card (DWORD* card_table, size_t& card,
1740 size_t card_word_end, size_t& end_card);
1742 BOOL grow_heap_segment (heap_segment* seg, BYTE* high_address);
1744 int grow_heap_segment (heap_segment* seg, BYTE* high_address, BYTE* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1746 void copy_brick_card_range (BYTE* la, DWORD* old_card_table,
1747 short* old_brick_table,
1749 BYTE* start, BYTE* end, BOOL heap_expand);
1751 void init_brick_card_range (heap_segment* seg);
1753 void copy_brick_card_table_l_heap ();
1755 void copy_brick_card_table(BOOL heap_expand);
1757 void clear_brick_table (BYTE* from, BYTE* end);
1759 void set_brick (size_t index, ptrdiff_t val);
1761 int brick_entry (size_t index);
1764 unsigned int mark_array_marked (BYTE* add);
1766 void mark_array_set_marked (BYTE* add);
1768 BOOL is_mark_bit_set (BYTE* add);
1770 void gmark_array_set_marked (BYTE* add);
1772 void set_mark_array_bit (size_t mark_bit);
1774 BOOL mark_array_bit_set (size_t mark_bit);
1776 void mark_array_clear_marked (BYTE* add);
1778 void clear_mark_array (BYTE* from, BYTE* end, BOOL check_only=TRUE);
1779 #ifdef BACKGROUND_GC
1781 void seg_clear_mark_array_bits_soh (heap_segment* seg);
1783 void clear_batch_mark_array_bits (BYTE* start, BYTE* end);
1785 void bgc_clear_batch_mark_array_bits (BYTE* start, BYTE* end);
1787 void clear_mark_array_by_objects (BYTE* from, BYTE* end, BOOL loh_p);
1790 void set_batch_mark_array_bits (BYTE* start, BYTE* end);
1792 void check_batch_mark_array_bits (BYTE* start, BYTE* end);
1793 #endif //VERIFY_HEAP
1794 #endif //BACKGROUND_GC
1798 BOOL large_object_marked (BYTE* o, BOOL clearp);
1800 #ifdef BACKGROUND_GC
1802 BOOL background_allowed_p();
1803 #endif //BACKGROUND_GC
1806 void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1809 void check_for_full_gc (int gen_num, size_t size);
1812 void adjust_limit (BYTE* start, size_t limit_size, generation* gen,
1815 void adjust_limit_clr (BYTE* start, size_t limit_size,
1816 alloc_context* acontext, heap_segment* seg,
1819 void leave_allocation_segment (generation* gen);
1822 void init_free_and_plug();
1825 void print_free_and_plug (const char* msg);
1828 void add_gen_plug (int gen_number, size_t plug_size);
1831 void add_gen_free (int gen_number, size_t free_size);
1834 void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1837 void remove_gen_free (int gen_number, size_t free_size);
1840 BYTE* allocate_in_older_generation (generation* gen, size_t size,
1841 int from_gen_number,
1843 REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1845 generation* ensure_ephemeral_heap_segment (generation* consing_gen);
1847 BYTE* allocate_in_condemned_generations (generation* gen,
1849 int from_gen_number,
1851 BYTE* next_pinned_plug=0,
1852 heap_segment* current_seg=0,
1853 #endif //SHORT_PLUGS
1855 REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1856 #ifdef INTERIOR_POINTERS
1857 // Verifies that interior is actually in the range of seg; otherwise
1860 heap_segment* find_segment (BYTE* interior, BOOL small_segment_only_p);
1863 heap_segment* find_segment_per_heap (BYTE* interior, BOOL small_segment_only_p);
1866 BYTE* find_object_for_relocation (BYTE* o, BYTE* low, BYTE* high);
1867 #endif //INTERIOR_POINTERS
1870 gc_heap* heap_of (BYTE* object);
1873 gc_heap* heap_of_gc (BYTE* object);
1876 size_t& promoted_bytes (int);
1879 BYTE* find_object (BYTE* o, BYTE* low);
1882 dynamic_data* dynamic_data_of (int gen_number);
1884 ptrdiff_t get_desired_allocation (int gen_number);
1886 ptrdiff_t get_new_allocation (int gen_number);
1888 ptrdiff_t get_allocation (int gen_number);
1890 bool new_allocation_allowed (int gen_number);
1891 #ifdef BACKGROUND_GC
1893 void allow_new_allocation (int gen_number);
1895 void disallow_new_allocation (int gen_number);
1896 #endif //BACKGROUND_GC
1898 void reset_pinned_queue();
1900 void reset_pinned_queue_bos();
1902 void set_allocator_next_pin (generation* gen);
1904 void set_allocator_next_pin (BYTE* alloc_pointer, BYTE*& alloc_limit);
1906 void enque_pinned_plug (generation* gen, BYTE* plug, size_t len);
1908 void enque_pinned_plug (BYTE* plug,
1909 BOOL save_pre_plug_info_p,
1910 BYTE* last_object_in_last_plug);
1912 void merge_with_last_pinned_plug (BYTE* last_pinned_plug, size_t plug_size);
1914 void set_pinned_info (BYTE* last_pinned_plug,
1916 BYTE* alloc_pointer,
1917 BYTE*& alloc_limit);
1919 void set_pinned_info (BYTE* last_pinned_plug, size_t plug_len, generation* gen);
1921 void save_post_plug_info (BYTE* last_pinned_plug, BYTE* last_object_in_last_plug, BYTE* post_plug);
1923 size_t deque_pinned_plug ();
1925 mark* pinned_plug_of (size_t bos);
1927 mark* oldest_pin ();
1929 mark* before_oldest_pin();
1931 BOOL pinned_plug_que_empty_p ();
1933 void make_mark_stack (mark* arr);
1936 int& mark_stack_busy();
1938 VOLATILE(BYTE*)& ref_mark_stack (gc_heap* hp, int index);
1940 #ifdef BACKGROUND_GC
1942 size_t& bpromoted_bytes (int);
1944 void make_background_mark_stack (BYTE** arr);
1946 void make_c_mark_list (BYTE** arr);
1947 #endif //BACKGROUND_GC
1949 generation* generation_of (int n);
1951 BOOL gc_mark1 (BYTE* o);
1953 BOOL gc_mark (BYTE* o, BYTE* low, BYTE* high);
1955 BYTE* mark_object(BYTE* o THREAD_NUMBER_DCL);
1958 void ha_mark_object_simple (BYTE** o THREAD_NUMBER_DCL);
1959 #endif //HEAP_ANALYZE
1961 void mark_object_simple (BYTE** o THREAD_NUMBER_DCL);
1963 void mark_object_simple1 (BYTE* o, BYTE* start THREAD_NUMBER_DCL);
1970 #ifdef BACKGROUND_GC
1973 BOOL background_marked (BYTE* o);
1975 BOOL background_mark1 (BYTE* o);
1977 BOOL background_mark (BYTE* o, BYTE* low, BYTE* high);
1979 BYTE* background_mark_object (BYTE* o THREAD_NUMBER_DCL);
1981 void background_mark_simple (BYTE* o THREAD_NUMBER_DCL);
1983 void background_mark_simple1 (BYTE* o THREAD_NUMBER_DCL);
1985 void background_promote (Object**, ScanContext* , DWORD);
1987 BOOL background_object_marked (BYTE* o, BOOL clearp);
1989 void init_background_gc();
1991 BYTE* background_next_end (heap_segment*, BOOL);
1993 void generation_delete_heap_segment (generation*,
1994 heap_segment*, heap_segment*, heap_segment*);
1996 void set_mem_verify (BYTE*, BYTE*, BYTE);
1998 void process_background_segment_end (heap_segment*, generation*, BYTE*,
1999 heap_segment*, BOOL*);
2001 void process_n_background_segments (heap_segment*, heap_segment*, generation* gen);
2003 BOOL fgc_should_consider_object (BYTE* o,
2005 BOOL consider_bgc_mark_p,
2006 BOOL check_current_sweep_p,
2007 BOOL check_saved_sweep_p);
2009 void should_check_bgc_mark (heap_segment* seg,
2010 BOOL* consider_bgc_mark_p,
2011 BOOL* check_current_sweep_p,
2012 BOOL* check_saved_sweep_p);
2014 void background_ephemeral_sweep();
2016 void background_sweep ();
2018 void background_mark_through_object (BYTE* oo THREAD_NUMBER_DCL);
2020 BYTE* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2022 BYTE* background_first_overflow (BYTE* min_add,
2025 BOOL small_object_p);
2027 void background_process_mark_overflow_internal (int condemned_gen_number,
2028 BYTE* min_add, BYTE* max_add,
2031 BOOL background_process_mark_overflow (BOOL concurrent_p);
2033 // for foreground GC to get hold of background structures containing refs
2036 scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2039 BOOL bgc_mark_array_range (heap_segment* seg,
2044 void bgc_verify_mark_array_cleared (heap_segment* seg);
2046 void verify_mark_bits_cleared (BYTE* obj, size_t s);
2048 void clear_all_mark_array();
2049 #endif //BACKGROUND_GC
2052 BYTE* next_end (heap_segment* seg, BYTE* f);
2054 void fix_card_table ();
2056 void mark_through_object (BYTE* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2058 BOOL process_mark_overflow (int condemned_gen_number);
2060 void process_mark_overflow_internal (int condemned_gen_number,
2061 BYTE* min_address, BYTE* max_address);
2065 void print_snoop_stat();
2066 #endif //SNOOP_STATS
2071 BOOL check_next_mark_stack (gc_heap* next_heap);
2076 void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2079 void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2082 void pin_object (BYTE* o, BYTE** ppObject, BYTE* low, BYTE* high);
2084 void reset_mark_stack ();
2086 BYTE* insert_node (BYTE* new_node, size_t sequence_number,
2087 BYTE* tree, BYTE* last_node);
2089 size_t update_brick_table (BYTE* tree, size_t current_brick,
2090 BYTE* x, BYTE* plug_end);
2093 void plan_generation_start (generation* gen, generation* consing_gen, BYTE* next_plug_to_allocate);
2096 void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2099 void plan_generation_starts (generation*& consing_gen);
2102 void advance_pins_for_demotion (generation* gen);
2105 void process_ephemeral_boundaries(BYTE* x, int& active_new_gen_number,
2106 int& active_old_gen_number,
2107 generation*& consing_gen,
2108 BOOL& allocate_in_condemned);
2110 void seg_clear_mark_bits (heap_segment* seg);
2112 void sweep_ro_segments (heap_segment* start_seg);
2114 void store_plug_gap_info (BYTE* plug_start,
2116 BOOL& last_npinned_plug_p,
2117 BOOL& last_pinned_plug_p,
2118 BYTE*& last_pinned_plug,
2119 BOOL& pinned_plug_p,
2120 BYTE* last_object_in_last_plug,
2121 BOOL& merge_with_last_pin_p,
2122 // this is only for verification purposes
2123 size_t last_plug_len);
2125 void plan_phase (int condemned_gen_number);
2127 #ifdef FEATURE_LOH_COMPACTION
2128 // plan_loh can allocate memory so it can fail. If it fails, we will
2129 // fall back to sweeping.
2137 void relocate_in_loh_compact();
2139 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2141 void walk_relocation_loh (size_t profiling_context);
2142 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2145 BOOL loh_enque_pinned_plug (BYTE* plug, size_t len);
2148 void loh_set_allocator_next_pin();
2151 BOOL loh_pinned_plug_que_empty_p();
2154 size_t loh_deque_pinned_plug();
2157 mark* loh_pinned_plug_of (size_t bos);
2160 mark* loh_oldest_pin();
2163 BOOL loh_size_fit_p (size_t size, BYTE* alloc_pointer, BYTE* alloc_limit);
2166 BYTE* loh_allocate_in_condemned (BYTE* old_loc, size_t size);
2169 BOOL loh_object_p (BYTE* o);
2172 BOOL should_compact_loh();
2174 // If the LOH compaction mode is just to compact once,
2175 // we need to see if we should reset it back to not compact.
2176 // We would only reset if every heap's LOH was compacted.
2178 void check_loh_compact_mode (BOOL all_heaps_compacted_p);
2179 #endif //FEATURE_LOH_COMPACTION
2182 void decommit_ephemeral_segment_pages (int condemned_gen_number);
2184 void fix_generation_bounds (int condemned_gen_number,
2185 generation* consing_gen);
2187 BYTE* generation_limit (int gen_number);
2189 struct make_free_args
2191 int free_list_gen_number;
2192 BYTE* current_gen_limit;
2193 generation* free_list_gen;
2197 BYTE* allocate_at_end (size_t size);
2199 BOOL ensure_gap_allocation (int condemned_gen_number);
2200 // make_free_lists is only called by blocking GCs.
2202 void make_free_lists (int condemned_gen_number);
2204 void make_free_list_in_brick (BYTE* tree, make_free_args* args);
2206 void thread_gap (BYTE* gap_start, size_t size, generation* gen);
2208 void loh_thread_gap_front (BYTE* gap_start, size_t size, generation* gen);
2210 void make_unused_array (BYTE* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2212 void clear_unused_array (BYTE* x, size_t size);
2214 void relocate_address (BYTE** old_address THREAD_NUMBER_DCL);
2215 struct relocate_args
2221 mark* pinned_plug_entry;
2225 void reloc_survivor_helper (BYTE** pval);
2227 void check_class_object_demotion (BYTE* obj);
2229 void check_class_object_demotion_internal (BYTE* obj);
2232 void check_demotion_helper (BYTE** pval, BYTE* parent_obj);
2235 void relocate_survivor_helper (BYTE* plug, BYTE* plug_end);
2238 void verify_pins_with_post_plug_info (const char* msg);
2240 #ifdef COLLECTIBLE_CLASS
2242 void unconditional_set_card_collectible (BYTE* obj);
2243 #endif //COLLECTIBLE_CLASS
2246 void relocate_shortened_survivor_helper (BYTE* plug, BYTE* plug_end, mark* pinned_plug_entry);
2249 void relocate_obj_helper (BYTE* x, size_t s);
2252 void reloc_ref_in_shortened_obj (BYTE** address_to_set_card, BYTE** address_to_reloc);
2255 void relocate_pre_plug_info (mark* pinned_plug_entry);
2258 void relocate_shortened_obj_helper (BYTE* x, size_t s, BYTE* end, mark* pinned_plug_entry, BOOL is_pinned);
2261 void relocate_survivors_in_plug (BYTE* plug, BYTE* plug_end,
2262 BOOL check_last_object_p,
2263 mark* pinned_plug_entry);
2265 void relocate_survivors_in_brick (BYTE* tree, relocate_args* args);
2268 void update_oldest_pinned_plug();
2271 void relocate_survivors (int condemned_gen_number,
2272 BYTE* first_condemned_address );
2274 void relocate_phase (int condemned_gen_number,
2275 BYTE* first_condemned_address);
2281 ptrdiff_t last_plug_relocation;
2282 BYTE* before_last_plug;
2283 size_t current_compacted_brick;
2285 mark* pinned_plug_entry;
2286 BOOL check_gennum_p;
2291 dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2292 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2297 void copy_cards_range (BYTE* dest, BYTE* src, size_t len, BOOL copy_cards_p);
2299 void gcmemcopy (BYTE* dest, BYTE* src, size_t len, BOOL copy_cards_p);
2301 void compact_plug (BYTE* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2303 void compact_in_brick (BYTE* tree, compact_args* args);
2306 mark* get_next_pinned_entry (BYTE* tree,
2307 BOOL* has_pre_plug_info_p,
2308 BOOL* has_post_plug_info_p,
2312 mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2315 void recover_saved_pinned_info();
2318 void compact_phase (int condemned_gen_number, BYTE*
2319 first_condemned_address, BOOL clear_cards);
2321 void clear_cards (size_t start_card, size_t end_card);
2323 void clear_card_for_addresses (BYTE* start_address, BYTE* end_address);
2325 void copy_cards (size_t dst_card, size_t src_card,
2326 size_t end_card, BOOL nextp);
2328 void copy_cards_for_addresses (BYTE* dest, BYTE* src, size_t len);
2330 #ifdef BACKGROUND_GC
2332 void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2334 void copy_mark_bits_for_addresses (BYTE* dest, BYTE* src, size_t len);
2335 #endif //BACKGROUND_GC
2339 BOOL ephemeral_pointer_p (BYTE* o);
2341 void fix_brick_to_highest (BYTE* o, BYTE* next_o);
2343 BYTE* find_first_object (BYTE* start_address, BYTE* first_object);
2345 BYTE* compute_next_boundary (BYTE* low, int gen_number, BOOL relocating);
2347 void keep_card_live (BYTE* o, size_t& n_gen,
2348 size_t& cg_pointers_found);
2350 void mark_through_cards_helper (BYTE** poo, size_t& ngen,
2351 size_t& cg_pointers_found,
2352 card_fn fn, BYTE* nhigh,
2353 BYTE* next_boundary);
2356 BOOL card_transition (BYTE* po, BYTE* end, size_t card_word_end,
2357 size_t& cg_pointers_found,
2358 size_t& n_eph, size_t& n_card_set,
2359 size_t& card, size_t& end_card,
2360 BOOL& foundp, BYTE*& start_address,
2361 BYTE*& limit, size_t& n_cards_cleared);
2363 void mark_through_cards_for_segments (card_fn fn, BOOL relocating);
2366 void repair_allocation_in_expanded_heap (generation* gen);
2368 BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2370 BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2372 BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2373 #ifdef SEG_REUSE_STATS
2375 size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2376 #endif //SEG_REUSE_STATS
2378 void build_ordered_free_spaces (heap_segment* seg);
2380 void count_plug (size_t last_plug_size, BYTE*& last_plug);
2382 void count_plugs_in_brick (BYTE* tree, BYTE*& last_plug);
2384 void build_ordered_plug_indices ();
2386 void init_ordered_free_space_indices ();
2388 void trim_free_spaces_indices ();
2390 BOOL try_best_fit (BOOL end_of_segment_p);
2392 BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2394 BOOL process_free_space (heap_segment* seg,
2396 size_t min_free_size,
2397 size_t min_cont_size,
2398 size_t* total_free_space,
2399 size_t* largest_free_space);
2401 size_t compute_eph_gen_starts_size();
2403 void compute_new_ephemeral_size();
2405 BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2406 size_t min_cont_size, allocator* al);
2408 BYTE* allocate_in_expanded_heap (generation* gen, size_t size,
2409 BOOL& adjacentp, BYTE* old_loc,
2411 BOOL set_padding_on_saved_p,
2412 mark* pinned_plug_entry,
2413 #endif //SHORT_PLUGS
2414 BOOL consider_bestfit, int active_new_gen_number
2415 REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2417 void realloc_plug (size_t last_plug_size, BYTE*& last_plug,
2418 generation* gen, BYTE* start_address,
2419 unsigned int& active_new_gen_number,
2420 BYTE*& last_pinned_gap, BOOL& leftp,
2423 , mark* pinned_plug_entry
2424 #endif //SHORT_PLUGS
2427 void realloc_in_brick (BYTE* tree, BYTE*& last_plug, BYTE* start_address,
2429 unsigned int& active_new_gen_number,
2430 BYTE*& last_pinned_gap, BOOL& leftp);
2432 void realloc_plugs (generation* consing_gen, heap_segment* seg,
2433 BYTE* start_address, BYTE* end_address,
2434 unsigned active_new_gen_number);
2437 void set_expand_in_full_gc (int condemned_gen_number);
2440 void verify_no_pins (BYTE* start, BYTE* end);
2443 generation* expand_heap (int condemned_generation,
2444 generation* consing_gen,
2445 heap_segment* new_heap_segment);
2448 void save_ephemeral_generation_starts();
2450 static size_t get_time_now();
2453 bool init_dynamic_data ();
2455 float surv_to_growth (float cst, float limit, float max_limit);
2457 size_t desired_new_allocation (dynamic_data* dd, size_t out,
2458 int gen_number, int pass);
2461 void trim_youngest_desired_low_memory();
2464 void decommit_ephemeral_segment_pages();
2468 size_t trim_youngest_desired (DWORD memory_load,
2469 size_t total_new_allocation,
2470 size_t total_min_allocation);
2472 size_t joined_youngest_desired (size_t new_allocation);
2475 size_t get_total_heap_size ();
2477 size_t generation_size (int gen_number);
2479 size_t get_total_survived_size();
2481 size_t get_current_allocated();
2483 size_t get_total_allocated();
2485 size_t current_generation_size (int gen_number);
2487 size_t generation_plan_size (int gen_number);
2489 void compute_promoted_allocation (int gen_number);
2491 size_t compute_in (int gen_number);
2493 void compute_new_dynamic_data (int gen_number);
2495 gc_history_per_heap* get_gc_data_per_heap();
2497 size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2499 size_t generation_fragmentation (generation* gen,
2500 generation* consing_gen,
2503 size_t generation_sizes (generation* gen);
2505 size_t approximate_new_allocation();
2507 size_t end_space_after_gc();
2509 BOOL decide_on_compacting (int condemned_gen_number,
2510 size_t fragmentation,
2511 BOOL& should_expand);
2513 BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2515 void reset_large_object (BYTE* o);
2517 void sweep_large_objects ();
2519 void relocate_in_large_objects ();
2521 void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating);
2523 void descr_segment (heap_segment* seg);
2525 void descr_card_table ();
2527 void descr_generations (BOOL begin_gc_p);
2529 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2531 void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2533 void record_survived_for_profiler(int condemned_gen_number, BYTE * first_condemned_address);
2535 void notify_profiler_of_surviving_large_objects ();
2536 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2538 /*------------ Multiple non isolated heaps ----------------*/
2539 #ifdef MULTIPLE_HEAPS
2541 BOOL create_thread_support (unsigned number_of_heaps);
2543 void destroy_thread_support ();
2545 HANDLE create_gc_thread();
2547 DWORD gc_thread_function();
2549 #ifdef PARALLEL_MARK_LIST_SORT
2551 void sort_mark_list();
2553 void merge_mark_lists();
2555 void append_to_mark_list(BYTE **start, BYTE **end);
2556 #else //PARALLEL_MARK_LIST_SORT
2558 void combine_mark_lists();
2559 #endif //PARALLEL_MARK_LIST_SORT
2561 #endif //MULTIPLE_HEAPS
2563 /*------------ End of Multiple non isolated heaps ---------*/
2565 #ifndef SEG_MAPPING_TABLE
2567 heap_segment* segment_of (BYTE* add, ptrdiff_t & delta,
2568 BOOL verify_p = FALSE);
2569 #endif //SEG_MAPPING_TABLE
2571 #ifdef BACKGROUND_GC
2573 //this is called by the revisit_written_page(s) functions below
2575 BYTE* high_page (heap_segment* seg, BOOL concurrent_p);
2578 void revisit_written_page (BYTE* page, BYTE* end, BOOL concurrent_p,
2579 heap_segment* seg, BYTE*& last_page,
2580 BYTE*& last_object, BOOL large_objects_p,
2581 size_t& num_marked_objects);
2583 void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2586 void concurrent_scan_dependent_handles (ScanContext *sc);
2592 void bgc_suspend_EE ();
2598 void background_verify_mark (Object*& object, ScanContext* sc, DWORD flags);
2601 void background_scan_dependent_handles (ScanContext *sc);
2606 // Restores BGC settings if necessary.
2608 void recover_bgc_settings();
2611 void save_bgc_data_per_heap();
2614 BOOL should_commit_mark_array();
2617 void clear_commit_flag();
2620 void clear_commit_flag_global();
2623 void verify_mark_array_cleared (heap_segment* seg, DWORD* mark_array_addr);
2626 void verify_mark_array_cleared (BYTE* begin, BYTE* end, DWORD* mark_array_addr);
2629 BOOL commit_mark_array_by_range (BYTE* begin,
2631 DWORD* mark_array_addr);
2634 BOOL commit_mark_array_new_seg (gc_heap* hp,
2636 BYTE* new_lowest_address = 0);
2639 BOOL commit_mark_array_with_check (heap_segment* seg, DWORD* mark_array_addr);
2641 // commit the portion of the mark array that corresponds to
2642 // this segment (from beginning to reserved).
2643 // seg and heap_segment_reserved (seg) are guaranteed to be page aligned.
2646 BOOL commit_mark_array_by_seg (heap_segment* seg, DWORD* mark_array_addr);
2648 // During BGC init, we commit the mark array for all in range
2649 // segments whose mark array hasn't been committed or fully
2650 // committed. All rw segments are in range, only ro segments
2651 // can be partially in range.
2653 BOOL commit_mark_array_bgc_init (DWORD* mark_array_addr);
2656 BOOL commit_new_mark_array (DWORD* new_mark_array);
2658 // We need to commit all segments that intersect with the bgc
2659 // range. If a segment is only partially in range, we still
2660 // should commit the mark array for the whole segment as
2661 // we will set the mark array commit flag for this segment.
2663 BOOL commit_new_mark_array_global (DWORD* new_mark_array);
2665 // We can't decommit the first and the last page in the mark array
2666 // if the beginning and ending don't happen to be page aligned.
2668 void decommit_mark_array_by_seg (heap_segment* seg);
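// A minimal sketch (hypothetical helper, not the actual decommit routine) of
// the page trimming described above: only pages fully covered by [start, end)
// are safe to decommit; a partial first or last page is shared with data that
// must stay committed. OS_PAGE_SIZE is assumed to be the page-size constant
// used elsewhere in this file.
static void decommit_interior_pages_sketch (BYTE* start, BYTE* end)
{
    size_t page_mask = (size_t)OS_PAGE_SIZE - 1;
    BYTE* aligned_start = (BYTE*)(((size_t)start + page_mask) & ~page_mask); // round up
    BYTE* aligned_end = (BYTE*)((size_t)end & ~page_mask);                   // round down
    if (aligned_start < aligned_end)
    {
        VirtualFree (aligned_start, (size_t)(aligned_end - aligned_start), MEM_DECOMMIT);
    }
}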
2671 void background_mark_phase();
2674 void background_drain_mark_list (int thread);
2677 void background_grow_c_mark_list();
2680 void background_promote_callback(Object** object, ScanContext* sc, DWORD flags);
2683 void mark_absorb_new_alloc();
2689 BOOL prepare_bgc_thread(gc_heap* gh);
2691 BOOL create_bgc_thread(gc_heap* gh);
2693 BOOL create_bgc_threads_support (int number_of_heaps);
2695 BOOL create_bgc_thread_support();
2697 int check_for_ephemeral_alloc();
2699 void wait_to_proceed();
2701 void fire_alloc_wait_event_begin (alloc_wait_reason awr);
2703 void fire_alloc_wait_event_end (alloc_wait_reason awr);
2705 void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored);
2707 DWORD background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
2711 void kill_gc_thread();
2713 DWORD bgc_thread_function();
2715 void do_background_gc();
2717 DWORD __stdcall bgc_thread_stub (void* arg);
2719 #ifdef FEATURE_REDHAWK
2720 // Helper used to wrap the start routine of background GC threads so we can do things like initialize the
2721 // Redhawk thread state which requires running in the new thread's context.
2722 static DWORD WINAPI rh_bgc_thread_stub(void * pContext);
2724 // Context passed to the above.
2725 struct rh_bgc_thread_ctx
2727 PTHREAD_START_ROUTINE m_pRealStartRoutine;
2728 gc_heap * m_pRealContext;
2730 #endif //FEATURE_REDHAWK
2732 #endif //BACKGROUND_GC
2737 VOLATILE(bool) internal_gc_done;
2739 #ifdef BACKGROUND_GC
2741 DWORD cm_in_progress;
2744 BOOL expanded_in_fgc;
2746 // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
2747 // we do right before the bgc starts.
2749 BOOL dont_restart_ee_p;
2752 CLREvent bgc_start_event;
2753 #endif //BACKGROUND_GC
2756 DWORD wait_for_gc_done(INT32 timeOut = INFINITE);
2758 // Returns TRUE if the thread used to be in cooperative mode
2759 // before calling this function.
2761 BOOL enable_preemptive (Thread* current_thread);
2763 void disable_preemptive (Thread* current_thread, BOOL restore_cooperative);
2765 /* ------------------- per heap members --------------------------*/
2768 #ifndef MULTIPLE_HEAPS
2769 CLREvent gc_done_event;
2770 #else // MULTIPLE_HEAPS
2771 CLREvent gc_done_event;
2772 #endif // MULTIPLE_HEAPS
2775 VOLATILE(LONG) gc_done_event_lock;
2778 VOLATILE(bool) gc_done_event_set;
2784 void reset_gc_done();
2787 void enter_gc_done_event_lock();
2790 void exit_gc_done_event_lock();
2792 #ifdef MULTIPLE_HEAPS
2794 BYTE* ephemeral_low; //lowest ephemeral address
2797 BYTE* ephemeral_high; //highest ephemeral address
2798 #endif //MULTIPLE_HEAPS
2807 #ifdef MULTIPLE_HEAPS
2811 SPTR_DECL(DWORD, mark_array);
2812 #endif //MULTIPLE_HEAPS
2817 DWORD* card_bundle_table;
2818 #endif //CARD_BUNDLE
2820 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2822 sorted_table* seg_table;
2823 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2826 VOLATILE(BOOL) gc_started;
2828 // The following 2 events are there to support the gen2
2829 // notification feature which is only enabled if concurrent GC is disabled.
2832 CLREvent full_gc_approach_event;
2835 CLREvent full_gc_end_event;
2837 // Full GC Notification percentages.
2839 DWORD fgn_maxgen_percent;
2842 DWORD fgn_loh_percent;
2845 VOLATILE(bool) full_gc_approach_event_set;
2847 #ifdef BACKGROUND_GC
2849 BOOL fgn_last_gc_was_concurrent;
2850 #endif //BACKGROUND_GC
2853 size_t fgn_last_alloc;
2855 static DWORD user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
2857 static wait_full_gc_status full_gc_wait (CLREvent *event, int time_out_ms);
2863 BYTE* demotion_high;
2869 BYTE* last_gen1_pin_end;
2872 gen_to_condemn_tuning gen_to_condemn_reasons;
2875 size_t etw_allocation_running_amount[2];
2878 int gc_policy; //sweep, compact, expand
2880 #ifdef MULTIPLE_HEAPS
2882 CLREvent gc_start_event;
2885 CLREvent ee_suspend_event;
2888 heap_segment* new_heap_segment;
2890 #define alloc_quantum_balance_units (16)
2893 size_t min_balance_threshold;
2894 #else //MULTIPLE_HEAPS
2897 size_t allocation_running_time;
2900 size_t allocation_running_amount;
2902 #endif //MULTIPLE_HEAPS
2905 gc_mechanisms settings;
2908 gc_history_global gc_data_global;
2911 size_t gc_last_ephemeral_decommit_time;
2914 size_t gc_gen0_desired_high;
2917 size_t gen0_big_free_spaces;
2921 size_t youngest_gen_desired_th;
2924 size_t mem_one_percent;
2927 ULONGLONG total_physical_mem;
2930 ULONGLONG available_physical_mem;
2934 size_t last_gc_index;
2937 size_t min_segment_size;
2940 BYTE* lowest_address;
2943 BYTE* highest_address;
2946 BOOL ephemeral_promotion;
2948 BYTE* saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
2950 size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
2953 #ifdef MULTIPLE_HEAPS
2959 VOLATILE(int) alloc_context_count;
2960 #else //MULTIPLE_HEAPS
2961 #define vm_heap ((GCHeap*) g_pGCHeap)
2962 #define heap_number (0)
2963 #endif //MULTIPLE_HEAPS
2965 #ifndef MULTIPLE_HEAPS
2966 SPTR_DECL(heap_segment,ephemeral_heap_segment);
2969 heap_segment* ephemeral_heap_segment;
2970 #endif // !MULTIPLE_HEAPS
2973 size_t time_bgc_last;
2976 BYTE* gc_low; // lowest address being condemned
2979 BYTE* gc_high; //highest address being condemned
2982 size_t mark_stack_tos;
2985 size_t mark_stack_bos;
2988 size_t mark_stack_array_length;
2991 mark* mark_stack_array;
2994 BOOL verify_pinned_queue_p;
2997 BYTE* oldest_pinned_plug;
2999 #ifdef FEATURE_LOH_COMPACTION
3001 size_t loh_pinned_queue_tos;
3004 size_t loh_pinned_queue_bos;
3007 size_t loh_pinned_queue_length;
3010 int loh_pinned_queue_decay;
3013 mark* loh_pinned_queue;
3015 // This is for forced LOH compaction via the complus env var
3017 BOOL loh_compaction_always_p;
3019 // This is set by the user.
3021 gc_loh_compaction_mode loh_compaction_mode;
3023 // We may not compact LOH on every heap if we can't
3024 // grow the pinned queue. This is to indicate whether
3025 // this heap's LOH is compacted or not. So even if
3026 // settings.loh_compaction is TRUE this may not be TRUE.
3028 BOOL loh_compacted_p;
3029 #endif //FEATURE_LOH_COMPACTION
3031 #ifdef BACKGROUND_GC
3034 DWORD bgc_thread_id;
3038 BYTE* background_written_addresses [array_size+2];
3039 #endif //WRITE_WATCH
3041 #if defined (DACCESS_COMPILE) && !defined (MULTIPLE_HEAPS)
3042 // doesn't need to be volatile for DAC.
3043 SVAL_DECL(c_gc_state, current_c_gc_state);
3046 VOLATILE(c_gc_state) current_c_gc_state; //tells the large object allocator to
3047 //mark the object as new since the start of gc.
3048 #endif //DACCESS_COMPILE && !MULTIPLE_HEAPS
3051 gc_mechanisms saved_bgc_settings;
3054 gc_history_per_heap saved_bgc_data_per_heap;
3057 BOOL bgc_data_saved_p;
3060 BOOL bgc_thread_running; // gc thread is in its main loop
3063 BOOL keep_bgc_threads_p;
3065 // This event is used by BGC threads to do something on
3066 // one specific thread while other BGC threads have to
3067 // wait. This is different from a join because you can't
3068 // specify which thread should be doing some task
3069 // while other threads have to wait.
3070 // For example, to make the BGC threads managed threads
3071 // we need to create them on the thread that called
3072 // SuspendEE which is heap 0.
3074 CLREvent bgc_threads_sync_event;
3080 CRITICAL_SECTION bgc_threads_timeout_cs;
3083 CLREvent background_gc_done_event;
3086 CLREvent background_gc_create_event;
3089 CLREvent ee_proceed_event;
3092 CLREvent gc_lh_block_event;
3095 BOOL gc_can_use_concurrent;
3098 BOOL temp_disable_concurrent_p;
3101 BOOL do_ephemeral_gc_p;
3104 BOOL do_concurrent_p;
3107 VOLATILE(bgc_state) current_bgc_state;
3112 bgc_state current_bgc_state;
3114 // This is in bytes per ms; consider breaking it
3115 // into the efficiency per phase.
3116 size_t gc_efficiency;
3128 #define max_history_count 64
3131 int gchist_index_per_heap;
3134 gc_history gchist_per_heap[max_history_count];
3140 gc_mechanisms_store gchist[max_history_count];
3143 void add_to_history_per_heap();
3146 void add_to_history();
3149 size_t total_promoted_bytes;
3152 size_t bgc_overflow_count;
3155 size_t bgc_begin_loh_size;
3157 size_t end_loh_size;
3159 // We need to throttle the LOH allocations during BGC since we can't
3160 // collect LOH when BGC is in progress.
3161 // We allow the LOH heap size to double during a BGC. So for every
3162 // 10% increase we will have the LOH allocating thread sleep for one more
3163 // ms. So if we are already 30% over the original heap size, the thread will sleep for 3 ms.
3166 DWORD bgc_alloc_spin_loh;
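// A minimal sketch (hypothetical helper, not the GC's actual code) of the
// throttle arithmetic described above: one extra millisecond of sleep per
// 10% of LOH growth over the size recorded at the start of the BGC.
static DWORD loh_alloc_spin_sketch (size_t begin_loh_size, size_t current_loh_size)
{
    if ((begin_loh_size == 0) || (current_loh_size <= begin_loh_size))
        return 0;
    size_t growth_percent = ((current_loh_size - begin_loh_size) * 100) / begin_loh_size;
    return (DWORD)(growth_percent / 10); // e.g. 30% over the original size -> sleep 3 ms
}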
3168 // This includes what we allocate at the end of the segment - allocating
3169 // from the free list doesn't increase the heap size.
3171 size_t bgc_loh_size_increased;
3174 size_t bgc_loh_allocated_in_free;
3177 size_t background_soh_alloc_count;
3180 size_t background_loh_alloc_count;
3183 BYTE** background_mark_stack_tos;
3186 BYTE** background_mark_stack_array;
3189 size_t background_mark_stack_array_length;
3192 BYTE* background_min_overflow_address;
3195 BYTE* background_max_overflow_address;
3197 // We can't process the soh range concurrently so we
3198 // wait till final mark to process it.
3200 BOOL processed_soh_overflow_p;
3203 BYTE* background_min_soh_overflow_address;
3206 BYTE* background_max_soh_overflow_address;
3209 heap_segment* saved_overflow_ephemeral_seg;
3211 #ifndef MULTIPLE_HEAPS
3212 SPTR_DECL(heap_segment, saved_sweep_ephemeral_seg);
3214 SPTR_DECL(BYTE, saved_sweep_ephemeral_start);
3216 SPTR_DECL(BYTE, background_saved_lowest_address);
3218 SPTR_DECL(BYTE, background_saved_highest_address);
3222 heap_segment* saved_sweep_ephemeral_seg;
3225 BYTE* saved_sweep_ephemeral_start;
3228 BYTE* background_saved_lowest_address;
3231 BYTE* background_saved_highest_address;
3232 #endif //!MULTIPLE_HEAPS
3234 // This is used for synchronization between the bgc thread
3235 // for this heap and the user threads allocating on this heap.
3238 exclusive_sync* bgc_alloc_lock;
3242 snoop_stats_data snoop_stat;
3243 #endif //SNOOP_STATS
3250 size_t c_mark_list_length;
3253 size_t c_mark_list_index;
3254 #endif //BACKGROUND_GC
3261 size_t mark_list_size;
3264 BYTE** mark_list_end;
3267 BYTE** mark_list_index;
3271 #ifdef PARALLEL_MARK_LIST_SORT
3273 BYTE** g_mark_list_copy;
3275 BYTE*** mark_list_piece_start;
3276 BYTE*** mark_list_piece_end;
3277 #endif //PARALLEL_MARK_LIST_SORT
3281 BYTE* min_overflow_address;
3284 BYTE* max_overflow_address;
3287 BYTE* shigh; //keeps track of the highest marked object
3290 BYTE* slow; //keeps track of the lowest marked object
3293 size_t allocation_quantum;
3296 size_t alloc_contexts_used;
3299 no_gc_region_info current_no_gc_region_info;
3302 size_t soh_allocation_no_gc;
3305 size_t loh_allocation_no_gc;
3308 heap_segment* saved_loh_segment_no_gc;
3311 BOOL proceed_with_gc_p;
3313 #define youngest_generation (generation_of (0))
3314 #define large_object_generation (generation_of (max_generation+1))
3316 #ifndef MULTIPLE_HEAPS
3317 SPTR_DECL(BYTE,alloc_allocated);
3320 BYTE* alloc_allocated; //keeps track of the highest
3321 //address allocated by alloc
3322 #endif // !MULTIPLE_HEAPS
3324 // The more_space_lock and gc_lock are used for 3 purposes:
3326 // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock)
3327 // 2) to synchronize allocations of large objects (more_space_lock)
3328 // 3) to synchronize the GC itself (gc_lock)
3331 GCSpinLock gc_lock; //lock while doing GC
3334 GCSpinLock more_space_lock; //lock while allocating more space
3336 #ifdef SYNCHRONIZATION_STATS
3339 unsigned int good_suspension;
3342 unsigned int bad_suspension;
3344 // Number of times when msl_acquire is > 200 cycles.
3346 unsigned int num_high_msl_acquire;
3348 // Number of times when msl_acquire is < 200 cycles.
3350 unsigned int num_low_msl_acquire;
3352 // Number of times the more_space_lock is acquired.
3354 unsigned int num_msl_acquired;
3356 // Total cycles it takes to acquire the more_space_lock.
3358 ULONGLONG total_msl_acquire;
3361 void init_heap_sync_stats()
3363 good_suspension = 0;
3365 num_msl_acquired = 0;
3366 total_msl_acquire = 0;
3367 num_high_msl_acquire = 0;
3368 num_low_msl_acquire = 0;
3369 more_space_lock.init();
3374 void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3376 printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3378 alloc_contexts_used,
3381 (unsigned int)(total_msl_acquire / gc_count_during_log),
3382 num_high_msl_acquire / gc_count_during_log,
3383 num_low_msl_acquire / gc_count_during_log,
3384 num_msl_acquired / gc_count_during_log,
3385 more_space_lock.num_switch_thread / gc_count_during_log,
3386 more_space_lock.num_wait_longer / gc_count_during_log,
3387 more_space_lock.num_switch_thread_w / gc_count_during_log,
3388 more_space_lock.num_disable_preemptive_w / gc_count_during_log);
3391 #endif //SYNCHRONIZATION_STATS
3393 #ifdef MULTIPLE_HEAPS
3395 generation generation_table [NUMBERGENERATIONS+1];
3399 #define NUM_LOH_ALIST (7)
3400 #define BASE_LOH_ALIST (64*1024)
3402 alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
3404 #define NUM_GEN2_ALIST (12)
3405 #define BASE_GEN2_ALIST (1*64)
3407 alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
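// For a sense of the bucket thresholds (assuming, as is typical for this kind
// of segregated free list, that each successive bucket doubles the size limit
// of the previous one): with BASE_LOH_ALIST of 64 KB the LOH buckets start at
// roughly 64 KB, 128 KB, 256 KB, ..., and with BASE_GEN2_ALIST of 64 bytes the
// gen2 buckets start at roughly 64, 128, 256, ... bytes, with the last bucket
// in each table catching everything larger.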
3409 //------------------------------------------
3412 dynamic_data dynamic_data_table [NUMBERGENERATIONS+1];
3415 gc_history_per_heap gc_data_per_heap;
3419 BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
3420 // if elevate_p is FALSE, it means we are determining fragmentation for a generation
3421 // to see if we should condemn this gen; otherwise it means we are determining if
3422 // we should elevate to doing max_gen from an ephemeral gen.
3424 BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
3427 BOOL dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number, ULONGLONG total_mem);
3429 BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, ULONGLONG available_mem);
3431 BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
3434 int generation_skip_ratio;//in %
3437 BOOL gen0_bricks_cleared;
3440 int gen0_must_clear_bricks;
3441 #endif //FFIND_OBJECT
3444 size_t full_gc_counts[gc_type_max];
3446 // the # of bytes allocated since the last full compacting GC.
3448 unsigned __int64 loh_alloc_since_cg;
3451 BOOL elevation_requested;
3453 // if this is TRUE, we should always guarantee that we do a
3454 // full compacting GC before we OOM.
3456 BOOL last_gc_before_oom;
3459 BOOL should_expand_in_full_gc;
3461 #ifdef BACKGROUND_GC
3463 size_t ephemeral_fgc_counts[max_generation];
3466 BOOL alloc_wait_event_p;
3468 #ifndef MULTIPLE_HEAPS
3469 SPTR_DECL(BYTE, next_sweep_obj);
3472 BYTE* next_sweep_obj;
3473 #endif //MULTIPLE_HEAPS
3476 BYTE* current_sweep_pos;
3478 #endif //BACKGROUND_GC
3480 #ifndef MULTIPLE_HEAPS
3481 SVAL_DECL(oom_history, oom_info);
3482 #ifdef FEATURE_PREMORTEM_FINALIZATION
3483 SPTR_DECL(CFinalize,finalize_queue);
3484 #endif //FEATURE_PREMORTEM_FINALIZATION
3488 oom_history oom_info;
3490 #ifdef FEATURE_PREMORTEM_FINALIZATION
3492 PTR_CFinalize finalize_queue;
3493 #endif //FEATURE_PREMORTEM_FINALIZATION
3494 #endif // !MULTIPLE_HEAPS
3497 fgm_history fgm_result;
3500 size_t eph_gen_starts_size;
3503 BOOL ro_segments_in_range;
3505 #ifdef BACKGROUND_GC
3507 heap_segment* freeable_small_heap_segment;
3508 #endif //BACKGROUND_GC
3511 heap_segment* freeable_large_heap_segment;
3514 heap_segment* segment_standby_list;
3517 size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
3520 size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
3523 size_t ordered_plug_indices[MAX_NUM_BUCKETS];
3526 size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
3529 BOOL ordered_plug_indices_init;
3535 BYTE* bestfit_first_pin;
3538 BOOL commit_end_of_seg;
3541 size_t max_free_space_items; // dynamically adjusted.
3544 size_t free_space_buckets;
3547 size_t free_space_items;
3549 // -1 means we are using all the free
3550 // spaces we have (not including
3551 // end of seg space).
3553 int trimmed_free_space_index;
3556 size_t total_ephemeral_plugs;
3559 seg_free_spaces* bestfit_seg;
3561 // Note: we know this from the plan phase.
3562 // total_ephemeral_plugs actually has the same value
3563 // but while we are calculating its value we also store
3564 // info on how big the plugs are for best fit, which we
3565 // don't do in the plan phase.
3566 // TODO: get rid of total_ephemeral_plugs.
3568 size_t total_ephemeral_size;
3575 BOOL heap_analyze_enabled;
3578 size_t internal_root_array_length;
3580 #ifndef MULTIPLE_HEAPS
3581 SPTR_DECL(PTR_BYTE, internal_root_array);
3582 SVAL_DECL(size_t, internal_root_array_index);
3583 SVAL_DECL(BOOL, heap_analyze_success);
3586 BYTE** internal_root_array;
3589 size_t internal_root_array_index;
3592 BOOL heap_analyze_success;
3593 #endif // !MULTIPLE_HEAPS
3595 // next two fields are used to optimize the search for the object
3596 // enclosing the current reference handled by ha_mark_object_simple.
3601 size_t current_obj_size;
3603 #endif //HEAP_ANALYZE
3605 /* ----------------------- global members ----------------------- */
3609 int condemned_generation_num;
3612 BOOL blocking_collection;
3614 #ifdef MULTIPLE_HEAPS
3615 SVAL_DECL(int, n_heaps);
3616 SPTR_DECL(PTR_gc_heap, g_heaps);
3619 HANDLE* g_gc_threads; // keep all of the gc threads.
3622 #ifdef BACKGROUND_GC
3624 size_t* g_bpromoted;
3625 #endif //BACKGROUND_GC
3628 int* g_mark_stack_busy;
3633 #ifdef BACKGROUND_GC
3636 #endif //BACKGROUND_GC
3637 #endif //MULTIPLE_HEAPS
3640 size_t reserved_memory;
3642 size_t reserved_memory_limit;
3644 BOOL g_low_memory_status;
3648 void update_collection_counts ();
3653 #ifdef FEATURE_PREMORTEM_FINALIZATION
3656 #ifdef DACCESS_COMPILE
3657 friend class ::ClrDataAccess;
3658 #endif // DACCESS_COMPILE
3661 // to add a segment, adjust ExtraSegCount and add a constant for the new segment
3662 static const int ExtraSegCount = 2;
3663 static const int FinalizerListSeg = NUMBERGENERATIONS+1;
3664 static const int CriticalFinalizerListSeg = NUMBERGENERATIONS;
3665 //Does not correspond to a segment
3666 static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
3668 PTR_PTR_Object m_Array;
3669 PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
3670 PTR_PTR_Object m_EndArray;
3671 size_t m_PromotedCount;
3673 VOLATILE(LONG) lock;
3675 DWORD lockowner_threadid;
3679 void MoveItem (Object** fromIndex,
3680 unsigned int fromSeg,
3681 unsigned int toSeg);
3683 inline PTR_PTR_Object& SegQueue (unsigned int Seg)
3685 return (Seg ? m_FillPointers [Seg-1] : m_Array);
3687 inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
3689 return m_FillPointers [Seg];
3692 BOOL IsSegEmpty ( unsigned int i)
3694 ASSERT ( (int)i < FreeList);
3695 return (SegQueueLimit(i) == SegQueue (i));
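// Illustrative sketch (hypothetical helper, not part of the actual queue API):
// given the layout above, segment Seg occupies the slots
// [SegQueue (Seg), SegQueueLimit (Seg)), so its entry count is simply the
// pointer difference.
inline size_t SegQueueCount (unsigned int Seg)
{
    ASSERT ((int)Seg < FreeList);
    return (size_t)(SegQueueLimit (Seg) - SegQueue (Seg));
}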
3699 BOOL FinalizeSegForAppDomain (AppDomain *pDomain,
3700 BOOL fRunFinalizers,
3706 void EnterFinalizeLock();
3707 void LeaveFinalizeLock();
3708 bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
3709 Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
3710 BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
3711 void RelocateFinalizationData (int gen, gc_heap* hp);
3713 void WalkFReachableObjects (gc_heap* hp);
3714 #endif //GC_PROFILING
3715 void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
3716 void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
3717 size_t GetPromotedCount();
3719 //Methods used by the shutdown code to call every finalizer
3720 void SetSegForShutDown(BOOL fHasLock);
3721 size_t GetNumberFinalizableObjects();
3722 void DiscardNonCriticalObjects();
3724 //Methods used during app domain unloading to finalize objects in an app domain
3725 BOOL FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers);
3727 void CheckFinalizerObjects();
3729 #endif // FEATURE_PREMORTEM_FINALIZATION
3732 size_t& dd_begin_data_size (dynamic_data* inst)
3734 return inst->begin_data_size;
3737 size_t& dd_survived_size (dynamic_data* inst)
3739 return inst->survived_size;
3741 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
3743 size_t& dd_num_npinned_plugs(dynamic_data* inst)
3745 return inst->num_npinned_plugs;
3747 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
3749 size_t& dd_pinned_survived_size (dynamic_data* inst)
3751 return inst->pinned_survived_size;
3754 size_t& dd_added_pinned_size (dynamic_data* inst)
3756 return inst->added_pinned_size;
3759 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
3761 return inst->artificial_pinned_survived_size;
3765 size_t& dd_padding_size (dynamic_data* inst)
3767 return inst->padding_size;
3769 #endif //SHORT_PLUGS
3771 size_t& dd_current_size (dynamic_data* inst)
3773 return inst->current_size;
3776 float& dd_surv (dynamic_data* inst)
3781 size_t& dd_freach_previous_promotion (dynamic_data* inst)
3783 return inst->freach_previous_promotion;
3786 size_t& dd_desired_allocation (dynamic_data* inst)
3788 return inst->desired_allocation;
3791 size_t& dd_collection_count (dynamic_data* inst)
3793 return inst->collection_count;
3796 size_t& dd_promoted_size (dynamic_data* inst)
3798 return inst->promoted_size;
3801 float& dd_limit (dynamic_data* inst)
3806 float& dd_max_limit (dynamic_data* inst)
3808 return inst->max_limit;
3811 size_t& dd_min_gc_size (dynamic_data* inst)
3813 return inst->min_gc_size;
3816 size_t& dd_max_size (dynamic_data* inst)
3818 return inst->max_size;
3821 size_t& dd_min_size (dynamic_data* inst)
3823 return inst->min_size;
3826 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
3828 return inst->new_allocation;
3831 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
3833 return inst->gc_new_allocation;
3836 size_t& dd_default_new_allocation (dynamic_data* inst)
3838 return inst->default_new_allocation;
3841 size_t& dd_fragmentation_limit (dynamic_data* inst)
3843 return inst->fragmentation_limit;
3846 float& dd_fragmentation_burden_limit (dynamic_data* inst)
3848 return inst->fragmentation_burden_limit;
3851 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
3853 return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
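// For example, with a per-generation fragmentation burden limit of 0.3 the
// "virtual" limit above is min (0.6, 0.75) = 0.6, while a limit of 0.4 or
// higher is capped at 0.75.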
3856 size_t& dd_fragmentation (dynamic_data* inst)
3858 return inst->fragmentation;
3862 size_t& dd_gc_clock (dynamic_data* inst)
3864 return inst->gc_clock;
3867 size_t& dd_time_clock (dynamic_data* inst)
3869 return inst->time_clock;
3873 size_t& dd_gc_elapsed_time (dynamic_data* inst)
3875 return inst->gc_elapsed_time;
3879 float& dd_gc_speed (dynamic_data* inst)
3881 return inst->gc_speed;
3885 alloc_context* generation_alloc_context (generation* inst)
3887 return &(inst->allocation_context);
3891 BYTE*& generation_allocation_start (generation* inst)
3893 return inst->allocation_start;
3896 BYTE*& generation_allocation_pointer (generation* inst)
3898 return inst->allocation_context.alloc_ptr;
3901 BYTE*& generation_allocation_limit (generation* inst)
3903 return inst->allocation_context.alloc_limit;
3906 allocator* generation_allocator (generation* inst)
3908 return &inst->free_list_allocator;
3912 PTR_heap_segment& generation_start_segment (generation* inst)
3914 return inst->start_segment;
3917 heap_segment*& generation_allocation_segment (generation* inst)
3919 return inst->allocation_segment;
3922 BYTE*& generation_plan_allocation_start (generation* inst)
3924 return inst->plan_allocation_start;
3927 size_t& generation_plan_allocation_start_size (generation* inst)
3929 return inst->plan_allocation_start_size;
3932 BYTE*& generation_allocation_context_start_region (generation* inst)
3934 return inst->allocation_context_start_region;
3937 size_t& generation_free_list_space (generation* inst)
3939 return inst->free_list_space;
3942 size_t& generation_free_obj_space (generation* inst)
3944 return inst->free_obj_space;
3947 size_t& generation_allocation_size (generation* inst)
3949 return inst->allocation_size;
3953 size_t& generation_pinned_allocated (generation* inst)
3955 return inst->pinned_allocated;
3958 size_t& generation_pinned_allocation_sweep_size (generation* inst)
3960 return inst->pinned_allocation_sweep_size;
3963 size_t& generation_pinned_allocation_compact_size (generation* inst)
3965 return inst->pinned_allocation_compact_size;
3968 size_t& generation_free_list_allocated (generation* inst)
3970 return inst->free_list_allocated;
3973 size_t& generation_end_seg_allocated (generation* inst)
3975 return inst->end_seg_allocated;
3978 BOOL& generation_allocate_end_seg_p (generation* inst)
3980 return inst->allocate_end_seg_p;
3983 size_t& generation_condemned_allocated (generation* inst)
3985 return inst->condemned_allocated;
3987 #ifdef FREE_USAGE_STATS
3989 size_t& generation_pinned_free_obj_space (generation* inst)
3991 return inst->pinned_free_obj_space;
3994 size_t& generation_allocated_in_pinned_free (generation* inst)
3996 return inst->allocated_in_pinned_free;
3999 size_t& generation_allocated_since_last_pin (generation* inst)
4001 return inst->allocated_since_last_pin;
4003 #endif //FREE_USAGE_STATS
4005 float generation_allocator_efficiency (generation* inst)
4007 if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4009 return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4015 size_t generation_unusable_fragmentation (generation* inst)
4017 return (size_t)(generation_free_obj_space (inst) +
4018 (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
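// Worked example with illustrative numbers: if 300 KB of allocations were
// satisfied from the free list and 100 KB of free list space was wasted as
// free objects, the allocator efficiency above is 300/(300+100) = 0.75; with
// 200 KB still on the free list, the unusable fragmentation estimate is
// 100 KB + (1 - 0.75) * 200 KB = 150 KB.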
4021 #define plug_skew sizeof(ObjHeader)
4022 #define min_obj_size (sizeof(BYTE*)+plug_skew+sizeof(size_t))//syncblock + vtable + first field
4023 #define min_free_list (sizeof(BYTE*)+min_obj_size) //Need one slot more
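// For a sense of scale (assuming pointer-size fields and a pointer-size
// ObjHeader, as on the usual 64-bit layout): plug_skew is 8 bytes, so
// min_obj_size is 8 + 8 + 8 = 24 bytes and min_free_list is 8 + 24 = 32 bytes;
// the corresponding 32-bit values are 12 and 16 bytes.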
4024 //Note that this encodes the fact that plug_skew is a multiple of BYTE*.
4027 BYTE * skew[plug_skew / sizeof(BYTE *)];
4037 //Note that these encode the fact that plug_skew is a multiple of BYTE*.
4038 // Each new field is prepended to the prior struct.
4040 struct plug_and_pair
4046 struct plug_and_reloc
4060 int lr; //for clearing the entire pair in one instruction
4065 struct gap_reloc_pair
4072 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4074 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4076 plug_and_gap plugandgap;
4079 struct loh_obj_and_pad
4085 struct loh_padding_obj
4092 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4095 #define heap_segment_flags_readonly 1
4096 #define heap_segment_flags_inrange 2
4097 #define heap_segment_flags_unmappable 4
4098 #define heap_segment_flags_loh 8
4099 #ifdef BACKGROUND_GC
4100 #define heap_segment_flags_swept 16
4101 #define heap_segment_flags_decommitted 32
4102 #define heap_segment_flags_ma_committed 64
4103 // for segments whose mark array is only partially committed.
4104 #define heap_segment_flags_ma_pcommitted 128
4105 #endif //BACKGROUND_GC
4107 //need to be careful to keep enough pad items to fit a relocation node,
4108 //padded to a QuadWord before the plug_skew
4119 PTR_heap_segment next;
4120 BYTE* plan_allocated;
4121 #ifdef BACKGROUND_GC
4122 BYTE* background_allocated;
4123 BYTE* saved_bg_allocated;
4124 #endif //BACKGROUND_GC
4126 #ifdef MULTIPLE_HEAPS
4128 #endif //MULTIPLE_HEAPS
4131 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4132 #pragma warning(disable:4324) // structure was padded due to __declspec(align())
4134 aligned_plug_and_gap padandplug;
4136 #pragma warning(default:4324) // structure was padded due to __declspec(align())
4141 BYTE*& heap_segment_reserved (heap_segment* inst)
4143 return inst->reserved;
4146 BYTE*& heap_segment_committed (heap_segment* inst)
4148 return inst->committed;
4151 BYTE*& heap_segment_used (heap_segment* inst)
4156 BYTE*& heap_segment_allocated (heap_segment* inst)
4158 return inst->allocated;
4162 BOOL heap_segment_read_only_p (heap_segment* inst)
4164 return ((inst->flags & heap_segment_flags_readonly) != 0);
4168 BOOL heap_segment_in_range_p (heap_segment* inst)
4170 return (!(inst->flags & heap_segment_flags_readonly) ||
4171 ((inst->flags & heap_segment_flags_inrange) != 0));
4175 BOOL heap_segment_unmappable_p (heap_segment* inst)
4177 return (!(inst->flags & heap_segment_flags_readonly) ||
4178 ((inst->flags & heap_segment_flags_unmappable) != 0));
4182 BOOL heap_segment_loh_p (heap_segment * inst)
4184 return !!(inst->flags & heap_segment_flags_loh);
4187 #ifdef BACKGROUND_GC
4189 BOOL heap_segment_decommitted_p (heap_segment * inst)
4191 return !!(inst->flags & heap_segment_flags_decommitted);
4193 #endif //BACKGROUND_GC
4196 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4201 BYTE*& heap_segment_mem (heap_segment* inst)
4206 BYTE*& heap_segment_plan_allocated (heap_segment* inst)
4208 return inst->plan_allocated;
4211 #ifdef BACKGROUND_GC
4213 BYTE*& heap_segment_background_allocated (heap_segment* inst)
4215 return inst->background_allocated;
4218 BYTE*& heap_segment_saved_bg_allocated (heap_segment* inst)
4220 return inst->saved_bg_allocated;
4222 #endif //BACKGROUND_GC
4224 #ifdef MULTIPLE_HEAPS
4226 gc_heap*& heap_segment_heap (heap_segment* inst)
4230 #endif //MULTIPLE_HEAPS
4232 #ifndef MULTIPLE_HEAPS
4234 #ifndef DACCESS_COMPILE
4236 #endif //!DACCESS_COMPILE
4238 GARY_DECL(generation,generation_table,NUMBERGENERATIONS+1);
4240 #ifndef DACCESS_COMPILE
4242 #endif //!DACCESS_COMPILE
4244 #endif //MULTIPLE_HEAPS
4247 generation* gc_heap::generation_of (int n)
4249 assert (((n <= max_generation+1) && (n >= 0)));
4250 return &generation_table [ n ];
4254 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4256 return &dynamic_data_table [ gen_number ];
4259 extern "C" BYTE* g_ephemeral_low;
4260 extern "C" BYTE* g_ephemeral_high;
4262 #define card_word_width ((size_t)32)
4265 // The value of card_size is determined empirically according to the average size of an object
4266 // In the code we also rely on the assumption that one card_table entry (DWORD) covers an entire os page
4268 #if defined (_WIN64)
4269 #define card_size ((size_t)(2*OS_PAGE_SIZE/card_word_width))
4271 #define card_size ((size_t)(OS_PAGE_SIZE/card_word_width))
4275 size_t card_word (size_t card)
4277 return card / card_word_width;
4281 unsigned card_bit (size_t card)
4283 return (unsigned)(card % card_word_width);
4287 size_t gcard_of (BYTE* object)
4289 return (size_t)(object) / card_size;
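// Worked example (assuming the usual 4 KB OS page): on 32-bit hosts card_size
// is 4096/32 = 128 bytes, so one 32-bit card word covers 32 * 128 = 4096 bytes,
// i.e. exactly one page; on 64-bit hosts card_size is 256 bytes and a card word
// covers two pages. For an object at address o, its card is o / card_size, its
// card word is card / card_word_width, and its bit within that word is
// card % card_word_width.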