// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
// optimize for speed

#ifndef _DEBUG
#ifdef _MSC_VER
#pragma optimize( "t", on )
#endif
#endif

#ifdef __GNUC__
#define inline __attribute__((always_inline)) inline
#else
#define inline __forceinline
#endif // __GNUC__

#include "gc.h"

//#define DT_LOG

#include "gcrecord.h"

#ifdef _MSC_VER
#pragma warning(disable:4293)
#pragma warning(disable:4477)
#endif //_MSC_VER

inline void FATAL_GC_ERROR()
{
#ifndef DACCESS_COMPILE
    GCToOSInterface::DebugBreak();
#endif // DACCESS_COMPILE
    _ASSERTE(!"Fatal Error in GC.");
    GCToEEInterface::HandleFatalError(COR_E_EXECUTIONENGINE);
}

#ifdef _MSC_VER
#pragma inline_depth(20)
#endif

/* the following section defines the optional features */

// FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
// in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
// and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much
// work to make FEATURE_STRUCTALIGN not apply to LOH so they can both be
// turned on.
#define FEATURE_LOH_COMPACTION

#ifdef FEATURE_64BIT_ALIGNMENT
// We need the following feature as part of keeping 64-bit types aligned in the GC heap.
#define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during
                                //relocation
#endif //FEATURE_64BIT_ALIGNMENT

#define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items

#ifdef SHORT_PLUGS
#define DESIRED_PLUG_LENGTH (1000)
#endif //SHORT_PLUGS

#define FEATURE_PREMORTEM_FINALIZATION
#define GC_HISTORY

#ifndef FEATURE_REDHAWK
#define HEAP_ANALYZE
#define COLLECTIBLE_CLASS
#endif // !FEATURE_REDHAWK

#ifdef HEAP_ANALYZE
#define initial_internal_roots        (1024*16)
#endif // HEAP_ANALYZE

#define MARK_LIST         //used sorted list to speed up plan phase

#define BACKGROUND_GC   //concurrent background GC (requires WRITE_WATCH)

#ifdef SERVER_GC
#define MH_SC_MARK //scalable marking
//#define SNOOP_STATS //diagnostic
#define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
#endif //SERVER_GC

//This is used to mark some types volatile only when the scalable marking is used.
#if defined (SERVER_GC) && defined (MH_SC_MARK)
#define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
#else //SERVER_GC&&MH_SC_MARK
#define SERVER_SC_MARK_VOLATILE(x) x
#endif //SERVER_GC&&MH_SC_MARK

//#define MULTIPLE_HEAPS         //Allow multiple heaps for servers

#define INTERIOR_POINTERS   //Allow interior pointers in the code manager

#define CARD_BUNDLE         //enable card bundle feature.(requires WRITE_WATCH)

// If this is defined we use a map for segments in order to find the heap for
// a segment fast. But it does use more memory as we have to cover the whole
// heap range and for each entry we allocate a struct of 5 ptr-size words
// (3 for WKS as there's only one heap).
#define SEG_MAPPING_TABLE

// If allocating the heap mapping table for the available VA consumes too
// much memory, you can enable this to allocate only the portion that
// corresponds to rw segments and grow it when needed in grow_brick_card_table.
// However in heap_of you will need to always compare the address with
// g_lowest/highest before you can look at the heap mapping table.
#define GROWABLE_SEG_MAPPING_TABLE

#ifdef BACKGROUND_GC
#define MARK_ARRAY      //Mark bit in an array
#endif //BACKGROUND_GC

#if defined(BACKGROUND_GC) || defined (CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
#define WRITE_WATCH     //Write Watch feature
#endif //BACKGROUND_GC || CARD_BUNDLE

#ifdef WRITE_WATCH
#define array_size 100
#endif //WRITE_WATCH

//#define SHORT_PLUGS           //keep plug short

#define FFIND_OBJECT        //faster find_object, slower allocation
#define FFIND_DECAY  7      //Number of GC for which fast find will be active

#ifndef MAX_LONGPATH
#define MAX_LONGPATH 1024
#endif // MAX_LONGPATH

//#define DEBUG_WRITE_WATCH //Additional debug for write watch

//#define STRESS_PINNING    //Stress pinning by pinning randomly

//#define TRACE_GC          //debug trace gc operation
//#define SIMPLE_DPRINTF

//#define TIME_GC           //time allocation and garbage collection
//#define TIME_WRITE_WATCH  //time GetWriteWatch and ResetWriteWatch calls
//#define COUNT_CYCLES  //Use cycle counter for timing
//#define JOIN_STATS         //amount of time spent in the join
//also, see TIME_SUSPEND in switches.h.

//#define SYNCHRONIZATION_STATS
//#define SEG_REUSE_STATS

#if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
#define BEGIN_TIMING(x) \
    int64_t x##_start; \
    x##_start = GCToOSInterface::QueryPerformanceCounter()

#define END_TIMING(x) \
    int64_t x##_end; \
    x##_end = GCToOSInterface::QueryPerformanceCounter(); \
    x += x##_end - x##_start

#else
#define BEGIN_TIMING(x)
#define END_TIMING(x)
#define BEGIN_TIMING_CYCLES(x)
#define END_TIMING_CYCLES(x)
#endif //SYNCHRONIZATION_STATS || STAGE_STATS
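
// Illustrative usage (added for clarity; not part of the original header): with
// SYNCHRONIZATION_STATS or STAGE_STATS defined, the timing macros paste the argument
// into local variable names, so a hypothetical int64_t accumulator is used like this:
//
//     int64_t msl_acquire_time = 0;      // assumed accumulator, accumulates QPC ticks
//     BEGIN_TIMING(msl_acquire_time);    // declares and sets msl_acquire_time_start
//     // ... code being measured ...
//     END_TIMING(msl_acquire_time);      // adds (end - start) to msl_acquire_time
//
// In other builds both macros expand to nothing, so the calls have no cost.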
169
170 /* End of optional features */
171
172 #ifdef GC_CONFIG_DRIVEN
173 void GCLogConfig (const char *fmt, ... );
174 #define cprintf(x) {GCLogConfig x;}
175 #endif //GC_CONFIG_DRIVEN
176
177 #ifdef _DEBUG
178 #define TRACE_GC
179 #endif
180
181 // For the bestfit algorithm when we relocate ephemeral generations into an 
182 // existing gen2 segment.
183 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
184 #define MIN_INDEX_POWER2 6
185
186 #ifdef SERVER_GC
187
188 #ifdef BIT64
189 #define MAX_INDEX_POWER2 30
190 #else
191 #define MAX_INDEX_POWER2 26
192 #endif  // BIT64
193
194 #else //SERVER_GC
195
196 #ifdef BIT64
197 #define MAX_INDEX_POWER2 28
198 #else
199 #define MAX_INDEX_POWER2 24
200 #endif  // BIT64
201
202 #endif //SERVER_GC
203
204 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
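
// Worked example (added for illustration): on 64-bit server GC this comes out to
// MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1 = 30 - 6 + 1 = 25 buckets, covering
// recorded free-space sizes from 2^6 up to 2^30; on 32-bit workstation GC it is
// 24 - 6 + 1 = 19 buckets.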

#define MAX_NUM_FREE_SPACES 200
#define MIN_NUM_FREE_SPACES 5

//Please leave these definitions intact.
// hosted api
#ifdef memcpy
#undef memcpy
#endif //memcpy

#ifdef FEATURE_STRUCTALIGN
#define REQD_ALIGN_DCL ,int requiredAlignment
#define REQD_ALIGN_ARG ,requiredAlignment
#define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
#define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
#define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
#else // FEATURE_STRUCTALIGN
#define REQD_ALIGN_DCL
#define REQD_ALIGN_ARG
#define REQD_ALIGN_AND_OFFSET_DCL
#define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
#define REQD_ALIGN_AND_OFFSET_ARG
#endif // FEATURE_STRUCTALIGN
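
// Illustrative expansion (added; pad_object is a made-up name used only to show the
// effect): a declaration written as
//     void pad_object (size_t size REQD_ALIGN_AND_OFFSET_DCL);
// becomes, with FEATURE_STRUCTALIGN defined,
//     void pad_object (size_t size ,int requiredAlignment,size_t alignmentOffset);
// and without it simply
//     void pad_object (size_t size);
// so the same signatures compile in both configurations.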

#ifdef MULTIPLE_HEAPS
#define THREAD_NUMBER_DCL ,int thread
#define THREAD_NUMBER_ARG ,thread
#define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
#define THREAD_FROM_HEAP  int thread = heap_number;
#define HEAP_FROM_THREAD  gc_heap* hpt = gc_heap::g_heaps[thread];
#else
#define THREAD_NUMBER_DCL
#define THREAD_NUMBER_ARG
#define THREAD_NUMBER_FROM_CONTEXT
#define THREAD_FROM_HEAP
#define HEAP_FROM_THREAD  gc_heap* hpt = 0;
#endif //MULTIPLE_HEAPS
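
// Illustrative only: these macros let one signature carry a heap index only in the
// MULTIPLE_HEAPS build. A hypothetical helper declared as
//     void relocate_obj (uint8_t* obj THREAD_NUMBER_DCL);
// takes (uint8_t*, int) with server GC and just (uint8_t*) otherwise, and a call site
//     relocate_obj (o THREAD_NUMBER_ARG);
// compiles unchanged in both configurations.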

//These constants are ordered
const int policy_sweep = 0;
const int policy_compact = 1;
const int policy_expand  = 2;

#ifdef TRACE_GC
#define SEG_REUSE_LOG_0 7
#define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
#define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
#define BGC_LOG (DT_LOG_0 + 1)
#define GTC_LOG (DT_LOG_0 + 2)
#define GC_TABLE_LOG (DT_LOG_0 + 3)
#define JOIN_LOG (DT_LOG_0 + 4)
#define SPINLOCK_LOG (DT_LOG_0 + 5)
#define SNOOP_LOG (DT_LOG_0 + 6)

#ifndef DACCESS_COMPILE

#ifdef SIMPLE_DPRINTF

//#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
void GCLog (const char *fmt, ... );
//#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
//#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
//#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
//#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
//#define dprintf(l,x) {if ((l == 1) || (l == 2222)) {GCLog x;}}
#define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
//#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
//#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
//#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
#else //SIMPLE_DPRINTF
// Nobody used the logging mechanism that used to be here. If we find ourselves
// wanting to inspect GC logs on unmodified builds, we can use this define here
// to do so.
#define dprintf(l, x)

#endif //SIMPLE_DPRINTF

#else //DACCESS_COMPILE
#define dprintf(l,x)
#endif //DACCESS_COMPILE
#else //TRACE_GC
#define dprintf(l,x)
#endif //TRACE_GC
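
// Illustrative only (message text is made up): with TRACE_GC and SIMPLE_DPRINTF defined,
// the active definition above keeps messages at level 1 or below plus the GTC_LOG
// channel, so a call like
//     dprintf (2, ("h%d: budget exhausted", heap_number));
// is filtered out at run time, while
//     dprintf (GTC_LOG, ("h%d: triggering gen%d GC", heap_number, n));
// is routed to GCLog. Note the double parentheses: the second argument is a complete
// printf-style argument list.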

#if !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE)
#undef  assert
#define assert _ASSERTE
#undef  ASSERT
#define ASSERT _ASSERTE
#endif // FEATURE_REDHAWK

struct GCDebugSpinLock {
    VOLATILE(int32_t) lock;                   // -1 if free, 0 if held
#ifdef _DEBUG
    VOLATILE(Thread *) holding_thread;     // -1 if no thread holds the lock.
    VOLATILE(BOOL) released_by_gc_p;       // a GC thread released the lock.
#endif
#if defined (SYNCHRONIZATION_STATS)
    // number of times we went into SwitchToThread in enter_spin_lock.
    unsigned int num_switch_thread;
    // number of times we went into WaitLonger.
    unsigned int num_wait_longer;
    // number of times we went to calling SwitchToThread in WaitLonger.
    unsigned int num_switch_thread_w;
    // number of times we went to calling DisablePreemptiveGC in WaitLonger.
    unsigned int num_disable_preemptive_w;
#endif

    GCDebugSpinLock()
        : lock(-1)
#ifdef _DEBUG
        , holding_thread((Thread*) -1)
#endif
#if defined (SYNCHRONIZATION_STATS)
        , num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
#endif
    {
    }

#if defined (SYNCHRONIZATION_STATS)
    void init()
    {
        num_switch_thread = 0;
        num_wait_longer = 0;
        num_switch_thread_w = 0;
        num_disable_preemptive_w = 0;
    }
#endif
};
typedef GCDebugSpinLock GCSpinLock;
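
// Illustrative sketch (added; this is not the actual enter_spin_lock implementation,
// which lives in gc.cpp): the lock field uses -1 for "free" and 0 for "held", so
// acquisition is typically a compare-exchange from -1 to 0 and release stores -1 back:
//
//     // spin until we transition lock from -1 (free) to 0 (held)
//     while (Interlocked::CompareExchange (&msl.lock, 0, -1) != -1)
//         /* spin, yield, or bump the SYNCHRONIZATION_STATS counters */;
//     ...
//     msl.lock = -1;   // release
//
// Interlocked::CompareExchange is used here only for illustration of the -1/0 protocol.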

class mark;
class heap_segment;
class CObjectHeader;
class l_heap;
class sorted_table;
class c_synchronize;
class seg_free_spaces;
class gc_heap;

#ifdef BACKGROUND_GC
class exclusive_sync;
class recursive_gc_sync;
#endif //BACKGROUND_GC

// The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
// make sure you change that one if you change this one!
enum gc_pause_mode
{
    pause_batch = 0, //We are not concerned about pause length
    pause_interactive = 1,     //We are running an interactive app
    pause_low_latency = 2,     //short pauses are essential
    //avoid long pauses from blocking full GCs unless running out of memory
    pause_sustained_low_latency = 3,
    pause_no_gc = 4
};

enum gc_loh_compaction_mode
{
    loh_compaction_default = 1, // the default mode, don't compact LOH.
    loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
    loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
};

enum set_pause_mode_status
{
    set_pause_mode_success = 0,
    set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
};

/*
 Latency modes required the user to have specific GC knowledge (eg, budget, full blocking GC).
 We are trying to move away from them as it makes a lot more sense for users to tell
 us which of the perf aspects that matter to them is the most important.

 In general there are 3 such aspects:

 + memory footprint
 + throughput
 + pause predictability

 Currently the following levels are supported. We may (and will likely) add more
 in the future.

 +----------+--------------------+---------------------------------------+
 | Level    | Optimization Goals | Latency Characteristics               |
 +==========+====================+=======================================+
 | 0        | memory footprint   | pauses can be long and more frequent  |
 +----------+--------------------+---------------------------------------+
 | 1        | balanced           | pauses are more predictable and more  |
 |          |                    | frequent. the longest pauses are      |
 |          |                    | shorter than level 0's.               |
 +----------+--------------------+---------------------------------------+
*/
enum gc_latency_level
{
    latency_level_first = 0,
    latency_level_memory_footprint = latency_level_first,
    latency_level_balanced = 1,
    latency_level_last = latency_level_balanced,
    latency_level_default = latency_level_balanced
};

enum gc_tuning_point
{
    tuning_deciding_condemned_gen = 0,
    tuning_deciding_full_gc = 1,
    tuning_deciding_compaction = 2,
    tuning_deciding_expansion = 3,
    tuning_deciding_promote_ephemeral = 4,
    tuning_deciding_short_on_seg = 5
};

#if defined(TRACE_GC) && defined(BACKGROUND_GC)
static const char * const str_bgc_state[] =
{
    "not_in_process",
    "mark_handles",
    "mark_stack",
    "revisit_soh",
    "revisit_loh",
    "overflow_soh",
    "overflow_loh",
    "final_marking",
    "sweep_soh",
    "sweep_loh",
    "plan_phase"
};
#endif // defined(TRACE_GC) && defined(BACKGROUND_GC)

enum allocation_state
{
    a_state_start = 0,
    a_state_can_allocate,
    a_state_cant_allocate,
    // This could be due to having to wait till a GC is done,
    // or having to try a different heap.
    a_state_retry_allocate,
    a_state_try_fit,
    a_state_try_fit_new_seg,
    a_state_try_fit_after_cg,
    a_state_try_fit_after_bgc,
    a_state_try_free_full_seg_in_bgc,
    a_state_try_free_after_bgc,
    a_state_try_seg_end,
    a_state_acquire_seg,
    a_state_acquire_seg_after_cg,
    a_state_acquire_seg_after_bgc,
    a_state_check_and_wait_for_bgc,
    a_state_trigger_full_compact_gc,
    a_state_trigger_ephemeral_gc,
    a_state_trigger_2nd_ephemeral_gc,
    a_state_check_retry_seg,
    a_state_max
};

enum gc_type
{
    gc_type_compacting = 0,
    gc_type_blocking = 1,
#ifdef BACKGROUND_GC
    gc_type_background = 2,
#endif //BACKGROUND_GC
    gc_type_max = 3
};

//encapsulates the mechanism for the current gc
class gc_mechanisms
{
public:
    VOLATILE(size_t) gc_index; // starts from 1 for the first GC, like dd_collection_count
    int condemned_generation;
    BOOL promotion;
    BOOL compaction;
    BOOL loh_compaction;
    BOOL heap_expansion;
    uint32_t concurrent;
    BOOL demotion;
    BOOL card_bundles;
    int  gen0_reduction_count;
    BOOL should_lock_elevation;
    int elevation_locked_count;
    BOOL elevation_reduced;
    BOOL minimal_gc;
    gc_reason reason;
    gc_pause_mode pause_mode;
    BOOL found_finalizers;

#ifdef BACKGROUND_GC
    BOOL background_p;
    bgc_state b_state;
    BOOL allocations_allowed;
#endif //BACKGROUND_GC

#ifdef STRESS_HEAP
    BOOL stress_induced;
#endif // STRESS_HEAP

    // These are opportunistically set
    uint32_t entry_memory_load;
    uint32_t exit_memory_load;

    void init_mechanisms(); //for each GC
    void first_init(); // for the life of the EE

    void record (gc_history_global* history);
};

// This is a compact version of gc_mechanisms that we use to save in the history.
class gc_mechanisms_store
{
public:
    size_t gc_index;
    bool promotion;
    bool compaction;
    bool loh_compaction;
    bool heap_expansion;
    bool concurrent;
    bool demotion;
    bool card_bundles;
    bool should_lock_elevation;
    int condemned_generation   : 8;
    int gen0_reduction_count   : 8;
    int elevation_locked_count : 8;
    gc_reason reason           : 8;
    gc_pause_mode pause_mode   : 8;
#ifdef BACKGROUND_GC
    bgc_state b_state          : 8;
#endif //BACKGROUND_GC
    bool found_finalizers;

#ifdef BACKGROUND_GC
    bool background_p;
#endif //BACKGROUND_GC

#ifdef STRESS_HEAP
    bool stress_induced;
#endif // STRESS_HEAP

#ifdef BIT64
    uint32_t entry_memory_load;
#endif // BIT64

    void store (gc_mechanisms* gm)
    {
        gc_index                = gm->gc_index;
        condemned_generation    = gm->condemned_generation;
        promotion               = (gm->promotion != 0);
        compaction              = (gm->compaction != 0);
        loh_compaction          = (gm->loh_compaction != 0);
        heap_expansion          = (gm->heap_expansion != 0);
        concurrent              = (gm->concurrent != 0);
        demotion                = (gm->demotion != 0);
        card_bundles            = (gm->card_bundles != 0);
        gen0_reduction_count    = gm->gen0_reduction_count;
        should_lock_elevation   = (gm->should_lock_elevation != 0);
        elevation_locked_count  = gm->elevation_locked_count;
        reason                  = gm->reason;
        pause_mode              = gm->pause_mode;
        found_finalizers        = (gm->found_finalizers != 0);

#ifdef BACKGROUND_GC
        background_p            = (gm->background_p != 0);
        b_state                 = gm->b_state;
#endif //BACKGROUND_GC

#ifdef STRESS_HEAP
        stress_induced          = (gm->stress_induced != 0);
#endif // STRESS_HEAP

#ifdef BIT64
        entry_memory_load       = gm->entry_memory_load;
#endif // BIT64
    }
};

#ifdef GC_STATS

// GC specific statistics, tracking counts and timings for GCs occurring in the system.
// This writes the statistics to a file every 60 seconds, if a file is specified in
// COMPlus_GcMixLog

struct GCStatistics
    : public StatisticsBase
{
    // initialized to the contents of COMPlus_GcMixLog, or NULL, if not present
    static char* logFileName;
    static FILE*  logFile;

    // number of times we executed a background GC, a foreground GC, or a
    // non-concurrent GC
    int cntBGC, cntFGC, cntNGC;

    // min, max, and total time spent performing BGCs, FGCs, NGCs
    // (BGC time includes everything between the moment the BGC starts until
    // it completes, i.e. the times of all FGCs occurring concurrently)
    MinMaxTot bgc, fgc, ngc;

    // number of times we executed a compacting GC (sweeping counts can be derived)
    int cntCompactNGC, cntCompactFGC;

    // count of reasons
    int cntReasons[reason_max];

    // count of condemned generation, by NGC and FGC:
    int cntNGCGen[max_generation+1];
    int cntFGCGen[max_generation];

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Internal mechanism:

    virtual void Initialize();
    virtual void DisplayAndUpdate();

    // Public API

    static BOOL Enabled()
    { return logFileName != NULL; }

    void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
};

extern GCStatistics g_GCStatistics;
extern GCStatistics g_LastGCStatistics;

#endif // GC_STATS
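
// Usage note (added for clarity, based on the comments above): in a runtime built with
// GC_STATS, pointing the COMPlus_GcMixLog environment variable at a file path turns the
// statistics on, e.g. on Windows
//     set COMPlus_GcMixLog=C:\temp\gcmix.log
// (the path is just an example). Enabled() then returns TRUE and the counts above are
// flushed to that file periodically.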

typedef DPTR(class heap_segment)               PTR_heap_segment;
typedef DPTR(class gc_heap)                    PTR_gc_heap;
typedef DPTR(PTR_gc_heap)                      PTR_PTR_gc_heap;
#ifdef FEATURE_PREMORTEM_FINALIZATION
typedef DPTR(class CFinalize)                  PTR_CFinalize;
#endif // FEATURE_PREMORTEM_FINALIZATION

//-------------------------------------
//generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size
//and doubling each time. The last bucket (index == num_buckets-1) is for the largest sizes with no limit.

#define MAX_BUCKET_COUNT (13)//Max number of buckets for the small generations.
class alloc_list
{
    uint8_t* head;
    uint8_t* tail;

    size_t damage_count;
public:
#ifdef FL_VERIFICATION
    size_t item_count;
#endif //FL_VERIFICATION

    uint8_t*& alloc_list_head () { return head;}
    uint8_t*& alloc_list_tail () { return tail;}
    size_t& alloc_list_damage_count(){ return damage_count; }
    alloc_list()
    {
        head = 0;
        tail = 0;
        damage_count = 0;
    }
};


class allocator
{
    size_t num_buckets;
    size_t frst_bucket_size;
    alloc_list first_bucket;
    alloc_list* buckets;
    alloc_list& alloc_list_of (unsigned int bn);
    size_t& alloc_list_damage_count_of (unsigned int bn);

public:
    allocator (unsigned int num_b, size_t fbs, alloc_list* b);
    allocator()
    {
        num_buckets = 1;
        frst_bucket_size = SIZE_T_MAX;
    }
    unsigned int number_of_buckets() {return (unsigned int)num_buckets;}

    size_t first_bucket_size() {return frst_bucket_size;}
    uint8_t*& alloc_list_head_of (unsigned int bn)
    {
        return alloc_list_of (bn).alloc_list_head();
    }
    uint8_t*& alloc_list_tail_of (unsigned int bn)
    {
        return alloc_list_of (bn).alloc_list_tail();
    }
    void clear();
    BOOL discard_if_no_fit_p()
    {
        return (num_buckets == 1);
    }

    // This is when we know there's nothing to repair because this free
    // list has never gone through plan phase. Right now it's only used
    // by the background ephemeral sweep when we copy the local free list
    // to gen0's free list.
    //
    // We copy head and tail manually (vs together like copy_to_alloc_list)
    // since we need to copy tail first because when we get the free items off
    // of each bucket we check head first. We also need to copy the
    // smaller buckets first so when gen0 allocation needs to thread
    // smaller items back that bucket is guaranteed to have been fully
    // copied.
    void copy_with_no_repair (allocator* allocator_to_copy)
    {
        assert (num_buckets == allocator_to_copy->number_of_buckets());
        for (unsigned int i = 0; i < num_buckets; i++)
        {
            alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
            alloc_list_tail_of(i) = al->alloc_list_tail();
            alloc_list_head_of(i) = al->alloc_list_head();
        }
    }

    void unlink_item (unsigned int bucket_number, uint8_t* item, uint8_t* previous_item, BOOL use_undo_p);
    void thread_item (uint8_t* item, size_t size);
    void thread_item_front (uint8_t* item, size_t size);
    void thread_free_item (uint8_t* free_item, uint8_t*& head, uint8_t*& tail);
    void copy_to_alloc_list (alloc_list* toalist);
    void copy_from_alloc_list (alloc_list* fromalist);
    void commit_alloc_list_changes();
};
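
// Worked example (added for illustration; the numbers are hypothetical): with the
// bucketing scheme described above, an allocator constructed with num_b = 5 and
// fbs = 256 would file free items as
//     bucket 0: size < 256
//     bucket 1: 256 <= size < 512
//     bucket 2: 512 <= size < 1024
//     bucket 3: 1024 <= size < 2048
//     bucket 4: size >= 2048 (last bucket, no upper limit)
// The default-constructed allocator (num_buckets == 1, frst_bucket_size == SIZE_T_MAX)
// degenerates to a single unbounded free list, which is the case discard_if_no_fit_p()
// tests for.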

#define NUM_GEN_POWER2 (20)
#define BASE_GEN_SIZE (1*512)

// group the frequently used ones together (need instrumentation on accessors)
class generation
{
public:
    // Don't move these first two fields without adjusting the references
    // from the __asm in jitinterface.cpp.
    alloc_context   allocation_context;
    PTR_heap_segment start_segment;
    uint8_t*        allocation_start;
    heap_segment*   allocation_segment;
    uint8_t*        allocation_context_start_region;
    allocator       free_list_allocator;
    size_t          free_list_allocated;
    size_t          end_seg_allocated;
    BOOL            allocate_end_seg_p;
    size_t          condemned_allocated;
    size_t          free_list_space;
    size_t          free_obj_space;
    size_t          allocation_size;
    uint8_t*        plan_allocation_start;
    size_t          plan_allocation_start_size;

    // this is the pinned plugs that got allocated into this gen.
    size_t          pinned_allocated;
    size_t          pinned_allocation_compact_size;
    size_t          pinned_allocation_sweep_size;
    int             gen_num;

#ifdef FREE_USAGE_STATS
    size_t          gen_free_spaces[NUM_GEN_POWER2];
    // these are non pinned plugs only
    size_t          gen_plugs[NUM_GEN_POWER2];
    size_t          gen_current_pinned_free_spaces[NUM_GEN_POWER2];
    size_t          pinned_free_obj_space;
    // this is what got allocated into the pinned free spaces.
    size_t          allocated_in_pinned_free;
    size_t          allocated_since_last_pin;
#endif //FREE_USAGE_STATS
};

static_assert(offsetof(dac_generation, allocation_context) == offsetof(generation, allocation_context), "DAC generation offset mismatch");
static_assert(offsetof(dac_generation, start_segment) == offsetof(generation, start_segment), "DAC generation offset mismatch");
static_assert(offsetof(dac_generation, allocation_start) == offsetof(generation, allocation_start), "DAC generation offset mismatch");

// static data remains the same after it's initialized.
// It's per generation.
// TODO: for gen_time_tuning, we should put the multipliers in static data.
struct static_data
{
    size_t min_size;
    size_t max_size;
    size_t fragmentation_limit;
    float fragmentation_burden_limit;
    float limit;
    float max_limit;
    size_t time_clock; // time after which to collect generation, in performance counts (see QueryPerformanceCounter)
    size_t gc_clock; // number of gcs after which to collect generation
};

// The dynamic data fields are grouped into 3 categories:
//
// calculated logical data (like desired_allocation)
// physical data (like fragmentation)
// const data (sdata), initialized at the beginning
class dynamic_data
{
public:
    ptrdiff_t new_allocation;
    ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
    float     surv;
    size_t    desired_allocation;

    // # of bytes taken by objects (ie, not free space) at the beginning
    // of the GC.
    size_t    begin_data_size;
    // # of bytes taken by survived objects after mark.
    size_t    survived_size;
    // # of bytes taken by survived pinned plugs after mark.
    size_t    pinned_survived_size;
    size_t    artificial_pinned_survived_size;
    size_t    added_pinned_size;

#ifdef SHORT_PLUGS
    size_t    padding_size;
#endif //SHORT_PLUGS
#if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
    // # of plugs that are not pinned plugs.
    size_t    num_npinned_plugs;
#endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
    //total object size after a GC, ie, doesn't include fragmentation
    size_t    current_size;
    size_t    collection_count;
    size_t    promoted_size;
    size_t    freach_previous_promotion;
    size_t    fragmentation;    //fragmentation when we don't compact
    size_t    gc_clock;         //gc# when last GC happened
    size_t    time_clock;       //time when last gc started
    size_t    gc_elapsed_time;  // Time it took for the gc to complete
    float     gc_speed;         //  speed in bytes/msec for the gc to complete

    size_t    min_size;

    static_data* sdata;
};
#define ro_in_entry 0x1

#ifdef SEG_MAPPING_TABLE
// Note that I am storing both h0 and seg0, even though in Server GC you can get to
// the heap* from the segment info. This is because heap_of needs to be really fast
// and we would not want yet another indirection.
struct seg_mapping
{
    // if an address is > boundary it belongs to h1; else h0.
    // since we init h0 and h1 to 0, if we get 0 it means that
    // address doesn't exist on managed segments. And heap_of
    // would just return heap0 which is what it does now.
    uint8_t* boundary;
#ifdef MULTIPLE_HEAPS
    gc_heap* h0;
    gc_heap* h1;
#endif //MULTIPLE_HEAPS
    // You could have an address that's in between 2 segments and
    // this would return a seg, the caller then will use
    // in_range_for_segment to determine if it's on that seg.
    heap_segment* seg0; // this is what the seg for h0 is.
    heap_segment* seg1; // this is what the seg for h1 is.
    // Note that when frozen objects are used we mask seg1
    // with 0x1 to indicate that there is a ro segment for
    // this entry.
};
#endif //SEG_MAPPING_TABLE
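
// Illustrative sketch (added; not the actual heap_of implementation): with this table,
// finding the heap for an object address o is roughly one table lookup plus one compare:
//
//     seg_mapping* entry = /* seg_mapping table entry covering o */;
//     gc_heap* hp = (o > entry->boundary) ? entry->h1 : entry->h0;   // 0 if not managed
//
// which is why both heap pointers are stored directly in the entry instead of being
// reached through the segment.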

// alignment helpers
//Alignment constant for allocation
#define ALIGNCONST (DATA_ALIGNMENT-1)

inline
size_t Align (size_t nbytes, int alignment=ALIGNCONST)
{
    return (nbytes + alignment) & ~alignment;
}
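
// Worked example (added for illustration): with 8-byte DATA_ALIGNMENT, ALIGNCONST is 7,
// so Align (13) computes (13 + 7) & ~7 = 16, while an already aligned size is unchanged:
// Align (16) == 16.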

//return alignment constant for small object heap vs large object heap
inline
int get_alignment_constant (BOOL small_object_p)
{
#ifdef FEATURE_STRUCTALIGN
    // If any objects on the large object heap require 8-byte alignment,
    // the compiler will tell us so.  Let's not guess an alignment here.
    return ALIGNCONST;
#else // FEATURE_STRUCTALIGN
    return small_object_p ? ALIGNCONST : 7;
#endif // FEATURE_STRUCTALIGN
}
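
// Note (added for clarity): without FEATURE_STRUCTALIGN, large objects always get an
// alignment constant of 7, i.e. 8-byte alignment, even on a 32-bit build where
// ALIGNCONST is 3. For example, Align (20, get_alignment_constant (FALSE)) == 24 there,
// while the same size on the small object heap stays 20.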

struct etw_opt_info
{
    size_t desired_allocation;
    size_t new_allocation;
    int    gen_number;
};

// Note, I am not removing the ones that are no longer used
// because the older versions of the runtime still use them
// and ETW interprets them.
enum alloc_wait_reason
{
    // When we don't care about firing an event for
    // this.
    awr_ignored = -1,

    // when we detect we are in low memory
    awr_low_memory = 0,

    // when we detect the ephemeral segment is too full
    awr_low_ephemeral = 1,

    // we've given out too much budget for gen0.
    awr_gen0_alloc = 2,

    // we've given out too much budget for loh.
    awr_loh_alloc = 3,

    // this event is really obsolete - it's for pre-XP
    // OSs where low mem notification is not supported.
    awr_alloc_loh_low_mem = 4,

    // we ran out of VM space to reserve on loh.
    awr_loh_oos = 5,

    // ran out of space when allocating a small object
    awr_gen0_oos_bgc = 6,

    // ran out of space when allocating a large object
    awr_loh_oos_bgc = 7,

    // waiting for BGC to let FGC happen
    awr_fgc_wait_for_bgc = 8,

    // wait for bgc to finish to get loh seg.
    // no longer used with the introduction of loh msl.
    awr_get_loh_seg = 9,

    // we don't allow loh allocation during bgc planning.
    // no longer used with the introduction of loh msl.
    awr_loh_alloc_during_plan = 10,

    // we don't allow too much loh allocation during bgc.
    awr_loh_alloc_during_bgc = 11
};

struct alloc_thread_wait_data
{
    int awr;
};

enum msl_take_state
{
    mt_get_large_seg = 0,
    mt_bgc_loh_sweep,
    mt_wait_bgc,
    mt_block_gc,
    mt_clr_mem,
    mt_clr_large_mem,
    mt_t_eph_gc,
    mt_t_full_gc,
    mt_alloc_small,
    mt_alloc_large,
    mt_alloc_small_cant,
    mt_alloc_large_cant,
    mt_try_alloc,
    mt_try_budget
};

enum msl_enter_state
{
    me_acquire,
    me_release
};

struct spinlock_info
{
    msl_enter_state enter_state;
    msl_take_state take_state;
    EEThreadId thread_id;
    bool loh_p;
};

#define HS_CACHE_LINE_SIZE 128

#ifdef SNOOP_STATS
struct snoop_stats_data
{
    int heap_index;

    // total number of objects that we called
    // gc_mark on.
    size_t objects_checked_count;
    // total number of times we called gc_mark
    // on a 0 reference.
    size_t zero_ref_count;
    // total objects actually marked.
    size_t objects_marked_count;
    // number of objects written to the mark stack because
    // of mark_stolen.
    size_t stolen_stack_count;
    // number of objects pushed onto the mark stack because
    // of the partial mark code path.
    size_t partial_stack_count;
    // number of objects pushed onto the mark stack because
    // of the non partial mark code path.
    size_t normal_stack_count;
    // number of references marked without mark stack.
    size_t non_stack_count;

    // number of times we detect next heap's mark stack
    // is not busy.
    size_t stack_idle_count;

    // number of times we do switch to thread.
    size_t switch_to_thread_count;

    // number of times we are checking if the next heap's
    // mark stack is busy.
    size_t check_level_count;
    // number of times next stack is busy and level is
    // at the bottom.
    size_t busy_count;
    // how many interlocked exchange operations we did
    size_t interlocked_count;
    // number of times parent objects stolen
    size_t partial_mark_parent_count;
    // number of times we look at a normal stolen entry,
    // or the beginning/ending PM pair.
    size_t stolen_or_pm_count;
    // number of times we see 2 for the entry.
    size_t stolen_entry_count;
    // number of times we see a PM entry that's not ready.
    size_t pm_not_ready_count;
    // number of stolen normal marked objects and partial mark children.
    size_t normal_count;
    // number of times the bottom of mark stack was cleared.
    size_t stack_bottom_clear_count;
};
#endif //SNOOP_STATS

struct no_gc_region_info
{
    size_t soh_allocation_size;
    size_t loh_allocation_size;
    size_t started;
    size_t num_gcs;
    size_t num_gcs_induced;
    start_no_gc_region_status start_status;
    gc_pause_mode saved_pause_mode;
    size_t saved_gen0_min_size;
    size_t saved_gen3_min_size;
    BOOL minimal_gc_p;
};

// if you change these, make sure you update them for sos (strike.cpp) as well.
//
// !!!NOTE!!!
// Right now I am only recording data from blocking GCs. When recording from BGC,
// it should have its own copy just like gc_data_per_heap.
// for BGCs we will have a very different set of datapoints to record.
enum interesting_data_point
{
    idp_pre_short = 0,
    idp_post_short = 1,
    idp_merged_pin = 2,
    idp_converted_pin = 3,
    idp_pre_pin = 4,
    idp_post_pin = 5,
    idp_pre_and_post_pin = 6,
    idp_pre_short_padded = 7,
    idp_post_short_padded = 8,
    max_idp_count
};

//class definition of the internal class
class gc_heap
{
    friend class GCHeap;
#ifdef FEATURE_PREMORTEM_FINALIZATION
    friend class CFinalize;
#endif // FEATURE_PREMORTEM_FINALIZATION
    friend struct ::alloc_context;
    friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
    friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
    friend class t_join;
    friend class gc_mechanisms;
    friend class seg_free_spaces;

#ifdef BACKGROUND_GC
    friend class exclusive_sync;
    friend class recursive_gc_sync;
#endif //BACKGROUND_GC

#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
    friend void checkGCWriteBarrier();
    friend void initGCShadow();
#endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)

    friend void PopulateDacVars(GcDacVars *gcDacVars);

#ifdef MULTIPLE_HEAPS
    typedef void (gc_heap::* card_fn) (uint8_t**, int);
#define call_fn(fn) (this->*fn)
#define __this this
#else
    typedef void (* card_fn) (uint8_t**);
#define call_fn(fn) (*fn)
#define __this (gc_heap*)0
#endif
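
    // Illustrative only: card_fn abstracts over the member-function-pointer call needed
    // with multiple heaps. Given a hypothetical card-scanning member
    //     void gc_heap::mark_cards (uint8_t** poo, int condemned_gen);
    // a caller can write
    //     card_fn fn = &gc_heap::mark_cards;
    //     call_fn(fn) (poo, condemned_gen);
    // which expands to (this->*fn)(poo, condemned_gen) under MULTIPLE_HEAPS, and to a
    // plain function-pointer call otherwise (where the function takes only uint8_t**).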

public:

#ifdef TRACE_GC
    PER_HEAP
    void print_free_list (int gen, heap_segment* seg);
#endif // TRACE_GC

#ifdef SYNCHRONIZATION_STATS

    PER_HEAP_ISOLATED
    void init_sync_stats()
    {
#ifdef MULTIPLE_HEAPS
        for (int i = 0; i < gc_heap::n_heaps; i++)
        {
            gc_heap::g_heaps[i]->init_heap_sync_stats();
        }
#else  //MULTIPLE_HEAPS
        init_heap_sync_stats();
#endif  //MULTIPLE_HEAPS
    }

    PER_HEAP_ISOLATED
    void print_sync_stats(unsigned int gc_count_during_log)
    {
        // bad/good gl acquire is accumulative during the log interval (because the numbers are too small)
        // min/max msl_acquire is the min/max during the log interval, not each GC.
        // Threads is however many allocation threads for the last GC.
        // num of msl acquired, avg_msl, high and low are all for each GC.
        printf("%2s%2s%10s%10s%12s%6s%4s%8s(  st,  wl, stw, dpw)\n",
            "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");

#ifdef MULTIPLE_HEAPS
        for (int i = 0; i < gc_heap::n_heaps; i++)
        {
            gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
        }
#else  //MULTIPLE_HEAPS
        print_heap_sync_stats(0, gc_count_during_log);
#endif  //MULTIPLE_HEAPS
    }

#endif //SYNCHRONIZATION_STATS

    PER_HEAP
    void verify_soh_segment_list();
    PER_HEAP
    void verify_mark_array_cleared (heap_segment* seg);
    PER_HEAP
    void verify_mark_array_cleared();
    PER_HEAP
    void verify_seg_end_mark_array_cleared();
    PER_HEAP
    void verify_partial();

#ifdef VERIFY_HEAP
    PER_HEAP
    void verify_free_lists();
    PER_HEAP
    void verify_heap (BOOL begin_gc_p);
#endif //VERIFY_HEAP

    PER_HEAP_ISOLATED
    void fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num);

    PER_HEAP_ISOLATED
    void fire_pevents();

#ifdef FEATURE_BASICFREEZE
    static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
#endif

    static
    heap_segment* make_heap_segment (uint8_t* new_pages,
                                     size_t size,
                                     int h_number);
    static
    l_heap* make_large_heap (uint8_t* new_pages, size_t size, BOOL managed);

    static
    gc_heap* make_gc_heap(
#if defined (MULTIPLE_HEAPS)
        GCHeap* vm_heap,
        int heap_number
#endif //MULTIPLE_HEAPS
        );

    static
    void destroy_gc_heap(gc_heap* heap);

    static
    HRESULT initialize_gc  (size_t segment_size,
                            size_t heap_size
#ifdef MULTIPLE_HEAPS
                            , unsigned number_of_heaps
#endif //MULTIPLE_HEAPS
        );

    static
    void shutdown_gc();

    // If the hard limit is specified, take it into consideration;
    // this means the # of heaps may be modified.
    PER_HEAP_ISOLATED
    size_t get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps);

    PER_HEAP_ISOLATED
    bool should_retry_other_heap (size_t size);

    PER_HEAP
    CObjectHeader* allocate (size_t jsize,
                             alloc_context* acontext);

#ifdef MULTIPLE_HEAPS
    static void balance_heaps (alloc_context* acontext);
    static
    gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
    static
    void gc_thread_stub (void* arg);
#endif //MULTIPLE_HEAPS

    // For LOH allocations we only update the alloc_bytes_loh in allocation
    // context - we don't actually use the ptr/limit from it so I am
    // making this explicit by not passing in the alloc_context.
    PER_HEAP
    CObjectHeader* allocate_large_object (size_t size, int64_t& alloc_bytes);

#ifdef FEATURE_STRUCTALIGN
    PER_HEAP
    uint8_t* pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size);
#endif // FEATURE_STRUCTALIGN

    PER_HEAP_ISOLATED
    void do_pre_gc();

    PER_HEAP_ISOLATED
    void do_post_gc();

    PER_HEAP
    BOOL expand_soh_with_minimal_gc();

    // EE is always suspended when this method is called.
    // returning FALSE means we actually didn't do a GC. This happens
    // when we figured that we needed to do a BGC.
    PER_HEAP
    void garbage_collect (int n);

    // Since we don't want to waste a join just to do this, I am
    // doing this at the last join in gc1.
    PER_HEAP_ISOLATED
    void pm_full_gc_init_or_clear();

    // This does a GC when pm_trigger_full_gc is set
    PER_HEAP
    void garbage_collect_pm_full_gc();

    PER_HEAP_ISOLATED
    bool is_pm_ratio_exceeded();

    PER_HEAP
    void init_records();

    static
    uint32_t* make_card_table (uint8_t* start, uint8_t* end);

    static
    void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);

    static
    int grow_brick_card_tables (uint8_t* start,
                                uint8_t* end,
                                size_t size,
                                heap_segment* new_seg,
                                gc_heap* hp,
                                BOOL loh_p);

    PER_HEAP
    BOOL is_mark_set (uint8_t* o);

#ifdef FEATURE_BASICFREEZE
    PER_HEAP_ISOLATED
    bool frozen_object_p(Object* obj);
#endif // FEATURE_BASICFREEZE

protected:

    PER_HEAP_ISOLATED
    void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);

    PER_HEAP
    void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);

    struct walk_relocate_args
    {
        uint8_t* last_plug;
        BOOL is_shortened;
        mark* pinned_plug_entry;
        void* profiling_context;
        record_surv_fn fn;
    };

    PER_HEAP
    void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);

    PER_HEAP
    void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
                    walk_relocate_args* args);

    PER_HEAP
    void walk_relocation (void* profiling_context, record_surv_fn fn);

    PER_HEAP
    void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);

    PER_HEAP
    void walk_finalize_queue (fq_walk_fn fn);

#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
    PER_HEAP
    void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)

    // used in blocking GCs after plan phase so this walks the plugs.
    PER_HEAP
    void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
    PER_HEAP
    void walk_survivors_for_loh (void* profiling_context, record_surv_fn fn);

    PER_HEAP
    int generation_to_condemn (int n,
                               BOOL* blocking_collection_p,
                               BOOL* elevation_requested_p,
                               BOOL check_only_p);

    PER_HEAP_ISOLATED
    int joined_generation_to_condemn (BOOL should_evaluate_elevation,
                                      int initial_gen,
                                      int current_gen,
                                      BOOL* blocking_collection
                                      STRESS_HEAP_ARG(int n_original));

    PER_HEAP
    size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);

    PER_HEAP_ISOLATED
    uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);

    PER_HEAP
    void concurrent_print_time_delta (const char* msg);
    PER_HEAP
    void free_list_info (int gen_num, const char* msg);

    // in svr GC on entry and exit of this method, the GC threads are not
    // synchronized
    PER_HEAP
    void gc1();

    PER_HEAP_ISOLATED
    void save_data_for_no_gc();

    PER_HEAP_ISOLATED
    void restore_data_for_no_gc();

    PER_HEAP_ISOLATED
    void update_collection_counts_for_no_gc();

    PER_HEAP_ISOLATED
    BOOL should_proceed_with_gc();

    PER_HEAP_ISOLATED
    void record_gcs_during_no_gc();

    PER_HEAP
    BOOL find_loh_free_for_no_gc();

    PER_HEAP
    BOOL find_loh_space_for_no_gc();

    PER_HEAP
    BOOL commit_loh_for_no_gc (heap_segment* seg);

    PER_HEAP_ISOLATED
    start_no_gc_region_status prepare_for_no_gc_region (uint64_t total_size,
                                                        BOOL loh_size_known,
                                                        uint64_t loh_size,
                                                        BOOL disallow_full_blocking);

    PER_HEAP
    BOOL loh_allocated_for_no_gc();

    PER_HEAP_ISOLATED
    void release_no_gc_loh_segments();

    PER_HEAP_ISOLATED
    void thread_no_gc_loh_segments();

    PER_HEAP
    void check_and_set_no_gc_oom();

    PER_HEAP
    void allocate_for_no_gc_after_gc();

    PER_HEAP
    void set_loh_allocations_for_no_gc();

    PER_HEAP
    void set_soh_allocations_for_no_gc();

    PER_HEAP
    void prepare_for_no_gc_after_gc();

    PER_HEAP_ISOLATED
    void set_allocations_for_no_gc();

    PER_HEAP_ISOLATED
    BOOL should_proceed_for_no_gc();

    PER_HEAP_ISOLATED
    start_no_gc_region_status get_start_no_gc_region_status();

    PER_HEAP_ISOLATED
    end_no_gc_region_status end_no_gc_region();

    PER_HEAP_ISOLATED
    void handle_failure_for_no_gc();

    PER_HEAP
    void fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address);

    PER_HEAP
    void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);

    PER_HEAP
    size_t limit_from_size (size_t size, size_t room, int gen_number,
                            int align_const);
    PER_HEAP
    allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize,
                                              int alloc_generation_number);
    PER_HEAP
    BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
                              int alloc_generation_number);

    PER_HEAP
    size_t get_full_compact_gc_count();

    PER_HEAP
    BOOL short_on_end_of_seg (int gen_number,
                              heap_segment* seg,
                              int align_const);

    PER_HEAP
    BOOL a_fit_free_list_p (int gen_number,
                            size_t size,
                            alloc_context* acontext,
                            int align_const);

#ifdef BACKGROUND_GC
    PER_HEAP
    void wait_for_background (alloc_wait_reason awr, bool loh_p);

    PER_HEAP
    void wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p);

    PER_HEAP
    void bgc_loh_alloc_clr (uint8_t* alloc_start,
                            size_t size,
                            alloc_context* acontext,
                            int align_const,
                            int lock_index,
                            BOOL check_used_p,
                            heap_segment* seg);
#endif //BACKGROUND_GC

#ifdef BACKGROUND_GC
    PER_HEAP
    void bgc_track_loh_alloc();

    PER_HEAP
    void bgc_untrack_loh_alloc();

    PER_HEAP
    BOOL bgc_loh_should_allocate();
#endif //BACKGROUND_GC

#define max_saved_spinlock_info 48

#ifdef SPINLOCK_HISTORY
    PER_HEAP
    int spinlock_info_index;

    PER_HEAP
    spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
#endif //SPINLOCK_HISTORY

    PER_HEAP
    void add_saved_spinlock_info (
            bool loh_p,
            msl_enter_state enter_state,
            msl_take_state take_state);

    PER_HEAP
    void trigger_gc_for_alloc (int gen_number, gc_reason reason,
                               GCSpinLock* spin_lock, bool loh_p,
                               msl_take_state take_state);

    PER_HEAP
    BOOL a_fit_free_list_large_p (size_t size,
                                  alloc_context* acontext,
                                  int align_const);

    PER_HEAP
    BOOL a_fit_segment_end_p (int gen_number,
                              heap_segment* seg,
                              size_t size,
                              alloc_context* acontext,
                              int align_const,
                              BOOL* commit_failed_p);
    PER_HEAP
    BOOL loh_a_fit_segment_end_p (int gen_number,
                                  size_t size,
                                  alloc_context* acontext,
                                  int align_const,
                                  BOOL* commit_failed_p,
                                  oom_reason* oom_r);
    PER_HEAP
    BOOL loh_get_new_seg (generation* gen,
                          size_t size,
                          int align_const,
                          BOOL* commit_failed_p,
                          oom_reason* oom_r);

    PER_HEAP_ISOLATED
    size_t get_large_seg_size (size_t size);

    PER_HEAP
    BOOL retry_full_compact_gc (size_t size);

    PER_HEAP
    BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
                                 BOOL* did_full_compact_gc,
                                 bool loh_p);

    PER_HEAP
    BOOL trigger_full_compact_gc (gc_reason gr,
                                  oom_reason* oom_r,
                                  bool loh_p);

    PER_HEAP
    BOOL trigger_ephemeral_gc (gc_reason gr);

    PER_HEAP
    BOOL soh_try_fit (int gen_number,
                      size_t size,
                      alloc_context* acontext,
                      int align_const,
                      BOOL* commit_failed_p,
                      BOOL* short_seg_end_p);
    PER_HEAP
    BOOL loh_try_fit (int gen_number,
                      size_t size,
                      alloc_context* acontext,
                      int align_const,
                      BOOL* commit_failed_p,
                      oom_reason* oom_r);

    PER_HEAP
    allocation_state allocate_small (int gen_number,
                                     size_t size,
                                     alloc_context* acontext,
                                     int align_const);

#ifdef RECORD_LOH_STATE
    #define max_saved_loh_states 12
    PER_HEAP
    int loh_state_index;

    struct loh_state_info
    {
        allocation_state alloc_state;
        EEThreadId thread_id;
    };

    PER_HEAP
    loh_state_info last_loh_states[max_saved_loh_states];
    PER_HEAP
    void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
#endif //RECORD_LOH_STATE
    PER_HEAP
    allocation_state allocate_large (int gen_number,
                                     size_t size,
                                     alloc_context* acontext,
                                     int align_const);

    PER_HEAP_ISOLATED
    int init_semi_shared();
    PER_HEAP
    int init_gc_heap (int heap_number);
    PER_HEAP
    void self_destroy();
    PER_HEAP_ISOLATED
    void destroy_semi_shared();
    PER_HEAP
    void repair_allocation_contexts (BOOL repair_p);
    PER_HEAP
    void fix_allocation_contexts (BOOL for_gc_p);
    PER_HEAP
    void fix_youngest_allocation_area (BOOL for_gc_p);
    PER_HEAP
    void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
                                 int align_const);
    PER_HEAP
    void fix_large_allocation_area (BOOL for_gc_p);
    PER_HEAP
    void fix_older_allocation_area (generation* older_gen);
    PER_HEAP
    void set_allocation_heap_segment (generation* gen);
    PER_HEAP
    void reset_allocation_pointers (generation* gen, uint8_t* start);
    PER_HEAP
    int object_gennum (uint8_t* o);
    PER_HEAP
    int object_gennum_plan (uint8_t* o);
    PER_HEAP_ISOLATED
    void init_heap_segment (heap_segment* seg);
    PER_HEAP
    void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
#ifdef FEATURE_BASICFREEZE
    PER_HEAP
    BOOL insert_ro_segment (heap_segment* seg);
    PER_HEAP
    void remove_ro_segment (heap_segment* seg);
#endif //FEATURE_BASICFREEZE
    PER_HEAP
    BOOL set_ro_segment_in_range (heap_segment* seg);
    PER_HEAP
    BOOL unprotect_segment (heap_segment* seg);
    PER_HEAP
    heap_segment* soh_get_segment_to_expand();
    PER_HEAP
    heap_segment* get_segment (size_t size, BOOL loh_p);
    PER_HEAP_ISOLATED
    void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
    PER_HEAP_ISOLATED
    void seg_mapping_table_remove_segment (heap_segment* seg);
    PER_HEAP
    heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc);
    PER_HEAP
    void thread_loh_segment (heap_segment* new_seg);
1659     PER_HEAP_ISOLATED
1660     heap_segment* get_segment_for_loh (size_t size
1661 #ifdef MULTIPLE_HEAPS
1662                                       , gc_heap* hp
1663 #endif //MULTIPLE_HEAPS
1664                                       );
1665     PER_HEAP
1666     void reset_heap_segment_pages (heap_segment* seg);
1667     PER_HEAP
1668     void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1669     PER_HEAP
1670     void decommit_heap_segment (heap_segment* seg);
1671     PER_HEAP_ISOLATED
1672     bool virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number);
1673     PER_HEAP_ISOLATED
1674     bool virtual_commit (void* address, size_t size, int h_number=-1, bool* hard_limit_exceeded_p=NULL);
1675     PER_HEAP_ISOLATED
1676     bool virtual_decommit (void* address, size_t size, int h_number=-1);
1677     PER_HEAP
1678     void clear_gen0_bricks();
1679 #ifdef BACKGROUND_GC
1680     PER_HEAP
1681     void rearrange_small_heap_segments();
1682 #endif //BACKGROUND_GC
1683     PER_HEAP
1684     void rearrange_large_heap_segments();
1685     PER_HEAP
1686     void rearrange_heap_segments(BOOL compacting);
1687
1688     PER_HEAP_ISOLATED
1689     void reset_write_watch_for_gc_heap(void* base_address, size_t region_size);
1690     PER_HEAP_ISOLATED
1691     void get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended);
1692
1693     PER_HEAP
1694     void switch_one_quantum();
1695     PER_HEAP
1696     void reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size);
1697     PER_HEAP
1698     void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1699     PER_HEAP
1700     void reset_write_watch (BOOL concurrent_p);
1701     PER_HEAP
1702     void adjust_ephemeral_limits();
1703     PER_HEAP
1704     void make_generation (generation& gen, heap_segment* seg,
1705                           uint8_t* start, uint8_t* pointer);
1706
1707
1708 #define USE_PADDING_FRONT 1
1709 #define USE_PADDING_TAIL  2
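// These two values are bit flags. A hedged usage sketch (an illustration, not a
// statement about every call site): a caller that wants padding considered on
// both ends could pass (USE_PADDING_FRONT | USE_PADDING_TAIL) as the
// use_padding argument of size_fit_p below.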
1710
1711     PER_HEAP
1712     BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1713                      uint8_t* old_loc=0, int use_padding=USE_PADDING_TAIL);
1714     PER_HEAP
1715     BOOL a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1716                        int align_const);
1717
1718     PER_HEAP
1719     void handle_oom (int heap_num, oom_reason reason, size_t alloc_size, 
1720                      uint8_t* allocated, uint8_t* reserved);
1721
1722     PER_HEAP
1723     size_t card_of ( uint8_t* object);
1724     PER_HEAP
1725     uint8_t* brick_address (size_t brick);
1726     PER_HEAP
1727     size_t brick_of (uint8_t* add);
1728     PER_HEAP
1729     uint8_t* card_address (size_t card);
1730     PER_HEAP
1731     size_t card_to_brick (size_t card);
1732     PER_HEAP
1733     void clear_card (size_t card);
1734     PER_HEAP
1735     void set_card (size_t card);
1736     PER_HEAP
1737     BOOL  card_set_p (size_t card);
1738     PER_HEAP
1739     void card_table_set_bit (uint8_t* location);
1740
1741 #ifdef CARD_BUNDLE
1742     PER_HEAP
1743     void update_card_table_bundle();
1744     PER_HEAP
1745     void reset_card_table_write_watch();
1746     PER_HEAP
1747     void card_bundle_clear(size_t cardb);
1748     PER_HEAP
1749     void card_bundle_set (size_t cardb);
1750     PER_HEAP
1751     void card_bundles_set (size_t start_cardb, size_t end_cardb);
1752     PER_HEAP
1753     void verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word);
1754     PER_HEAP
1755     void verify_card_bundles();
1756     PER_HEAP
1757     BOOL card_bundle_set_p (size_t cardb);
1758     PER_HEAP
1759     BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1760     PER_HEAP
1761     void enable_card_bundles();
1762     PER_HEAP_ISOLATED
1763     BOOL card_bundles_enabled();
1764
1765 #endif //CARD_BUNDLE
1766
1767     PER_HEAP
1768     BOOL find_card (uint32_t* card_table, size_t& card,
1769                     size_t card_word_end, size_t& end_card);
1770     PER_HEAP
1771     BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p=NULL);
1772     PER_HEAP
1773     int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1774     PER_HEAP
1775     void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
1776                                 short* old_brick_table,
1777                                 heap_segment* seg,
1778                                 uint8_t* start, uint8_t* end);
1779     PER_HEAP
1780     void init_brick_card_range (heap_segment* seg);
1781     PER_HEAP
1782     void copy_brick_card_table_l_heap ();
1783     PER_HEAP
1784     void copy_brick_card_table();
1785     PER_HEAP
1786     void clear_brick_table (uint8_t* from, uint8_t* end);
1787     PER_HEAP
1788     void set_brick (size_t index, ptrdiff_t val);
1789     PER_HEAP
1790     int get_brick_entry (size_t index);
1791 #ifdef MARK_ARRAY
1792     PER_HEAP
1793     unsigned int mark_array_marked (uint8_t* add);
1794     PER_HEAP
1795     void mark_array_set_marked (uint8_t* add);
1796     PER_HEAP
1797     BOOL is_mark_bit_set (uint8_t* add);
1798     PER_HEAP
1799     void gmark_array_set_marked (uint8_t* add);
1800     PER_HEAP
1801     void set_mark_array_bit (size_t mark_bit);
1802     PER_HEAP
1803     BOOL mark_array_bit_set (size_t mark_bit);
1804     PER_HEAP
1805     void mark_array_clear_marked (uint8_t* add);
1806     PER_HEAP
1807     void clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only=TRUE
1808 #ifdef FEATURE_BASICFREEZE
1809         , BOOL read_only=FALSE
1810 #endif // FEATURE_BASICFREEZE
1811         );
1812 #ifdef BACKGROUND_GC
1813     PER_HEAP
1814     void seg_clear_mark_array_bits_soh (heap_segment* seg);
1815     PER_HEAP
1816     void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1817     PER_HEAP
1818     void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1819     PER_HEAP
1820     void clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p);
1821 #ifdef VERIFY_HEAP
1822     PER_HEAP
1823     void set_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1824     PER_HEAP
1825     void check_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1826 #endif //VERIFY_HEAP
1827 #endif //BACKGROUND_GC
1828 #endif //MARK_ARRAY
1829
1830     PER_HEAP
1831     BOOL large_object_marked (uint8_t* o, BOOL clearp);
1832
1833 #ifdef BACKGROUND_GC
1834     PER_HEAP
1835     BOOL background_allowed_p();
1836 #endif //BACKGROUND_GC
1837
1838     PER_HEAP_ISOLATED
1839     void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1840
1841     PER_HEAP
1842     void check_for_full_gc (int gen_num, size_t size);
1843
1844     PER_HEAP
1845     void adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
1846                        int gen_number);
1847     PER_HEAP
1848     void adjust_limit_clr (uint8_t* start, size_t limit_size,
1849                            alloc_context* acontext, heap_segment* seg,
1850                            int align_const, int gen_number);
1851     PER_HEAP
1852     void  leave_allocation_segment (generation* gen);
1853
1854     PER_HEAP
1855     void init_free_and_plug();
1856
1857     PER_HEAP
1858     void print_free_and_plug (const char* msg);
1859
1860     PER_HEAP
1861     void add_gen_plug (int gen_number, size_t plug_size);
1862
1863     PER_HEAP
1864     void add_gen_free (int gen_number, size_t free_size);
1865
1866     PER_HEAP
1867     void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1868     
1869     PER_HEAP
1870     void remove_gen_free (int gen_number, size_t free_size);
1871
1872     PER_HEAP
1873     uint8_t* allocate_in_older_generation (generation* gen, size_t size,
1874                                         int from_gen_number,
1875                                         uint8_t* old_loc=0
1876                                         REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1877     PER_HEAP
1878     generation*  ensure_ephemeral_heap_segment (generation* consing_gen);
1879     PER_HEAP
1880     uint8_t* allocate_in_condemned_generations (generation* gen,
1881                                              size_t size,
1882                                              int from_gen_number,
1883 #ifdef SHORT_PLUGS
1884                                              BOOL* convert_to_pinned_p=NULL,
1885                                              uint8_t* next_pinned_plug=0,
1886                                              heap_segment* current_seg=0,
1887 #endif //SHORT_PLUGS
1888                                              uint8_t* old_loc=0
1889                                              REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1890 #ifdef INTERIOR_POINTERS
1891     // Verifies that interior is actually in the range of seg; otherwise 
1892     // returns 0.
1893     PER_HEAP_ISOLATED
1894     heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p);
1895
1896     PER_HEAP
1897     heap_segment* find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p);
1898
1899     PER_HEAP
1900     uint8_t* find_object_for_relocation (uint8_t* o, uint8_t* low, uint8_t* high);
1901 #endif //INTERIOR_POINTERS
1902
1903     PER_HEAP_ISOLATED
1904     gc_heap* heap_of (uint8_t* object);
1905
1906     PER_HEAP_ISOLATED
1907     gc_heap* heap_of_gc (uint8_t* object);
1908
1909     PER_HEAP_ISOLATED
1910     size_t&  promoted_bytes (int);
1911
1912     PER_HEAP
1913     uint8_t* find_object (uint8_t* o, uint8_t* low);
1914
1915     PER_HEAP
1916     dynamic_data* dynamic_data_of (int gen_number);
1917     PER_HEAP
1918     ptrdiff_t  get_desired_allocation (int gen_number);
1919     PER_HEAP
1920     ptrdiff_t  get_new_allocation (int gen_number);
1921     PER_HEAP
1922     ptrdiff_t  get_allocation (int gen_number);
1923     PER_HEAP
1924     bool new_allocation_allowed (int gen_number);
1925 #ifdef BACKGROUND_GC
1926     PER_HEAP_ISOLATED
1927     void allow_new_allocation (int gen_number);
1928     PER_HEAP_ISOLATED
1929     void disallow_new_allocation (int gen_number);
1930 #endif //BACKGROUND_GC
1931     PER_HEAP
1932     void reset_pinned_queue();
1933     PER_HEAP
1934     void reset_pinned_queue_bos();
1935     PER_HEAP
1936     void set_allocator_next_pin (generation* gen);
1937     PER_HEAP
1938     void set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit);
1939     PER_HEAP
1940     void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len);
1941     PER_HEAP
1942     void enque_pinned_plug (uint8_t* plug,
1943                             BOOL save_pre_plug_info_p,
1944                             uint8_t* last_object_in_last_plug);
1945     PER_HEAP
1946     void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size);
1947     PER_HEAP
1948     void set_pinned_info (uint8_t* last_pinned_plug,
1949                           size_t plug_len,
1950                           uint8_t* alloc_pointer,
1951                           uint8_t*& alloc_limit);
1952     PER_HEAP
1953     void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen);
1954     PER_HEAP
1955     void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug);
1956     PER_HEAP
1957     size_t deque_pinned_plug ();
1958     PER_HEAP
1959     mark* pinned_plug_of (size_t bos);
1960     PER_HEAP
1961     mark* oldest_pin ();
1962     PER_HEAP
1963     mark* before_oldest_pin();
1964     PER_HEAP
1965     BOOL pinned_plug_que_empty_p ();
1966     PER_HEAP
1967     void make_mark_stack (mark* arr);
1968 #ifdef MH_SC_MARK
1969     PER_HEAP
1970     int& mark_stack_busy();
1971     PER_HEAP
1972     VOLATILE(uint8_t*)& ref_mark_stack (gc_heap* hp, int index);
1973 #endif
1974 #ifdef BACKGROUND_GC
1975     PER_HEAP_ISOLATED
1976     size_t&  bpromoted_bytes (int);
1977     PER_HEAP
1978     void make_background_mark_stack (uint8_t** arr);
1979     PER_HEAP
1980     void make_c_mark_list (uint8_t** arr);
1981 #endif //BACKGROUND_GC
1982     PER_HEAP
1983     generation* generation_of (int  n);
1984     PER_HEAP
1985     BOOL gc_mark1 (uint8_t* o);
1986     PER_HEAP
1987     BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high);
1988     PER_HEAP
1989     uint8_t* mark_object(uint8_t* o THREAD_NUMBER_DCL);
1990 #ifdef HEAP_ANALYZE
1991     PER_HEAP
1992     void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
1993 #endif //HEAP_ANALYZE
1994     PER_HEAP
1995     void mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
1996     PER_HEAP
1997     void mark_object_simple1 (uint8_t* o, uint8_t* start THREAD_NUMBER_DCL);
1998
1999 #ifdef MH_SC_MARK
2000     PER_HEAP
2001     void mark_steal ();
2002 #endif //MH_SC_MARK
2003
2004 #ifdef BACKGROUND_GC
2005
2006     PER_HEAP
2007     BOOL background_marked (uint8_t* o);
2008     PER_HEAP
2009     BOOL background_mark1 (uint8_t* o);
2010     PER_HEAP
2011     BOOL background_mark (uint8_t* o, uint8_t* low, uint8_t* high);
2012     PER_HEAP
2013     uint8_t* background_mark_object (uint8_t* o THREAD_NUMBER_DCL);
2014     PER_HEAP
2015     void background_mark_simple (uint8_t* o THREAD_NUMBER_DCL);
2016     PER_HEAP
2017     void background_mark_simple1 (uint8_t* o THREAD_NUMBER_DCL);
2018     PER_HEAP_ISOLATED
2019     void background_promote (Object**, ScanContext* , uint32_t);
2020     PER_HEAP
2021     BOOL background_object_marked (uint8_t* o, BOOL clearp);
2022     PER_HEAP
2023     void init_background_gc();
2024     PER_HEAP
2025     uint8_t* background_next_end (heap_segment*, BOOL);
2026     // while we are in the LOH sweep we can't modify the segment list
2027     // there, so we mark segments as to-be-deleted and delete them
2028     // at the next chance we get.
2029     PER_HEAP
2030     void background_delay_delete_loh_segments();
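    // Illustrative sketch only of the delay-delete pattern described above
    // (delete_pending is a hypothetical flag name, not the actual field):
    //   during the LOH sweep:    seg->flags |= delete_pending;
    //   at the next safe point:  if (seg->flags & delete_pending)
    //                                delete_heap_segment (seg);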
2031     PER_HEAP
2032     void generation_delete_heap_segment (generation*, 
2033                                          heap_segment*, heap_segment*, heap_segment*);
2034     PER_HEAP
2035     void set_mem_verify (uint8_t*, uint8_t*, uint8_t);
2036     PER_HEAP
2037     void process_background_segment_end (heap_segment*, generation*, uint8_t*,
2038                                      heap_segment*, BOOL*);
2039     PER_HEAP
2040     void process_n_background_segments (heap_segment*, heap_segment*, generation* gen);
2041     PER_HEAP
2042     BOOL fgc_should_consider_object (uint8_t* o,
2043                                      heap_segment* seg,
2044                                      BOOL consider_bgc_mark_p,
2045                                      BOOL check_current_sweep_p,
2046                                      BOOL check_saved_sweep_p);
2047     PER_HEAP
2048     void should_check_bgc_mark (heap_segment* seg, 
2049                                 BOOL* consider_bgc_mark_p, 
2050                                 BOOL* check_current_sweep_p,
2051                                 BOOL* check_saved_sweep_p);
2052     PER_HEAP
2053     void background_ephemeral_sweep();
2054     PER_HEAP
2055     void background_sweep ();
2056     PER_HEAP
2057     void background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL);
2058     PER_HEAP
2059     uint8_t* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2060     PER_HEAP
2061     uint8_t* background_first_overflow (uint8_t* min_add,
2062                                      heap_segment* seg,
2063                                      BOOL concurrent_p, 
2064                                      BOOL small_object_p);
2065     PER_HEAP
2066     void background_process_mark_overflow_internal (int condemned_gen_number,
2067                                                     uint8_t* min_add, uint8_t* max_add,
2068                                                     BOOL concurrent_p);
2069     PER_HEAP
2070     BOOL background_process_mark_overflow (BOOL concurrent_p);
2071
2072     // for foreground GC to get hold of background structures containing refs
2073     PER_HEAP
2074     void
2075     scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2076
2077     PER_HEAP
2078     BOOL bgc_mark_array_range (heap_segment* seg, 
2079                                BOOL whole_seg_p,
2080                                uint8_t** range_beg,
2081                                uint8_t** range_end);
2082     PER_HEAP
2083     void bgc_verify_mark_array_cleared (heap_segment* seg);
2084     PER_HEAP
2085     void verify_mark_bits_cleared (uint8_t* obj, size_t s);
2086     PER_HEAP
2087     void clear_all_mark_array();
2088 #endif //BACKGROUND_GC
2089
2090     PER_HEAP
2091     uint8_t* next_end (heap_segment* seg, uint8_t* f);
2092     PER_HEAP
2093     void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2094     PER_HEAP
2095     BOOL process_mark_overflow (int condemned_gen_number);
2096     PER_HEAP
2097     void process_mark_overflow_internal (int condemned_gen_number,
2098                                          uint8_t* min_address, uint8_t* max_address);
2099
2100 #ifdef SNOOP_STATS
2101     PER_HEAP
2102     void print_snoop_stat();
2103 #endif //SNOOP_STATS
2104
2105 #ifdef MH_SC_MARK
2106
2107     PER_HEAP
2108     BOOL check_next_mark_stack (gc_heap* next_heap);
2109
2110 #endif //MH_SC_MARK
2111
2112     PER_HEAP
2113     void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2114
2115     PER_HEAP
2116     void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2117
2118     PER_HEAP
2119     void pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high);
2120
2121 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
2122     PER_HEAP_ISOLATED
2123     size_t get_total_pinned_objects();
2124 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
2125
2126     PER_HEAP
2127     void reset_mark_stack ();
2128     PER_HEAP
2129     uint8_t* insert_node (uint8_t* new_node, size_t sequence_number,
2130                        uint8_t* tree, uint8_t* last_node);
2131     PER_HEAP
2132     size_t update_brick_table (uint8_t* tree, size_t current_brick,
2133                                uint8_t* x, uint8_t* plug_end);
2134
2135     PER_HEAP
2136     void plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate);
2137
2138     PER_HEAP
2139     void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2140
2141     PER_HEAP
2142     void plan_generation_starts (generation*& consing_gen);
2143
2144     PER_HEAP
2145     void advance_pins_for_demotion (generation* gen);
2146
2147     PER_HEAP
2148     void process_ephemeral_boundaries(uint8_t* x, int& active_new_gen_number,
2149                                       int& active_old_gen_number,
2150                                       generation*& consing_gen,
2151                                       BOOL& allocate_in_condemned);
2152     PER_HEAP
2153     void seg_clear_mark_bits (heap_segment* seg);
2154     PER_HEAP
2155     void sweep_ro_segments (heap_segment* start_seg);
2156     PER_HEAP
2157     void convert_to_pinned_plug (BOOL& last_npinned_plug_p, 
2158                                  BOOL& last_pinned_plug_p, 
2159                                  BOOL& pinned_plug_p,
2160                                  size_t ps,
2161                                  size_t& artificial_pinned_size);
2162     PER_HEAP
2163     void store_plug_gap_info (uint8_t* plug_start,
2164                               uint8_t* plug_end,
2165                               BOOL& last_npinned_plug_p, 
2166                               BOOL& last_pinned_plug_p, 
2167                               uint8_t*& last_pinned_plug,
2168                               BOOL& pinned_plug_p,
2169                               uint8_t* last_object_in_last_plug,
2170                               BOOL& merge_with_last_pin_p,
2171                               // this is only for verification purposes
2172                               size_t last_plug_len);
2173     PER_HEAP
2174     void plan_phase (int condemned_gen_number);
2175
2176     PER_HEAP
2177     void record_interesting_data_point (interesting_data_point idp);
2178
2179 #ifdef GC_CONFIG_DRIVEN
2180     PER_HEAP
2181     void record_interesting_info_per_heap();
2182     PER_HEAP_ISOLATED
2183     void record_global_mechanisms();
2184     PER_HEAP_ISOLATED
2185     BOOL should_do_sweeping_gc (BOOL compact_p);
2186 #endif //GC_CONFIG_DRIVEN
2187
2188 #ifdef FEATURE_LOH_COMPACTION
2189     // plan_loh can allocate memory so it can fail. If it fails, we will
2190     // fall back to sweeping.  
2191     PER_HEAP
2192     BOOL plan_loh();
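    // A minimal sketch of the fallback described above (illustrative only,
    // not the exact call site):
    //   if (!plan_loh())
    //   {
    //       // plan_loh couldn't get the memory it needs - this GC sweeps
    //       // the LOH instead of compacting it.
    //       loh_compacted_p = FALSE;
    //   }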
2193
2194     PER_HEAP
2195     void compact_loh();
2196
2197     PER_HEAP
2198     void relocate_in_loh_compact();
2199
2200     PER_HEAP
2201     void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn);
2202
2203     PER_HEAP
2204     BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
2205
2206     PER_HEAP
2207     void loh_set_allocator_next_pin();
2208
2209     PER_HEAP
2210     BOOL loh_pinned_plug_que_empty_p();
2211
2212     PER_HEAP
2213     size_t loh_deque_pinned_plug();
2214
2215     PER_HEAP
2216     mark* loh_pinned_plug_of (size_t bos);
2217
2218     PER_HEAP
2219     mark* loh_oldest_pin();
2220
2221     PER_HEAP
2222     BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit);
2223
2224     PER_HEAP
2225     uint8_t* loh_allocate_in_condemned (uint8_t* old_loc, size_t size);
2226
2227     PER_HEAP_ISOLATED
2228     BOOL loh_object_p (uint8_t* o);
2229
2230     PER_HEAP_ISOLATED
2231     BOOL loh_compaction_requested();
2232
2233     // If the LOH compaction mode is just to compact once,
2234     // we need to see if we should reset it back to not compact.
2235     // We would only reset if every heap's LOH was compacted.
2236     PER_HEAP_ISOLATED
2237     void check_loh_compact_mode  (BOOL all_heaps_compacted_p);
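    // Sketch of how a caller could aggregate the per-heap result described
    // above (the g_heaps/n_heaps bookkeeping shown here is an assumption for
    // illustration only):
    //   BOOL all_heaps_compacted_p = TRUE;
    //   for (int i = 0; i < n_heaps; i++)
    //       all_heaps_compacted_p &= g_heaps[i]->loh_compacted_p;
    //   check_loh_compact_mode (all_heaps_compacted_p);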
2238 #endif //FEATURE_LOH_COMPACTION
2239
2240     PER_HEAP
2241     void decommit_ephemeral_segment_pages (int condemned_gen_number);
2242     PER_HEAP
2243     void fix_generation_bounds (int condemned_gen_number,
2244                                 generation* consing_gen);
2245     PER_HEAP
2246     uint8_t* generation_limit (int gen_number);
2247
2248     struct make_free_args
2249     {
2250         int free_list_gen_number;
2251         uint8_t* current_gen_limit;
2252         generation* free_list_gen;
2253         uint8_t* highest_plug;
2254     };
2255     PER_HEAP
2256     uint8_t* allocate_at_end (size_t size);
2257     PER_HEAP
2258     BOOL ensure_gap_allocation (int condemned_gen_number);
2259     // make_free_lists is only called by blocking GCs.
2260     PER_HEAP
2261     void make_free_lists (int condemned_gen_number);
2262     PER_HEAP
2263     void make_free_list_in_brick (uint8_t* tree, make_free_args* args);
2264     PER_HEAP
2265     void thread_gap (uint8_t* gap_start, size_t size, generation*  gen);
2266     PER_HEAP
2267     void loh_thread_gap_front (uint8_t* gap_start, size_t size, generation*  gen);
2268     PER_HEAP
2269     void make_unused_array (uint8_t* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2270     PER_HEAP
2271     void clear_unused_array (uint8_t* x, size_t size);
2272     PER_HEAP
2273     void relocate_address (uint8_t** old_address THREAD_NUMBER_DCL);
2274     struct relocate_args
2275     {
2276         uint8_t* last_plug;
2277         uint8_t* low;
2278         uint8_t* high;
2279         BOOL is_shortened;
2280         mark* pinned_plug_entry;
2281     };
2282
2283     PER_HEAP
2284     void reloc_survivor_helper (uint8_t** pval);
2285     PER_HEAP
2286     void check_class_object_demotion (uint8_t* obj);
2287     PER_HEAP
2288     void check_class_object_demotion_internal (uint8_t* obj);
2289
2290     PER_HEAP 
2291     void check_demotion_helper (uint8_t** pval, uint8_t* parent_obj);
2292
2293     PER_HEAP
2294     void relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end);
2295
2296     PER_HEAP
2297     void verify_pins_with_post_plug_info (const char* msg);
2298
2299 #ifdef COLLECTIBLE_CLASS
2300     PER_HEAP
2301     void unconditional_set_card_collectible (uint8_t* obj);
2302 #endif //COLLECTIBLE_CLASS
2303
2304     PER_HEAP
2305     void relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry);
2306     
2307     PER_HEAP
2308     void relocate_obj_helper (uint8_t* x, size_t s);
2309
2310     PER_HEAP
2311     void reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc);
2312
2313     PER_HEAP
2314     void relocate_pre_plug_info (mark* pinned_plug_entry);
2315
2316     PER_HEAP
2317     void relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned);
2318
2319     PER_HEAP
2320     void relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
2321                                      BOOL check_last_object_p, 
2322                                      mark* pinned_plug_entry);
2323     PER_HEAP
2324     void relocate_survivors_in_brick (uint8_t* tree, relocate_args* args);
2325
2326     PER_HEAP
2327     void update_oldest_pinned_plug();
2328
2329     PER_HEAP
2330     void relocate_survivors (int condemned_gen_number,
2331                              uint8_t* first_condemned_address );
2332     PER_HEAP
2333     void relocate_phase (int condemned_gen_number,
2334                          uint8_t* first_condemned_address);
2335
2336     struct compact_args
2337     {
2338         BOOL copy_cards_p;
2339         uint8_t* last_plug;
2340         ptrdiff_t last_plug_relocation;
2341         uint8_t* before_last_plug;
2342         size_t current_compacted_brick;
2343         BOOL is_shortened;
2344         mark* pinned_plug_entry;
2345         BOOL check_gennum_p;
2346         int src_gennum;
2347
2348         void print()
2349         {
2350             dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2351                 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2352         }
2353     };
2354
2355     PER_HEAP
2356     void copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2357     PER_HEAP
2358     void  gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2359     PER_HEAP
2360     void compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2361     PER_HEAP
2362     void compact_in_brick (uint8_t* tree, compact_args* args);
2363
2364     PER_HEAP
2365     mark* get_next_pinned_entry (uint8_t* tree,
2366                                  BOOL* has_pre_plug_info_p,
2367                                  BOOL* has_post_plug_info_p,
2368                                  BOOL deque_p=TRUE);
2369
2370     PER_HEAP
2371     mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2372
2373     PER_HEAP
2374     void recover_saved_pinned_info();
2375
2376     PER_HEAP
2377     void compact_phase (int condemned_gen_number, uint8_t*
2378                         first_condemned_address, BOOL clear_cards);
2379     PER_HEAP
2380     void clear_cards (size_t start_card, size_t end_card);
2381     PER_HEAP
2382     void clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address);
2383     PER_HEAP
2384     void copy_cards (size_t dst_card, size_t src_card,
2385                      size_t end_card, BOOL nextp);
2386     PER_HEAP
2387     void copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2388
2389 #ifdef BACKGROUND_GC
2390     PER_HEAP
2391     void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2392     PER_HEAP
2393     void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2394 #endif //BACKGROUND_GC
2395
2396
2397     PER_HEAP
2398     BOOL ephemeral_pointer_p (uint8_t* o);
2399     PER_HEAP
2400     void fix_brick_to_highest (uint8_t* o, uint8_t* next_o);
2401     PER_HEAP
2402     uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object);
2403     PER_HEAP
2404     uint8_t* compute_next_boundary (uint8_t* low, int gen_number, BOOL relocating);
2405     PER_HEAP
2406     void keep_card_live (uint8_t* o, size_t& n_gen,
2407                          size_t& cg_pointers_found);
2408     PER_HEAP
2409     void mark_through_cards_helper (uint8_t** poo, size_t& ngen,
2410                                     size_t& cg_pointers_found,
2411                                     card_fn fn, uint8_t* nhigh,
2412                                     uint8_t* next_boundary);
2413
2414     PER_HEAP
2415     BOOL card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
2416                                size_t& cg_pointers_found, 
2417                                size_t& n_eph, size_t& n_card_set,
2418                                size_t& card, size_t& end_card,
2419                                BOOL& foundp, uint8_t*& start_address,
2420                                uint8_t*& limit, size_t& n_cards_cleared);
2421     PER_HEAP
2422     void mark_through_cards_for_segments (card_fn fn, BOOL relocating);
2423
2424     PER_HEAP
2425     void repair_allocation_in_expanded_heap (generation* gen);
2426     PER_HEAP
2427     BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2428     PER_HEAP
2429     BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2430     PER_HEAP
2431     BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2432 #ifdef SEG_REUSE_STATS
2433     PER_HEAP
2434     size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2435 #endif //SEG_REUSE_STATS
2436     PER_HEAP
2437     void build_ordered_free_spaces (heap_segment* seg);
2438     PER_HEAP
2439     void count_plug (size_t last_plug_size, uint8_t*& last_plug);
2440     PER_HEAP
2441     void count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug);
2442     PER_HEAP
2443     void build_ordered_plug_indices ();
2444     PER_HEAP
2445     void init_ordered_free_space_indices ();
2446     PER_HEAP
2447     void trim_free_spaces_indices ();
2448     PER_HEAP
2449     BOOL try_best_fit (BOOL end_of_segment_p);
2450     PER_HEAP
2451     BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2452     PER_HEAP
2453     BOOL process_free_space (heap_segment* seg, 
2454                              size_t free_space,
2455                              size_t min_free_size, 
2456                              size_t min_cont_size,
2457                              size_t* total_free_space,
2458                              size_t* largest_free_space);
2459     PER_HEAP
2460     size_t compute_eph_gen_starts_size();
2461     PER_HEAP
2462     void compute_new_ephemeral_size();
2463     PER_HEAP
2464     BOOL expand_reused_seg_p();
2465     PER_HEAP
2466     BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2467                             size_t min_cont_size, allocator* al);
2468     PER_HEAP
2469     uint8_t* allocate_in_expanded_heap (generation* gen, size_t size,
2470                                      BOOL& adjacentp, uint8_t* old_loc,
2471 #ifdef SHORT_PLUGS
2472                                      BOOL set_padding_on_saved_p,
2473                                      mark* pinned_plug_entry,
2474 #endif //SHORT_PLUGS
2475                                      BOOL consider_bestfit, int active_new_gen_number
2476                                      REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2477     PER_HEAP
2478     void realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
2479                        generation* gen, uint8_t* start_address,
2480                        unsigned int& active_new_gen_number,
2481                        uint8_t*& last_pinned_gap, BOOL& leftp,
2482                        BOOL shortened_p
2483 #ifdef SHORT_PLUGS
2484                        , mark* pinned_plug_entry
2485 #endif //SHORT_PLUGS
2486                        );
2487     PER_HEAP
2488     void realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address,
2489                            generation* gen,
2490                            unsigned int& active_new_gen_number,
2491                            uint8_t*& last_pinned_gap, BOOL& leftp);
2492     PER_HEAP
2493     void realloc_plugs (generation* consing_gen, heap_segment* seg,
2494                         uint8_t* start_address, uint8_t* end_address,
2495                         unsigned active_new_gen_number);
2496
2497     PER_HEAP
2498     void set_expand_in_full_gc (int condemned_gen_number);
2499
2500     PER_HEAP
2501     void verify_no_pins (uint8_t* start, uint8_t* end);
2502
2503     PER_HEAP
2504     generation* expand_heap (int condemned_generation,
2505                              generation* consing_gen,
2506                              heap_segment* new_heap_segment);
2507
2508     PER_HEAP
2509     void save_ephemeral_generation_starts();
2510
2511     PER_HEAP_ISOLATED
2512     size_t get_gen0_min_size();
2513
2514     PER_HEAP
2515     void set_static_data();
2516
2517     PER_HEAP_ISOLATED
2518     void init_static_data();
2519
2520     PER_HEAP
2521     bool init_dynamic_data ();
2522     PER_HEAP
2523     float surv_to_growth (float cst, float limit, float max_limit);
2524     PER_HEAP
2525     size_t desired_new_allocation (dynamic_data* dd, size_t out,
2526                                    int gen_number, int pass);
2527
2528     PER_HEAP
2529     void trim_youngest_desired_low_memory();
2530
2531     PER_HEAP
2532     void decommit_ephemeral_segment_pages();
2533
2534 #ifdef BIT64
2535     PER_HEAP_ISOLATED
2536     size_t trim_youngest_desired (uint32_t memory_load,
2537                                   size_t total_new_allocation,
2538                                   size_t total_min_allocation);
2539     PER_HEAP_ISOLATED
2540     size_t joined_youngest_desired (size_t new_allocation);
2541 #endif // BIT64
2542     PER_HEAP_ISOLATED
2543     size_t get_total_heap_size ();
2544     PER_HEAP_ISOLATED
2545     size_t get_total_committed_size();
2546     PER_HEAP_ISOLATED
2547     size_t get_total_fragmentation();
2548     PER_HEAP_ISOLATED
2549     size_t get_total_gen_fragmentation (int gen_number);
2550     PER_HEAP_ISOLATED
2551     size_t get_total_gen_estimated_reclaim (int gen_number);
2552     PER_HEAP_ISOLATED
2553     void get_memory_info (uint32_t* memory_load, 
2554                           uint64_t* available_physical=NULL,
2555                           uint64_t* available_page_file=NULL);
2556     PER_HEAP
2557     size_t generation_size (int gen_number);
2558     PER_HEAP_ISOLATED
2559     size_t get_total_survived_size();
2560     // this also resets allocated_since_last_gc
2561     PER_HEAP_ISOLATED
2562     size_t get_total_allocated_since_last_gc();
2563     PER_HEAP
2564     size_t get_current_allocated();
2565     PER_HEAP_ISOLATED
2566     size_t get_total_allocated();
2567     PER_HEAP
2568     size_t current_generation_size (int gen_number);
2569     PER_HEAP
2570     size_t generation_plan_size (int gen_number);
2571     PER_HEAP
2572     void  compute_promoted_allocation (int gen_number);
2573     PER_HEAP
2574     size_t  compute_in (int gen_number);
2575     PER_HEAP
2576     void compute_new_dynamic_data (int gen_number);
2577     PER_HEAP
2578     gc_history_per_heap* get_gc_data_per_heap();
2579     PER_HEAP
2580     size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2581     PER_HEAP
2582     size_t generation_fragmentation (generation* gen,
2583                                      generation* consing_gen,
2584                                      uint8_t* end);
2585     PER_HEAP
2586     size_t generation_sizes (generation* gen);
2587     PER_HEAP
2588     size_t committed_size();
2589     PER_HEAP
2590     size_t committed_size (bool loh_p, size_t* allocated);
2591     PER_HEAP
2592     size_t approximate_new_allocation();
2593     PER_HEAP
2594     size_t end_space_after_gc();
2595     PER_HEAP
2596     size_t estimated_reclaim (int gen_number);
2597     PER_HEAP
2598     BOOL decide_on_compacting (int condemned_gen_number,
2599                                size_t fragmentation,
2600                                BOOL& should_expand);
2601     PER_HEAP
2602     BOOL sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end, 
2603                                    size_t end_space_required, 
2604                                    gc_tuning_point tp);
2605     PER_HEAP
2606     BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2607     PER_HEAP
2608     void reset_large_object (uint8_t* o);
2609     PER_HEAP
2610     void sweep_large_objects ();
2611     PER_HEAP
2612     void relocate_in_large_objects ();
2613     PER_HEAP
2614     void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating);
2615     PER_HEAP
2616     void descr_segment (heap_segment* seg);
2617     PER_HEAP
2618     void descr_generations (BOOL begin_gc_p);
2619
2620     PER_HEAP_ISOLATED
2621     void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2622
2623     /*------------ Multiple non isolated heaps ----------------*/
2624 #ifdef MULTIPLE_HEAPS
2625     PER_HEAP_ISOLATED
2626     BOOL   create_thread_support (unsigned number_of_heaps);
2627     PER_HEAP_ISOLATED
2628     void destroy_thread_support ();
2629     PER_HEAP
2630     bool create_gc_thread();
2631     PER_HEAP
2632     void gc_thread_function();
2633 #ifdef MARK_LIST
2634 #ifdef PARALLEL_MARK_LIST_SORT
2635     PER_HEAP
2636     void sort_mark_list();
2637     PER_HEAP
2638     void merge_mark_lists();
2639     PER_HEAP
2640     void append_to_mark_list(uint8_t **start, uint8_t **end);
2641 #else //PARALLEL_MARK_LIST_SORT
2642     PER_HEAP_ISOLATED
2643     void combine_mark_lists();
2644 #endif //PARALLEL_MARK_LIST_SORT
2645 #endif
2646 #endif //MULTIPLE_HEAPS
2647
2648     /*------------ End of Multiple non isolated heaps ---------*/
2649
2650 #ifndef SEG_MAPPING_TABLE
2651     PER_HEAP_ISOLATED
2652     heap_segment* segment_of (uint8_t* add,  ptrdiff_t & delta,
2653                               BOOL verify_p = FALSE);
2654 #endif //SEG_MAPPING_TABLE
2655
2656 #ifdef BACKGROUND_GC
2657
2658     // this is called by revisit....
2659     PER_HEAP
2660     uint8_t* high_page (heap_segment* seg, BOOL concurrent_p);
2661
2662     PER_HEAP
2663     void revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p,
2664                                heap_segment* seg,  uint8_t*& last_page,
2665                                uint8_t*& last_object, BOOL large_objects_p,
2666                                size_t& num_marked_objects);
2667     PER_HEAP
2668     void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2669
2670     PER_HEAP
2671     void concurrent_scan_dependent_handles (ScanContext *sc);
2672
2673     PER_HEAP_ISOLATED
2674     void suspend_EE ();
2675
2676     PER_HEAP_ISOLATED
2677     void bgc_suspend_EE ();
2678
2679     PER_HEAP_ISOLATED
2680     void restart_EE ();
2681
2682     PER_HEAP
2683     void background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags);
2684
2685     PER_HEAP
2686     void background_scan_dependent_handles (ScanContext *sc);
2687
2688     PER_HEAP
2689     void allow_fgc();
2690
2691     // Restores BGC settings if necessary.
2692     PER_HEAP_ISOLATED
2693     void recover_bgc_settings();
2694
2695     PER_HEAP
2696     void save_bgc_data_per_heap();
2697
2698     PER_HEAP
2699     BOOL should_commit_mark_array();
2700
2701     PER_HEAP
2702     void clear_commit_flag();
2703
2704     PER_HEAP_ISOLATED
2705     void clear_commit_flag_global();
2706
2707     PER_HEAP_ISOLATED
2708     void verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr);
2709
2710     PER_HEAP_ISOLATED
2711     void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr);
2712
2713     PER_HEAP_ISOLATED
2714     BOOL commit_mark_array_by_range (uint8_t* begin,
2715                                      uint8_t* end,
2716                                      uint32_t* mark_array_addr);
2717
2718     PER_HEAP_ISOLATED
2719     BOOL commit_mark_array_new_seg (gc_heap* hp, 
2720                                     heap_segment* seg,
2721                                     uint32_t* new_card_table = 0,
2722                                     uint8_t* new_lowest_address = 0);
2723
2724     PER_HEAP_ISOLATED
2725     BOOL commit_mark_array_with_check (heap_segment* seg, uint32_t* mark_array_addr);
2726
2727     // commit the portion of the mark array that corresponds to 
2728     // this segment (from beginning to reserved).
2729     // seg and heap_segment_reserved (seg) are guaranteed to be 
2730     // page aligned.
2731     PER_HEAP_ISOLATED
2732     BOOL commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr);
2733
2734     // During BGC init, we commit the mark array for all in-range
2735     // segments whose mark array hasn't been committed, or has only
2736     // been partially committed. All rw segments are in range; only
2737     // ro segments can be partially in range.
2738     PER_HEAP
2739     BOOL commit_mark_array_bgc_init (uint32_t* mark_array_addr);
2740
2741     PER_HEAP
2742     BOOL commit_new_mark_array (uint32_t* new_mark_array);
2743
2744     // We need to commit all segments that intersect with the bgc
2745     // range. If a segment is only partially in range, we should still
2746     // commit the mark array for the whole segment, as
2747     // we will set the mark array commit flag for this segment.
2748     PER_HEAP_ISOLATED
2749     BOOL commit_new_mark_array_global (uint32_t* new_mark_array);
2750
2751     // We can't decommit the first and the last page in the mark array
2752     // if the beginning and ending don't happen to be page aligned.
2753     PER_HEAP
2754     void decommit_mark_array_by_seg (heap_segment* seg);
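    // Illustrative arithmetic for the page trimming described above
    // (align_up/align_down/page_size are assumed helper names, not the
    // actual ones): only the fully covered interior pages are safe to
    // decommit when the range isn't page aligned.
    //   uint8_t* decommit_start = align_up (range_start, page_size);
    //   uint8_t* decommit_end = align_down (range_end, page_size);
    //   if (decommit_end > decommit_start)
    //       virtual_decommit (decommit_start, (size_t)(decommit_end - decommit_start));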
2755
2756     PER_HEAP
2757     void background_mark_phase();
2758
2759     PER_HEAP
2760     void background_drain_mark_list (int thread);
2761
2762     PER_HEAP
2763     void background_grow_c_mark_list();
2764
2765     PER_HEAP_ISOLATED
2766     void background_promote_callback(Object** object, ScanContext* sc, uint32_t flags);
2767
2768     PER_HEAP
2769     void mark_absorb_new_alloc();
2770
2771     PER_HEAP
2772     void restart_vm();
2773
2774     PER_HEAP
2775     BOOL prepare_bgc_thread(gc_heap* gh);
2776     PER_HEAP
2777     BOOL create_bgc_thread(gc_heap* gh);
2778     PER_HEAP_ISOLATED
2779     BOOL create_bgc_threads_support (int number_of_heaps);
2780     PER_HEAP
2781     BOOL create_bgc_thread_support();
2782     PER_HEAP_ISOLATED
2783     int check_for_ephemeral_alloc();
2784     PER_HEAP_ISOLATED
2785     void wait_to_proceed();
2786     PER_HEAP_ISOLATED
2787     void fire_alloc_wait_event_begin (alloc_wait_reason awr);
2788     PER_HEAP_ISOLATED
2789     void fire_alloc_wait_event_end (alloc_wait_reason awr);
2790     PER_HEAP
2791     void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored);
2792     PER_HEAP
2793     uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
2794     PER_HEAP_ISOLATED
2795     void start_c_gc();
2796     PER_HEAP
2797     void kill_gc_thread();
2798     PER_HEAP
2799     void bgc_thread_function();
2800     PER_HEAP_ISOLATED
2801     void do_background_gc();
2802     static
2803     void bgc_thread_stub (void* arg);
2804 #endif //BACKGROUND_GC
2805  
2806 public:
2807
2808     PER_HEAP_ISOLATED
2809     VOLATILE(bool) internal_gc_done;
2810
2811 #ifdef BACKGROUND_GC
2812     PER_HEAP_ISOLATED
2813     uint32_t cm_in_progress;
2814
2815     // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
2816     // that we do right before the bgc starts.
2817     PER_HEAP_ISOLATED
2818     BOOL     dont_restart_ee_p;
2819
2820     PER_HEAP_ISOLATED
2821     GCEvent bgc_start_event;
2822 #endif //BACKGROUND_GC
2823
2824     // The variables in this block are known to the DAC and must come first
2825     // in the gc_heap class.
2826
2827     // Keeps track of the highest address allocated by Alloc
2828     PER_HEAP
2829     uint8_t* alloc_allocated;
2830
2831     // The ephemeral heap segment
2832     PER_HEAP
2833     heap_segment* ephemeral_heap_segment;
2834
2835     // The finalize queue.
2836     PER_HEAP
2837     CFinalize* finalize_queue;
2838
2839     // OOM info.
2840     PER_HEAP
2841     oom_history oom_info;
2842
2843     // Interesting data, recorded per-heap.
2844     PER_HEAP
2845     size_t interesting_data_per_heap[max_idp_count];
2846
2847     PER_HEAP
2848     size_t compact_reasons_per_heap[max_compact_reasons_count];
2849
2850     PER_HEAP
2851     size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
2852
2853     PER_HEAP
2854     size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
2855
2856     PER_HEAP
2857     uint8_t** internal_root_array;
2858
2859     PER_HEAP
2860     size_t internal_root_array_index;
2861
2862     PER_HEAP
2863     BOOL heap_analyze_success;
2864
2865     // The generation table. Must always be last.
2866     PER_HEAP
2867     generation generation_table [NUMBERGENERATIONS + 1];
2868
2869     // End DAC zone
2870
2871     PER_HEAP
2872     BOOL expanded_in_fgc;
2873
2874     PER_HEAP_ISOLATED
2875     uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
2876
2877     // Returns TRUE if the current thread used to be in cooperative mode 
2878     // before calling this function.
2879     PER_HEAP_ISOLATED
2880     bool enable_preemptive ();
2881     PER_HEAP_ISOLATED
2882     void disable_preemptive (bool restore_cooperative);
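    // Typical save/restore usage sketch (illustrative only), following the
    // return-value contract described above:
    //   bool cooperative_mode = enable_preemptive ();
    //   ... // block or wait here without holding up the EE suspension
    //   disable_preemptive (cooperative_mode);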
2883
2884     /* ------------------- per heap members --------------------------*/
2885
2886     PER_HEAP
2887 #ifndef MULTIPLE_HEAPS
2888     GCEvent gc_done_event;
2889 #else // MULTIPLE_HEAPS
2890     GCEvent gc_done_event;
2891 #endif // MULTIPLE_HEAPS
2892
2893     PER_HEAP
2894     VOLATILE(int32_t) gc_done_event_lock;
2895
2896     PER_HEAP
2897     VOLATILE(bool) gc_done_event_set;
2898
2899     PER_HEAP 
2900     void set_gc_done();
2901
2902     PER_HEAP 
2903     void reset_gc_done();
2904
2905     PER_HEAP
2906     void enter_gc_done_event_lock();
2907
2908     PER_HEAP
2909     void exit_gc_done_event_lock();
2910
2911     PER_HEAP
2912     uint8_t*  ephemeral_low;      //lowest ephemeral address
2913
2914     PER_HEAP
2915     uint8_t*  ephemeral_high;     //highest ephemeral address
2916
2917     PER_HEAP
2918     uint32_t* card_table;
2919
2920     PER_HEAP
2921     short* brick_table;
2922
2923 #ifdef MARK_ARRAY
2924     PER_HEAP
2925     uint32_t* mark_array;
2926 #endif //MARK_ARRAY
2927
2928 #ifdef CARD_BUNDLE
2929     PER_HEAP
2930     uint32_t* card_bundle_table;
2931 #endif //CARD_BUNDLE
2932
2933 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2934     PER_HEAP_ISOLATED
2935     sorted_table* seg_table;
2936 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2937
2938     PER_HEAP_ISOLATED
2939     VOLATILE(BOOL) gc_started;
2940
2941     // The following 2 events are there to support the gen2 
2942     // notification feature which is only enabled if concurrent
2943     // GC is disabled.
2944     PER_HEAP_ISOLATED
2945     GCEvent full_gc_approach_event;
2946
2947     PER_HEAP_ISOLATED
2948     GCEvent full_gc_end_event;
2949
2950     // Full GC Notification percentages.
2951     PER_HEAP_ISOLATED
2952     uint32_t fgn_maxgen_percent;
2953
2954     PER_HEAP_ISOLATED
2955     uint32_t fgn_loh_percent;
2956
2957     PER_HEAP_ISOLATED
2958     VOLATILE(bool) full_gc_approach_event_set;
2959
2960 #ifdef BACKGROUND_GC
2961     PER_HEAP_ISOLATED
2962     BOOL fgn_last_gc_was_concurrent;
2963 #endif //BACKGROUND_GC
2964
2965     PER_HEAP
2966     size_t fgn_last_alloc;
2967
2968     static uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
2969
2970     static wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms);
2971
2972     PER_HEAP
2973     uint8_t* demotion_low;
2974
2975     PER_HEAP
2976     uint8_t* demotion_high;
2977
2978     PER_HEAP
2979     BOOL demote_gen1_p;
2980
2981     PER_HEAP
2982     uint8_t* last_gen1_pin_end;
2983
2984     PER_HEAP
2985     gen_to_condemn_tuning gen_to_condemn_reasons;
2986
2987     PER_HEAP
2988     size_t etw_allocation_running_amount[2];
2989
2990     PER_HEAP
2991     int gc_policy;  //sweep, compact, expand
2992
2993 #ifdef MULTIPLE_HEAPS
2994     PER_HEAP_ISOLATED
2995     bool gc_thread_no_affinitize_p;
2996
2997     PER_HEAP_ISOLATED
2998     GCEvent gc_start_event;
2999
3000     PER_HEAP_ISOLATED
3001     GCEvent ee_suspend_event;
3002
3003     PER_HEAP
3004     heap_segment* new_heap_segment;
3005
3006 #define alloc_quantum_balance_units (16)
3007
3008     PER_HEAP_ISOLATED
3009     size_t min_balance_threshold;
3010 #else //MULTIPLE_HEAPS
3011
3012     PER_HEAP
3013     size_t allocation_running_time;
3014
3015     PER_HEAP
3016     size_t allocation_running_amount;
3017
3018 #endif //MULTIPLE_HEAPS
3019
3020     PER_HEAP_ISOLATED
3021     gc_latency_level latency_level;
3022
3023     PER_HEAP_ISOLATED
3024     gc_mechanisms settings;
3025
3026     PER_HEAP_ISOLATED
3027     gc_history_global gc_data_global;
3028
3029     PER_HEAP_ISOLATED
3030     size_t gc_last_ephemeral_decommit_time;
3031
3032     PER_HEAP_ISOLATED
3033     size_t gc_gen0_desired_high;
3034
3035     PER_HEAP
3036     size_t gen0_big_free_spaces;
3037
3038 #ifdef SHORT_PLUGS
3039     PER_HEAP_ISOLATED
3040     double short_plugs_pad_ratio;
3041 #endif //SHORT_PLUGS
3042
3043 #ifdef BIT64
3044     PER_HEAP_ISOLATED
3045     size_t youngest_gen_desired_th;
3046 #endif //BIT64
3047
3048     PER_HEAP_ISOLATED
3049     uint32_t last_gc_memory_load;
3050
3051     PER_HEAP_ISOLATED
3052     size_t last_gc_heap_size;
3053
3054     PER_HEAP_ISOLATED
3055     size_t last_gc_fragmentation;
3056
3057     PER_HEAP_ISOLATED
3058     uint32_t high_memory_load_th;
3059
3060     PER_HEAP_ISOLATED
3061     uint32_t m_high_memory_load_th;
3062
3063     PER_HEAP_ISOLATED
3064     uint32_t v_high_memory_load_th;
3065
3066     PER_HEAP_ISOLATED
3067     uint64_t mem_one_percent;
3068
3069     PER_HEAP_ISOLATED
3070     uint64_t total_physical_mem;
3071
3072     PER_HEAP_ISOLATED
3073     uint64_t entry_available_physical_mem;
3074
3075     // Hard limit for the heap, only supported on 64-bit.
3076     // 
3077     // Users can specify a hard limit for the GC heap via GCHeapHardLimit or
3078     // a percentage of the physical memory this process is allowed to use via
3079     // GCHeapHardLimitPercent. This is the maximum commit size the GC heap 
3080     // can consume.
3081     //
3082     // The way the hard limit is decided is:
3083     // 
3084     // If the GCHeapHardLimit config is specified that's the value we use;
3085     // else if the GCHeapHardLimitPercent config is specified we use that 
3086     // value;
3087     // else if the process is running inside a container with a memory limit,
3088     // the hard limit is 
3089     // max (20mb, 75% of the memory limit on the container).
3090     //
3091     // Due to the different perf characteristics of containers we make the
3092     // following policy changes:
3093     // 
3094     // 1) No longer affinitize Server GC threads by default because we wouldn't 
3095     // want all the containers on the machine to only affinitize to use the
3096     // first few CPUs (and we don't know which CPUs are already used). You
3097     // can however override this by specifying the GCHeapAffinitizeMask
3098     // config which will decide which CPUs the process will affinitize the
3099     // Server GC threads to.
3100     // 
3101     // 2) Segment size is determined by limit / number of heaps but has a 
3102     // minimum value of 16mb. This can be changed by specifying the number
3103     // of heaps via the GCHeapCount config. The minimum size is to avoid 
3104     // the scenario where the hard limit is small but the process can use 
3105     // many procs and we end up with tiny segments, which doesn't make sense.
3106     //
3107     // 3) LOH compaction occurs automatically if needed.
3108     //
3109     // Since we do allow both gen0 and gen3 allocations, and we don't know
3110     // the distribution (and it's unrealistic to request users to specify
3111     // this distribution), we reserve memory this way -
3112     // 
3113     // For SOH we reserve (limit / number of heaps) per heap. 
3114     // For LOH we reserve (limit * 2 / number of heaps) per heap. 
3115     //
3116     // This means the following -
3117     // 
3118     // + we never need to acquire new segments. This simplifies the perf
3119     // calculations a lot.
3120     //
3121     // + we now need a different definition of "end of seg" because we
3122     // need to make sure the total does not exceed the limit.
3123     //
3124     // + if we detect that we exceed the commit limit in the allocator we
3125     // wouldn't want to treat that as a normal commit failure because that
3126     // would mean we always do full compacting GCs.
3127     // 
3128     // TODO: some of the logic here applies to the general case as well
3129     // such as LOH automatic compaction. However it will require more
3130     // testing to change the general case.
3131     PER_HEAP_ISOLATED
3132     size_t heap_hard_limit;
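    // Worked example of the sizing rules above (illustrative numbers only;
    // "limit" here is taken to mean the computed hard limit):
    //   container limited to 1GB, 4 heaps, no explicit configs:
    //     heap_hard_limit = max (20mb, 75% of 1GB)      = 768mb
    //     segment size    = max (16mb, 768mb / 4 heaps) = 192mb
    //     SOH reserve     = 768mb / 4 heaps             = 192mb per heap
    //     LOH reserve     = 768mb * 2 / 4 heaps         = 384mb per heap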
3133
3134     PER_HEAP_ISOLATED
3135     CLRCriticalSection check_commit_cs;
3136
3137     PER_HEAP_ISOLATED
3138     size_t current_total_committed;
3139
3140     // This is what GC uses for its own bookkeeping.
3141     PER_HEAP_ISOLATED
3142     size_t current_total_committed_bookkeeping;
3143
3144     // This is what the GC's own bookkeeping consumes.
3145     PER_HEAP_ISOLATED
3146     size_t current_total_committed_gc_own;
3147
3148     // This indicates whether large pages should be used.
3149     PER_HEAP_ISOLATED
3150     size_t use_large_pages_p;
3151
3152     PER_HEAP_ISOLATED
3153     size_t last_gc_index;
3154
3155 #ifdef SEG_MAPPING_TABLE
3156     PER_HEAP_ISOLATED
3157     size_t min_segment_size;
3158
3159     PER_HEAP_ISOLATED
3160     size_t min_segment_size_shr;
3161 #endif //SEG_MAPPING_TABLE
3162
3163     // For SOH we always allocate segments of the same
3164     // size unless no_gc_region requires larger ones.
3165     PER_HEAP_ISOLATED
3166     size_t soh_segment_size;
3167
3168     PER_HEAP_ISOLATED
3169     size_t min_loh_segment_size;
3170
3171     PER_HEAP_ISOLATED
3172     size_t segment_info_size;
3173
3174     PER_HEAP
3175     uint8_t* lowest_address;
3176
3177     PER_HEAP
3178     uint8_t* highest_address;
3179
3180     PER_HEAP
3181     BOOL ephemeral_promotion;
3182     PER_HEAP
3183     uint8_t* saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
3184     PER_HEAP
3185     size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
3186
3187 protected:
3188 #ifdef MULTIPLE_HEAPS
3189     PER_HEAP
3190     GCHeap* vm_heap;
3191     PER_HEAP
3192     int heap_number;
3193     PER_HEAP
3194     VOLATILE(int) alloc_context_count;
3195 #else //MULTIPLE_HEAPS
3196 #define vm_heap ((GCHeap*) g_theGCHeap)
3197 #define heap_number (0)
3198 #endif //MULTIPLE_HEAPS
3199
3200     PER_HEAP
3201     size_t time_bgc_last;
3202
3203     PER_HEAP
3204     uint8_t*       gc_low; // lowest address being condemned
3205
3206     PER_HEAP
3207     uint8_t*       gc_high; //highest address being condemned
3208
3209     PER_HEAP
3210     size_t      mark_stack_tos;
3211
3212     PER_HEAP
3213     size_t      mark_stack_bos;
3214
3215     PER_HEAP
3216     size_t      mark_stack_array_length;
3217
3218     PER_HEAP
3219     mark*       mark_stack_array;
3220
3221 #if defined (_DEBUG) && defined (VERIFY_HEAP)
3222     PER_HEAP
3223     BOOL       verify_pinned_queue_p;
3224 #endif // _DEBUG && VERIFY_HEAP
3225
3226     PER_HEAP
3227     uint8_t*    oldest_pinned_plug;
3228
3229 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
3230     PER_HEAP
3231     size_t      num_pinned_objects;
3232 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
3233
3234 #ifdef FEATURE_LOH_COMPACTION
3235     PER_HEAP
3236     size_t      loh_pinned_queue_tos;
3237
3238     PER_HEAP
3239     size_t      loh_pinned_queue_bos;
3240
3241     PER_HEAP
3242     size_t      loh_pinned_queue_length;
3243
3244     PER_HEAP_ISOLATED
3245     int         loh_pinned_queue_decay;
3246
3247     PER_HEAP
3248     mark*       loh_pinned_queue;
3249
3250     // This is for forced LOH compaction via the COMPlus env var
3251     PER_HEAP_ISOLATED
3252     BOOL        loh_compaction_always_p;
3253
3254     // This is set by the user.
3255     PER_HEAP_ISOLATED
3256     gc_loh_compaction_mode loh_compaction_mode;
3257
3258     // We may not compact LOH on every heap if we can't
3259     // grow the pinned queue. This is to indicate whether
3260     // this heap's LOH is compacted or not. So even if
3261     // settings.loh_compaction is TRUE this may not be TRUE.
3262     PER_HEAP
3263     BOOL        loh_compacted_p;
3264 #endif //FEATURE_LOH_COMPACTION
3265
3266 #ifdef BACKGROUND_GC
3267
3268     PER_HEAP
3269     EEThreadId bgc_thread_id;
3270
3271 #ifdef WRITE_WATCH
3272     PER_HEAP
3273     uint8_t* background_written_addresses [array_size+2];
3274 #endif //WRITE_WATCH
3275
3276     PER_HEAP_ISOLATED
3277     VOLATILE(c_gc_state) current_c_gc_state;     //tells the large object allocator to
3278     //mark the object as new since the start of gc.
3279
3280     PER_HEAP_ISOLATED
3281     gc_mechanisms saved_bgc_settings;
3282
3283     PER_HEAP
3284     gc_history_per_heap bgc_data_per_heap;
3285
3286     PER_HEAP
3287     BOOL bgc_thread_running; // gc thread is in its main loop
3288
3289     PER_HEAP_ISOLATED
3290     BOOL keep_bgc_threads_p;
3291
3292     // This event is used by BGC threads to do something on 
3293     // one specific thread while other BGC threads have to 
3294     // wait. This is different from a join because you can't
3295     // specify which thread should be doing some task
3296     // while other threads have to wait.
3297     // For example, to make the BGC threads managed threads 
3298     // we need to create them on the thread that called 
3299     // SuspendEE which is heap 0.
3300     PER_HEAP_ISOLATED
3301     GCEvent bgc_threads_sync_event;
3302
3303     PER_HEAP
3304     Thread* bgc_thread;
3305
3306     PER_HEAP
3307     CLRCriticalSection bgc_threads_timeout_cs;
3308
3309     PER_HEAP_ISOLATED
3310     GCEvent background_gc_done_event;
3311
3312     PER_HEAP_ISOLATED
3313     GCEvent ee_proceed_event;
3314
3315     PER_HEAP
3316     GCEvent gc_lh_block_event;
3317
3318     PER_HEAP_ISOLATED
3319     bool gc_can_use_concurrent;
3320
3321     PER_HEAP_ISOLATED
3322     bool temp_disable_concurrent_p;
3323
3324     PER_HEAP_ISOLATED
3325     BOOL do_ephemeral_gc_p;
3326
3327     PER_HEAP_ISOLATED
3328     BOOL do_concurrent_p;
3329
3330     PER_HEAP
3331     VOLATILE(bgc_state) current_bgc_state;
3332
3333     struct gc_history
3334     {
3335         size_t gc_index;
3336         bgc_state current_bgc_state;
3337         uint32_t gc_time_ms;
3338         // This is in bytes per ms; consider breaking it 
3339         // into the efficiency per phase.
3340         size_t gc_efficiency; 
3341         uint8_t* eph_low;
3342         uint8_t* gen0_start;
3343         uint8_t* eph_high;
3344         uint8_t* bgc_highest;
3345         uint8_t* bgc_lowest;
3346         uint8_t* fgc_highest;
3347         uint8_t* fgc_lowest;
3348         uint8_t* g_highest;
3349         uint8_t* g_lowest;
3350     };
3351
3352 #define max_history_count 64
3353
3354     PER_HEAP
3355     int gchist_index_per_heap;
3356
3357     PER_HEAP
3358     gc_history gchist_per_heap[max_history_count];
3359
3360     PER_HEAP_ISOLATED
3361     int gchist_index;
3362
3363     PER_HEAP_ISOLATED
3364     gc_mechanisms_store gchist[max_history_count];
3365
3366     PER_HEAP
3367     void add_to_history_per_heap();
3368
3369     PER_HEAP_ISOLATED
3370     void add_to_history();
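    // A minimal sketch (assumed, not the actual implementation) of how the
    // per-heap history above could be recorded as a ring buffer: the index
    // wraps modulo max_history_count so the newest entry overwrites the oldest.
    //
    //   gc_history* entry = &gchist_per_heap[gchist_index_per_heap];
    //   entry->gc_index = ...; // fill in the fields captured for this GC
    //   gchist_index_per_heap = (gchist_index_per_heap + 1) % max_history_count;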
3371
3372     PER_HEAP
3373     size_t total_promoted_bytes;
3374
3375     PER_HEAP
3376     size_t     bgc_overflow_count;
3377
3378     PER_HEAP
3379     size_t     bgc_begin_loh_size;
3380     PER_HEAP
3381     size_t     end_loh_size;
3382
3383     // We need to throttle the LOH allocations during BGC since we can't
3384     // collect LOH when BGC is in progress. 
3385     // We allow the LOH heap size to double during a BGC. So for every
3386     // 10% increase we will have the LOH allocating thread sleep for one more
3387     // ms. So if we are already 30% over the original heap size, the thread
3388     // will sleep for 3ms.
3389     PER_HEAP
3390     uint32_t   bgc_alloc_spin_loh;
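    // A minimal sketch (assumed, not the real allocator path) of the throttling
    // arithmetic described above - one extra millisecond of sleep per 10% of
    // LOH growth over its size at the start of the BGC (current_loh_size is a
    // hypothetical name for the heap's current LOH size):
    //
    //   size_t growth = current_loh_size - bgc_begin_loh_size;
    //   uint32_t sleep_ms = (uint32_t)((growth * 10) / bgc_begin_loh_size); // 10% -> 1ms, 30% -> 3ms
    //   // the LOH-allocating thread would then sleep for sleep_ms before retrying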
3391
3392     // This includes what we allocate at the end of the segment - allocating
3393     // from the free list doesn't increase the heap size.
3394     PER_HEAP
3395     size_t     bgc_loh_size_increased;
3396
3397     PER_HEAP
3398     size_t     bgc_loh_allocated_in_free;
3399
3400     PER_HEAP
3401     size_t     background_soh_alloc_count;
3402
3403     PER_HEAP
3404     size_t     background_loh_alloc_count;
3405
3406     PER_HEAP
3407     VOLATILE(int32_t) loh_alloc_thread_count;
3408
3409     PER_HEAP
3410     uint8_t**  background_mark_stack_tos;
3411
3412     PER_HEAP
3413     uint8_t**  background_mark_stack_array;
3414
3415     PER_HEAP
3416     size_t    background_mark_stack_array_length;
3417
3418     PER_HEAP
3419     uint8_t*  background_min_overflow_address;
3420
3421     PER_HEAP
3422     uint8_t*  background_max_overflow_address;
3423
3424     // We can't process the soh range concurrently so we
3425     // wait till final mark to process it.
3426     PER_HEAP
3427     BOOL      processed_soh_overflow_p;
3428
3429     PER_HEAP
3430     uint8_t*  background_min_soh_overflow_address;
3431
3432     PER_HEAP
3433     uint8_t*  background_max_soh_overflow_address;
3434
3435     PER_HEAP
3436     heap_segment* saved_overflow_ephemeral_seg;
3437
3438     PER_HEAP
3439     heap_segment* saved_sweep_ephemeral_seg;
3440
3441     PER_HEAP
3442     uint8_t* saved_sweep_ephemeral_start;
3443
3444     PER_HEAP
3445     uint8_t* background_saved_lowest_address;
3446
3447     PER_HEAP
3448     uint8_t* background_saved_highest_address;
3449
3450     // This is used for synchronization between the bgc thread
3451     // for this heap and the user threads allocating on this
3452     // heap.
3453     PER_HEAP
3454     exclusive_sync* bgc_alloc_lock;
3455
3456 #ifdef SNOOP_STATS
3457     PER_HEAP
3458     snoop_stats_data snoop_stat;
3459 #endif //SNOOP_STATS
3460
3461
3462     PER_HEAP
3463     uint8_t**          c_mark_list;
3464
3465     PER_HEAP
3466     size_t          c_mark_list_length;
3467
3468     PER_HEAP
3469     size_t          c_mark_list_index;
3470 #endif //BACKGROUND_GC
3471
3472 #ifdef MARK_LIST
3473     PER_HEAP
3474     uint8_t** mark_list;
3475
3476     PER_HEAP_ISOLATED
3477     size_t mark_list_size;
3478
3479     PER_HEAP
3480     uint8_t** mark_list_end;
3481
3482     PER_HEAP
3483     uint8_t** mark_list_index;
3484
3485     PER_HEAP_ISOLATED
3486     uint8_t** g_mark_list;
3487 #ifdef PARALLEL_MARK_LIST_SORT
3488     PER_HEAP_ISOLATED
3489     uint8_t** g_mark_list_copy;
3490     PER_HEAP
3491     uint8_t*** mark_list_piece_start;
3492     uint8_t*** mark_list_piece_end;
3493 #endif //PARALLEL_MARK_LIST_SORT
3494 #endif //MARK_LIST
3495
3496     PER_HEAP
3497     uint8_t*  min_overflow_address;
3498
3499     PER_HEAP
3500     uint8_t*  max_overflow_address;
3501
3502 #ifndef MULTIPLE_HEAPS
3503     PER_HEAP
3504     uint8_t*  shigh; //keeps track of the highest marked object
3505
3506     PER_HEAP
3507     uint8_t*  slow; //keeps track of the lowest marked object
3508 #endif //MULTIPLE_HEAPS
3509
3510     PER_HEAP
3511     size_t allocation_quantum;
3512
3513     PER_HEAP
3514     size_t alloc_contexts_used;
3515
3516     PER_HEAP_ISOLATED
3517     no_gc_region_info current_no_gc_region_info;
3518
3519     PER_HEAP
3520     size_t soh_allocation_no_gc;
3521
3522     PER_HEAP
3523     size_t loh_allocation_no_gc;
3524
3525     PER_HEAP
3526     bool no_gc_oom_p;
3527
3528     PER_HEAP
3529     heap_segment* saved_loh_segment_no_gc;
3530
3531     PER_HEAP_ISOLATED
3532     BOOL proceed_with_gc_p;
3533
3534 #define youngest_generation (generation_of (0))
3535 #define large_object_generation (generation_of (max_generation+1))
3536
3537     // The more_space_lock and gc_lock is used for 3 purposes:
3538     //
3539     // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock_soh)
3540     // 2) to synchronize allocations of large objects (more_space_lock_loh)
3541     // 3) to synchronize the GC itself (gc_lock)
3542     //
3543     PER_HEAP_ISOLATED
3544     GCSpinLock gc_lock; //lock while doing GC
3545
3546     PER_HEAP
3547     GCSpinLock more_space_lock_soh; //lock while allocating more space for soh
3548
3549     PER_HEAP
3550     GCSpinLock more_space_lock_loh;
3551
3552 #ifdef SYNCHRONIZATION_STATS
3553
3554     PER_HEAP
3555     unsigned int good_suspension;
3556
3557     PER_HEAP
3558     unsigned int bad_suspension;
3559
3560     // Number of times when msl_acquire is > 200 cycles.
3561     PER_HEAP
3562     unsigned int num_high_msl_acquire;
3563
3564     // Number of times when msl_acquire is < 200 cycles.
3565     PER_HEAP
3566     unsigned int num_low_msl_acquire;
3567
3568     // Number of times the more_space_lock is acquired.
3569     PER_HEAP
3570     unsigned int num_msl_acquired;
3571
3572     // Total cycles it takes to acquire the more_space_lock.
3573     PER_HEAP
3574     uint64_t total_msl_acquire;
3575
3576     PER_HEAP
3577     void init_heap_sync_stats()
3578     {
3579         good_suspension = 0;
3580         bad_suspension = 0;
3581         num_msl_acquired = 0;
3582         total_msl_acquire = 0;
3583         num_high_msl_acquire = 0;
3584         num_low_msl_acquire = 0;
3585         more_space_lock_soh.init(); more_space_lock_loh.init();
3586         gc_lock.init();
3587     }
3588
3589     PER_HEAP
3590     void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3591     {
3592         printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3593             heap_num,
3594             alloc_contexts_used,
3595             good_suspension,
3596             bad_suspension,
3597             (unsigned int)(total_msl_acquire / gc_count_during_log),
3598             num_high_msl_acquire / gc_count_during_log,
3599             num_low_msl_acquire / gc_count_during_log,
3600             num_msl_acquired / gc_count_during_log,
3601             more_space_lock_soh.num_switch_thread / gc_count_during_log,
3602             more_space_lock_soh.num_wait_longer / gc_count_during_log,
3603             more_space_lock_soh.num_switch_thread_w / gc_count_during_log,
3604             more_space_lock_soh.num_disable_preemptive_w / gc_count_during_log);
3605     }
3606
3607 #endif //SYNCHRONIZATION_STATS
3608
3609 #define NUM_LOH_ALIST (7)
3610 #define BASE_LOH_ALIST (64*1024)
3611     PER_HEAP 
3612     alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
3613
3614 #define NUM_GEN2_ALIST (12)
3615 #ifdef BIT64
3616 #define BASE_GEN2_ALIST (1*256)
3617 #else
3618 #define BASE_GEN2_ALIST (1*128)
3619 #endif // BIT64
3620     PER_HEAP
3621     alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
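    // A minimal sketch (assumed, not the actual allocator code) of how a free
    // item of `size` bytes (hypothetical variable) could be mapped to one of
    // the bucketed free lists above, assuming each bucket covers a
    // power-of-two range starting at BASE_GEN2_ALIST and the last bucket
    // catches everything larger:
    //
    //   unsigned bucket = 0;
    //   for (size_t limit = BASE_GEN2_ALIST;
    //        (bucket < (NUM_GEN2_ALIST - 2)) && (size >= limit);
    //        limit <<= 1)
    //   {
    //       bucket++;
    //   }
    //   // gen2_alloc_list[bucket] would then be the list to search or insert into.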
3622
3623 //------------------------------------------    
3624
3625     PER_HEAP
3626     dynamic_data dynamic_data_table [NUMBERGENERATIONS+1];
3627
3628     PER_HEAP
3629     gc_history_per_heap gc_data_per_heap;
3630
3631     PER_HEAP
3632     size_t maxgen_pinned_compact_before_advance;
3633
3634     // dynamic tuning.
3635     PER_HEAP
3636     BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
3637     // if elevate_p is FALSE, it means we are determining fragmentation for a generation
3638     // to see if we should condemn this gen; otherwise it means we are determining if
3639     // we should elevate to doing max_gen from an ephemeral gen.
3640     PER_HEAP
3641     BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
3642     PER_HEAP
3643     BOOL 
3644     dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
3645     PER_HEAP
3646     BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
3647     PER_HEAP
3648     BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
3649
3650     PER_HEAP
3651     int generation_skip_ratio; // in %
3652
3653     PER_HEAP
3654     BOOL gen0_bricks_cleared;
3655 #ifdef FFIND_OBJECT
3656     PER_HEAP
3657     int gen0_must_clear_bricks;
3658 #endif //FFIND_OBJECT
3659     
3660     PER_HEAP_ISOLATED
3661     bool maxgen_size_inc_p; 
3662
3663     PER_HEAP_ISOLATED
3664     size_t full_gc_counts[gc_type_max];
3665
3666     // the # of bytes allocated since the last full compacting GC.
3667     PER_HEAP
3668     uint64_t loh_alloc_since_cg;
3669
3670     PER_HEAP
3671     BOOL elevation_requested;
3672
3673     // if this is TRUE, we should always guarantee that we do a 
3674     // full compacting GC before we OOM.
3675     PER_HEAP
3676     BOOL last_gc_before_oom;
3677
3678     PER_HEAP_ISOLATED
3679     BOOL should_expand_in_full_gc;
3680
3681     // When we decide if we should expand the heap or not, we are
3682     // fine NOT to expand if we find enough free space in gen0's free
3683     // list or end of seg and we check this in decide_on_compacting.
3684     // This is an expensive check so we just record the fact here so we
3685     // don't need to check again in the allocator.
3686     PER_HEAP
3687     BOOL sufficient_gen0_space_p;
3688
3689 #ifdef MULTIPLE_HEAPS
3690     PER_HEAP
3691     bool gen0_allocated_after_gc_p;
3692 #endif //MULTIPLE_HEAPS
3693
3694     // A provisional mode means we could change our mind in the middle of a GC 
3695     // and want to do a different GC instead.
3696     // 
3697     // Right now there's only one such case: in the middle of a gen1
3698     // GC we want to do a blocking gen2 instead. If/when we have more we should
3699     // have an enum that tells us which case in this provisional mode
3700     // we are in.
3701     //
3702     // When this mode is triggered, our current (only) condition says
3703     // we have high fragmentation in gen2 even after we do a compacting
3704     // full GC which is an indication of heavy pinning in gen2. In this
3705     // case we never do BGCs; we just do gen0 or gen1 GCs till a
3706     // gen1 needs to increase the gen2 size, in which case we finish up
3707     // the current gen1 as a sweeping GC and immediately do a compacting 
3708     // full GC instead (without restarting EE).
3709     PER_HEAP_ISOLATED
3710     bool provisional_mode_triggered;
3711
3712     PER_HEAP_ISOLATED
3713     bool pm_trigger_full_gc;
3714
3715     // For testing only BEG
3716     // pm_stress_on currently means (since we just have one mode) we 
3717     // randomly turn the mode on; and after a random # of NGC2s we 
3718     // turn it off.
3719     // NOTE that this means concurrent will be disabled so we can 
3720     // simulate how this mode is supposed to be used.
3721     PER_HEAP_ISOLATED
3722     bool pm_stress_on;
3723
3724     PER_HEAP_ISOLATED
3725     size_t provisional_triggered_gc_count;
3726
3727     PER_HEAP_ISOLATED
3728     size_t provisional_off_gc_count;
3729     // For testing only END
3730
3731     PER_HEAP_ISOLATED
3732     size_t num_provisional_triggered;
3733
3734     PER_HEAP
3735     size_t allocated_since_last_gc;
3736
3737 #ifdef BACKGROUND_GC
3738     PER_HEAP_ISOLATED
3739     size_t ephemeral_fgc_counts[max_generation];
3740
3741     PER_HEAP_ISOLATED
3742     BOOL alloc_wait_event_p;
3743
3744     PER_HEAP
3745     uint8_t* next_sweep_obj;
3746
3747     PER_HEAP
3748     uint8_t* current_sweep_pos;
3749
3750 #endif //BACKGROUND_GC
3751
3752     PER_HEAP
3753     fgm_history fgm_result;
3754
3755     PER_HEAP_ISOLATED
3756     size_t eph_gen_starts_size;
3757
3758 #ifdef GC_CONFIG_DRIVEN
3759     PER_HEAP_ISOLATED
3760     size_t time_init;
3761
3762     PER_HEAP_ISOLATED
3763     size_t time_since_init;
3764
3765     // 0 stores compacting GCs;
3766     // 1 stores sweeping GCs;
3767     PER_HEAP_ISOLATED
3768     size_t compact_or_sweep_gcs[2];
3769
3770     PER_HEAP
3771     size_t interesting_data_per_gc[max_idp_count];
3772 #endif //GC_CONFIG_DRIVEN
3773
3774     PER_HEAP
3775     BOOL        ro_segments_in_range;
3776
3777 #ifdef BACKGROUND_GC
3778     PER_HEAP
3779     heap_segment* freeable_small_heap_segment;
3780 #endif //BACKGROUND_GC
3781
3782     PER_HEAP
3783     heap_segment* freeable_large_heap_segment;
3784
3785     PER_HEAP_ISOLATED
3786     heap_segment* segment_standby_list;
3787
3788     PER_HEAP
3789     size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
3790
3791     PER_HEAP
3792     size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
3793
3794     PER_HEAP
3795     size_t ordered_plug_indices[MAX_NUM_BUCKETS];
3796
3797     PER_HEAP
3798     size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
3799
3800     PER_HEAP
3801     BOOL ordered_plug_indices_init;
3802
3803     PER_HEAP
3804     BOOL use_bestfit;
3805
3806     PER_HEAP
3807     uint8_t* bestfit_first_pin;
3808
3809     PER_HEAP
3810     BOOL commit_end_of_seg;
3811
3812     PER_HEAP
3813     size_t max_free_space_items; // dynamically adjusted.
3814
3815     PER_HEAP
3816     size_t free_space_buckets;
3817
3818     PER_HEAP
3819     size_t free_space_items;
3820
3821     // -1 means we are using all the free
3822     // spaces we have (not including
3823     // end of seg space).
3824     PER_HEAP
3825     int trimmed_free_space_index;
3826
3827     PER_HEAP
3828     size_t total_ephemeral_plugs;
3829
3830     PER_HEAP
3831     seg_free_spaces* bestfit_seg;
3832
3833     // Note: we know this from the plan phase.
3834     // total_ephemeral_plugs actually has the same value
3835     // but while we are calculating its value we also store
3836     // info on how big the plugs are for best fit, which we
3837     // don't do in the plan phase.
3838     // TODO: get rid of total_ephemeral_plugs.
3839     PER_HEAP
3840     size_t total_ephemeral_size;
3841
3842 public:
3843
3844 #ifdef HEAP_ANALYZE
3845
3846     PER_HEAP_ISOLATED
3847     BOOL heap_analyze_enabled;
3848
3849     PER_HEAP
3850     size_t internal_root_array_length;
3851
3852     // The next two fields are used to optimize the search for the object 
3853     // enclosing the current reference handled by ha_mark_object_simple.
3854     PER_HEAP
3855     uint8_t*  current_obj;
3856
3857     PER_HEAP
3858     size_t current_obj_size;
3859
3860 #endif //HEAP_ANALYZE
3861
3862     /* ----------------------- global members ----------------------- */
3863 public:
3864
3865     PER_HEAP
3866     int         condemned_generation_num;
3867
3868     PER_HEAP
3869     BOOL        blocking_collection;
3870
3871 #ifdef MULTIPLE_HEAPS
3872     static
3873     int n_heaps;
3874
3875     static
3876     gc_heap** g_heaps;
3877
3878     static
3879     size_t*   g_promoted;
3880 #ifdef BACKGROUND_GC
3881     static
3882     size_t*   g_bpromoted;
3883 #endif //BACKGROUND_GC
3884 #ifdef MH_SC_MARK
3885     PER_HEAP_ISOLATED
3886     int*  g_mark_stack_busy;
3887 #endif //MH_SC_MARK
3888 #else
3889     static
3890     size_t    g_promoted;
3891 #ifdef BACKGROUND_GC
3892     static
3893     size_t    g_bpromoted;
3894 #endif //BACKGROUND_GC
3895 #endif //MULTIPLE_HEAPS
3896     
3897     static
3898     size_t reserved_memory;
3899     static
3900     size_t reserved_memory_limit;
3901     static
3902     BOOL      g_low_memory_status;
3903
3904 protected:
3905     PER_HEAP
3906     void update_collection_counts ();
3907 }; // class gc_heap
3908
3909 #define ASSERT_OFFSETS_MATCH(field) \
3910   static_assert(offsetof(dac_gc_heap, field) == offsetof(gc_heap, field), #field " offset mismatch")
3911
3912 #ifdef MULTIPLE_HEAPS
3913 ASSERT_OFFSETS_MATCH(alloc_allocated);
3914 ASSERT_OFFSETS_MATCH(ephemeral_heap_segment);
3915 ASSERT_OFFSETS_MATCH(finalize_queue);
3916 ASSERT_OFFSETS_MATCH(oom_info);
3917 ASSERT_OFFSETS_MATCH(interesting_data_per_heap);
3918 ASSERT_OFFSETS_MATCH(compact_reasons_per_heap);
3919 ASSERT_OFFSETS_MATCH(expand_mechanisms_per_heap);
3920 ASSERT_OFFSETS_MATCH(interesting_mechanism_bits_per_heap);
3921 ASSERT_OFFSETS_MATCH(internal_root_array);
3922 ASSERT_OFFSETS_MATCH(internal_root_array_index);
3923 ASSERT_OFFSETS_MATCH(heap_analyze_success);
3924 ASSERT_OFFSETS_MATCH(generation_table);
3925 #endif // MULTIPLE_HEAPS
3926
3927 #ifdef FEATURE_PREMORTEM_FINALIZATION
3928 class CFinalize
3929 {
3930 #ifdef DACCESS_COMPILE
3931     friend class ::ClrDataAccess;
3932 #endif // DACCESS_COMPILE
3933
3934     friend class CFinalizeStaticAsserts;
3935
3936 private:
3937
3938     // To add a segment, adjust the count (ExtraSegCount) and add a constant below.
3939     static const int ExtraSegCount = 2;
3940     static const int FinalizerListSeg = NUMBERGENERATIONS+1;
3941     static const int CriticalFinalizerListSeg = NUMBERGENERATIONS;
3942     //Does not correspond to a segment
3943     static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
3944
3945     PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
3946     PTR_PTR_Object m_Array;
3947     PTR_PTR_Object m_EndArray;
3948     size_t   m_PromotedCount;
3949     
3950     VOLATILE(int32_t) lock;
3951 #ifdef _DEBUG
3952     EEThreadId lockowner_threadid;
3953 #endif // _DEBUG
3954
3955     BOOL GrowArray();
3956     void MoveItem (Object** fromIndex,
3957                    unsigned int fromSeg,
3958                    unsigned int toSeg);
3959
3960     inline PTR_PTR_Object& SegQueue (unsigned int Seg)
3961     {
3962         return (Seg ? m_FillPointers [Seg-1] : m_Array);
3963     }
3964     inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
3965     {
3966         return m_FillPointers [Seg];
3967     }
3968
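    // Layout note (descriptive, based on the accessors above): m_Array is carved
    // into contiguous segments - one per generation, plus CriticalFinalizerListSeg
    // and FinalizerListSeg - delimited by the m_FillPointers entries. SegQueue(Seg)
    // is the start of segment Seg (m_Array itself for Seg 0, otherwise the previous
    // segment's fill pointer) and SegQueueLimit(Seg) is its end, so a segment is
    // empty exactly when the two are equal, as IsSegEmpty below checks.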
3969     BOOL IsSegEmpty ( unsigned int i)
3970     {
3971         ASSERT ( (int)i < FreeList);
3972         return (SegQueueLimit(i) == SegQueue (i));
3973
3974     }
3975
3976     BOOL FinalizeSegForAppDomain (void *pDomain, 
3977                                   BOOL fRunFinalizers, 
3978                                   unsigned int Seg);
3979
3980 public:
3981     ~CFinalize();
3982     bool Initialize();
3983     void EnterFinalizeLock();
3984     void LeaveFinalizeLock();
3985     bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
3986     Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
3987     BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
3988     void RelocateFinalizationData (int gen, gc_heap* hp);
3989     void WalkFReachableObjects (fq_walk_fn fn);
3990     void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
3991     void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
3992     size_t GetPromotedCount();
3993
3994     //Methods used by the shutdown code to call every finalizer
3995     void SetSegForShutDown(BOOL fHasLock);
3996     size_t GetNumberFinalizableObjects();
3997     void DiscardNonCriticalObjects();
3998
3999     //Methods used by the app domain unloading call to finalize objects in an app domain
4000     bool FinalizeAppDomain (void *pDomain, bool fRunFinalizers);
4001
4002     void CheckFinalizerObjects();
4003 };
4004
4005 class CFinalizeStaticAsserts {
4006     static_assert(dac_finalize_queue::ExtraSegCount == CFinalize::ExtraSegCount, "ExtraSegCount mismatch");
4007     static_assert(offsetof(dac_finalize_queue, m_FillPointers) == offsetof(CFinalize, m_FillPointers), "CFinalize layout mismatch");
4008 };
4009 #endif // FEATURE_PREMORTEM_FINALIZATION
4010
4011 inline
4012  size_t& dd_begin_data_size (dynamic_data* inst)
4013 {
4014   return inst->begin_data_size;
4015 }
4016 inline
4017  size_t& dd_survived_size (dynamic_data* inst)
4018 {
4019   return inst->survived_size;
4020 }
4021 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
4022 inline
4023  size_t& dd_num_npinned_plugs(dynamic_data* inst)
4024 {
4025   return inst->num_npinned_plugs;
4026 }
4027 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
4028 inline
4029 size_t& dd_pinned_survived_size (dynamic_data* inst)
4030 {
4031   return inst->pinned_survived_size;
4032 }
4033 inline
4034 size_t& dd_added_pinned_size (dynamic_data* inst)
4035 {
4036   return inst->added_pinned_size;
4037 }
4038 inline
4039 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
4040 {
4041   return inst->artificial_pinned_survived_size;
4042 }
4043 #ifdef SHORT_PLUGS
4044 inline
4045 size_t& dd_padding_size (dynamic_data* inst)
4046 {
4047   return inst->padding_size;
4048 }
4049 #endif //SHORT_PLUGS
4050 inline
4051  size_t& dd_current_size (dynamic_data* inst)
4052 {
4053   return inst->current_size;
4054 }
4055 inline
4056 float& dd_surv (dynamic_data* inst)
4057 {
4058   return inst->surv;
4059 }
4060 inline
4061 size_t& dd_freach_previous_promotion (dynamic_data* inst)
4062 {
4063   return inst->freach_previous_promotion;
4064 }
4065 inline
4066 size_t& dd_desired_allocation (dynamic_data* inst)
4067 {
4068   return inst->desired_allocation;
4069 }
4070 inline
4071 size_t& dd_collection_count (dynamic_data* inst)
4072 {
4073     return inst->collection_count;
4074 }
4075 inline
4076 size_t& dd_promoted_size (dynamic_data* inst)
4077 {
4078     return inst->promoted_size;
4079 }
4080 inline
4081 float& dd_limit (dynamic_data* inst)
4082 {
4083   return inst->sdata->limit;
4084 }
4085 inline
4086 float& dd_max_limit (dynamic_data* inst)
4087 {
4088   return inst->sdata->max_limit;
4089 }
4090 inline
4091 size_t& dd_max_size (dynamic_data* inst)
4092 {
4093   return inst->sdata->max_size;
4094 }
4095 inline
4096 size_t& dd_min_size (dynamic_data* inst)
4097 {
4098   return inst->min_size;
4099 }
4100 inline
4101 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
4102 {
4103   return inst->new_allocation;
4104 }
4105 inline
4106 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
4107 {
4108   return inst->gc_new_allocation;
4109 }
4110 inline
4111 size_t& dd_fragmentation_limit (dynamic_data* inst)
4112 {
4113   return inst->sdata->fragmentation_limit;
4114 }
4115 inline
4116 float& dd_fragmentation_burden_limit (dynamic_data* inst)
4117 {
4118   return inst->sdata->fragmentation_burden_limit;
4119 }
4120 inline
4121 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
4122 {
4123   return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
4124 }
4125 inline
4126 size_t& dd_fragmentation (dynamic_data* inst)
4127 {
4128   return inst->fragmentation;
4129 }
4130 inline
4131 size_t& dd_gc_clock (dynamic_data* inst)
4132 {
4133   return inst->gc_clock;
4134 }
4135 inline
4136 size_t& dd_time_clock (dynamic_data* inst)
4137 {
4138   return inst->time_clock;
4139 }
4140
4141 inline
4142 size_t& dd_gc_clock_interval (dynamic_data* inst)
4143 {
4144   return inst->sdata->gc_clock;
4145 }
4146 inline
4147 size_t& dd_time_clock_interval (dynamic_data* inst)
4148 {
4149   return inst->sdata->time_clock;
4150 }
4151
4152 inline
4153 size_t& dd_gc_elapsed_time (dynamic_data* inst)
4154 {
4155     return inst->gc_elapsed_time;
4156 }
4157
4158 inline
4159 float& dd_gc_speed (dynamic_data* inst)
4160 {
4161     return inst->gc_speed;
4162 }
4163
4164 inline
4165 alloc_context* generation_alloc_context (generation* inst)
4166 {
4167     return &(inst->allocation_context);
4168 }
4169
4170 inline
4171 uint8_t*& generation_allocation_start (generation* inst)
4172 {
4173   return inst->allocation_start;
4174 }
4175 inline
4176 uint8_t*& generation_allocation_pointer (generation* inst)
4177 {
4178   return inst->allocation_context.alloc_ptr;
4179 }
4180 inline
4181 uint8_t*& generation_allocation_limit (generation* inst)
4182 {
4183   return inst->allocation_context.alloc_limit;
4184 }
4185 inline 
4186 allocator* generation_allocator (generation* inst)
4187 {
4188     return &inst->free_list_allocator;
4189 }
4190
4191 inline
4192 PTR_heap_segment& generation_start_segment (generation* inst)
4193 {
4194   return inst->start_segment;
4195 }
4196 inline
4197 heap_segment*& generation_allocation_segment (generation* inst)
4198 {
4199   return inst->allocation_segment;
4200 }
4201 inline
4202 uint8_t*& generation_plan_allocation_start (generation* inst)
4203 {
4204   return inst->plan_allocation_start;
4205 }
4206 inline
4207 size_t& generation_plan_allocation_start_size (generation* inst)
4208 {
4209   return inst->plan_allocation_start_size;
4210 }
4211 inline
4212 uint8_t*& generation_allocation_context_start_region (generation* inst)
4213 {
4214   return inst->allocation_context_start_region;
4215 }
4216 inline
4217 size_t& generation_free_list_space (generation* inst)
4218 {
4219   return inst->free_list_space;
4220 }
4221 inline
4222 size_t& generation_free_obj_space (generation* inst)
4223 {
4224   return inst->free_obj_space;
4225 }
4226 inline
4227 size_t& generation_allocation_size (generation* inst)
4228 {
4229   return inst->allocation_size;
4230 }
4231
4232 inline
4233 size_t& generation_pinned_allocated (generation* inst)
4234 {
4235     return inst->pinned_allocated;
4236 }
4237 inline
4238 size_t& generation_pinned_allocation_sweep_size (generation* inst)
4239 {
4240     return inst->pinned_allocation_sweep_size;
4241 }
4242 inline
4243 size_t& generation_pinned_allocation_compact_size (generation* inst)
4244 {
4245     return inst->pinned_allocation_compact_size;
4246 }
4247 inline
4248 size_t&  generation_free_list_allocated (generation* inst)
4249 {
4250     return inst->free_list_allocated;
4251 }
4252 inline
4253 size_t&  generation_end_seg_allocated (generation* inst)
4254 {
4255     return inst->end_seg_allocated;
4256 }
4257 inline
4258 BOOL&  generation_allocate_end_seg_p (generation* inst)
4259 {
4260     return inst->allocate_end_seg_p;
4261 }
4262 inline
4263 size_t& generation_condemned_allocated (generation* inst)
4264 {
4265     return inst->condemned_allocated;
4266 }
4267 #ifdef FREE_USAGE_STATS
4268 inline
4269 size_t& generation_pinned_free_obj_space (generation* inst)
4270 {
4271     return inst->pinned_free_obj_space;
4272 }
4273 inline
4274 size_t& generation_allocated_in_pinned_free (generation* inst)
4275 {
4276     return inst->allocated_in_pinned_free;
4277 }
4278 inline
4279 size_t& generation_allocated_since_last_pin (generation* inst)
4280 {
4281     return inst->allocated_since_last_pin;
4282 }
4283 #endif //FREE_USAGE_STATS
4284 inline 
4285 float generation_allocator_efficiency (generation* inst)
4286 {
4287     if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4288     {
4289         return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4290     }
4291     else
4292         return 0;
4293 }
4294 inline
4295 size_t generation_unusable_fragmentation (generation* inst)
4296 {
4297     return (size_t)(generation_free_obj_space (inst) + 
4298                     (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
4299 }
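// Worked example (illustrative numbers only): if a generation has allocated
// 60KB out of its free list, wasted 20KB as free_obj_space and still has
// 100KB of free_list_space, the allocator efficiency is 60/(60+20) = 0.75 and
// the unusable fragmentation is 20KB + (1 - 0.75) * 100KB = 45KB.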
4300
4301 #define plug_skew           sizeof(ObjHeader)
4302 // We always use USE_PADDING_TAIL when fitting so items on the free list should be
4303 // twice the min_obj_size.
4304 #define min_free_list       (2*min_obj_size)
4305 struct plug
4306 {
4307     uint8_t *  skew[plug_skew / sizeof(uint8_t *)];
4308 };
4309
4310 class pair
4311 {
4312 public:
4313     short left;
4314     short right;
4315 };
4316
4317 //Note that these encode the fact that plug_skew is a multiple of uint8_t*.
4318 // Each new field is prepended to the prior struct.
4319
4320 struct plug_and_pair
4321 {
4322     pair        m_pair;
4323     plug        m_plug;
4324 };
4325
4326 struct plug_and_reloc
4327 {
4328     ptrdiff_t   reloc;
4329     pair        m_pair;
4330     plug        m_plug;
4331 };
4332
4333 struct plug_and_gap
4334 {
4335     ptrdiff_t   gap;
4336     ptrdiff_t   reloc;
4337     union
4338     {
4339         pair    m_pair;
4340         int     lr;  //for clearing the entire pair in one instruction
4341     };
4342     plug        m_plug;
4343 };
4344
4345 struct gap_reloc_pair
4346 {
4347     size_t gap;
4348     size_t   reloc;
4349     pair        m_pair;
4350 };
4351
4352 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4353
4354 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4355 {
4356     plug_and_gap plugandgap;
4357 };
4358
4359 struct loh_obj_and_pad
4360 {
4361     ptrdiff_t   reloc;    
4362     plug        m_plug;
4363 };
4364
4365 struct loh_padding_obj
4366 {
4367     uint8_t*    mt;
4368     size_t      len;
4369     ptrdiff_t   reloc;
4370     plug        m_plug;
4371 };
4372 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4373
4374 //flags description
4375 #define heap_segment_flags_readonly     1
4376 #define heap_segment_flags_inrange      2
4377 #define heap_segment_flags_unmappable   4
4378 #define heap_segment_flags_loh          8
4379 #ifdef BACKGROUND_GC
4380 #define heap_segment_flags_swept        16
4381 #define heap_segment_flags_decommitted  32
4382 #define heap_segment_flags_ma_committed 64
4383 // for segments whose mark array is only partially committed.
4384 #define heap_segment_flags_ma_pcommitted 128
4385 #define heap_segment_flags_loh_delete   256
4386 #endif //BACKGROUND_GC
4387
4388 // Need to be careful to keep enough pad items to fit a relocation node,
4389 // padded to QuadWord, before the plug_skew.
4390
4391 class heap_segment
4392 {
4393 public:
4394     uint8_t*        allocated;
4395     uint8_t*        committed;
4396     uint8_t*        reserved;
4397     uint8_t*        used;
4398     uint8_t*        mem;
4399     size_t          flags;
4400     PTR_heap_segment next;
4401     uint8_t*        background_allocated;
4402 #ifdef MULTIPLE_HEAPS
4403     gc_heap*        heap;
4404 #endif //MULTIPLE_HEAPS
4405     uint8_t*        plan_allocated;
4406     uint8_t*        saved_bg_allocated;
4407
4408 #ifdef _MSC_VER
4409 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4410 #pragma warning(disable:4324)  // structure was padded due to __declspec(align())
4411 #endif
4412     aligned_plug_and_gap padandplug;
4413 #ifdef _MSC_VER
4414 #pragma warning(default:4324)  // structure was padded due to __declspec(align())
4415 #endif
4416 };
4417
4418 static_assert(offsetof(dac_heap_segment, allocated) == offsetof(heap_segment, allocated), "DAC heap segment layout mismatch");
4419 static_assert(offsetof(dac_heap_segment, committed) == offsetof(heap_segment, committed), "DAC heap segment layout mismatch");
4420 static_assert(offsetof(dac_heap_segment, reserved) == offsetof(heap_segment, reserved), "DAC heap segment layout mismatch");
4421 static_assert(offsetof(dac_heap_segment, used) == offsetof(heap_segment, used), "DAC heap segment layout mismatch");
4422 static_assert(offsetof(dac_heap_segment, mem) == offsetof(heap_segment, mem), "DAC heap segment layout mismatch");
4423 static_assert(offsetof(dac_heap_segment, flags) == offsetof(heap_segment, flags), "DAC heap segment layout mismatch");
4424 static_assert(offsetof(dac_heap_segment, next) == offsetof(heap_segment, next), "DAC heap segment layout mismatch");
4425 static_assert(offsetof(dac_heap_segment, background_allocated) == offsetof(heap_segment, background_allocated), "DAC heap segment layout mismatch");
4426 #ifdef MULTIPLE_HEAPS
4427 static_assert(offsetof(dac_heap_segment, heap) == offsetof(heap_segment, heap), "DAC heap segment layout mismatch");
4428 #endif // MULTIPLE_HEAPS
4429
4430 inline
4431 uint8_t*& heap_segment_reserved (heap_segment* inst)
4432 {
4433   return inst->reserved;
4434 }
4435 inline
4436 uint8_t*& heap_segment_committed (heap_segment* inst)
4437 {
4438   return inst->committed;
4439 }
4440 inline
4441 uint8_t*& heap_segment_used (heap_segment* inst)
4442 {
4443   return inst->used;
4444 }
4445 inline
4446 uint8_t*& heap_segment_allocated (heap_segment* inst)
4447 {
4448   return inst->allocated;
4449 }
4450
4451 inline
4452 BOOL heap_segment_read_only_p (heap_segment* inst)
4453 {
4454     return ((inst->flags & heap_segment_flags_readonly) != 0);
4455 }
4456
4457 inline
4458 BOOL heap_segment_in_range_p (heap_segment* inst)
4459 {
4460     return (!(inst->flags & heap_segment_flags_readonly) ||
4461             ((inst->flags & heap_segment_flags_inrange) != 0));
4462 }
4463
4464 inline
4465 BOOL heap_segment_unmappable_p (heap_segment* inst)
4466 {
4467     return (!(inst->flags & heap_segment_flags_readonly) ||
4468             ((inst->flags & heap_segment_flags_unmappable) != 0));
4469 }
4470
4471 inline
4472 BOOL heap_segment_loh_p (heap_segment * inst)
4473 {
4474     return !!(inst->flags & heap_segment_flags_loh);
4475 }
4476
4477 #ifdef BACKGROUND_GC
4478 inline
4479 BOOL heap_segment_decommitted_p (heap_segment * inst)
4480 {
4481     return !!(inst->flags & heap_segment_flags_decommitted);
4482 }
4483 #endif //BACKGROUND_GC
4484
4485 inline
4486 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4487 {
4488   return inst->next;
4489 }
4490 inline
4491 uint8_t*& heap_segment_mem (heap_segment* inst)
4492 {
4493   return inst->mem;
4494 }
4495 inline
4496 uint8_t*& heap_segment_plan_allocated (heap_segment* inst)
4497 {
4498   return inst->plan_allocated;
4499 }
4500
4501 #ifdef BACKGROUND_GC
4502 inline
4503 uint8_t*& heap_segment_background_allocated (heap_segment* inst)
4504 {
4505   return inst->background_allocated;
4506 }
4507 inline
4508 uint8_t*& heap_segment_saved_bg_allocated (heap_segment* inst)
4509 {
4510   return inst->saved_bg_allocated;
4511 }
4512 #endif //BACKGROUND_GC
4513
4514 #ifdef MULTIPLE_HEAPS
4515 inline
4516 gc_heap*& heap_segment_heap (heap_segment* inst)
4517 {
4518     return inst->heap;
4519 }
4520 #endif //MULTIPLE_HEAPS
4521
4522 inline
4523 generation* gc_heap::generation_of (int  n)
4524 {
4525     assert (((n <= max_generation+1) && (n >= 0)));
4526     return &generation_table [ n ];
4527 }
4528
4529 inline
4530 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4531 {
4532     return &dynamic_data_table [ gen_number ];
4533 }
4534
4535 #define GC_PAGE_SIZE 0x1000
4536
4537 #define card_word_width ((size_t)32)
4538
4539 //
4540 // The value of card_size is determined empirically according to the average size of an object.
4541 // In the code we also rely on the assumption that one card_table entry (uint32_t) covers an entire OS page.
4542 //
4543 #if defined (BIT64)
4544 #define card_size ((size_t)(2*GC_PAGE_SIZE/card_word_width))
4545 #else
4546 #define card_size ((size_t)(GC_PAGE_SIZE/card_word_width))
4547 #endif // BIT64
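// Worked example (illustrative, using the constants above): with GC_PAGE_SIZE
// of 0x1000 and card_word_width of 32, card_size is 2*4096/32 = 256 bytes on
// 64-bit (4096/32 = 128 bytes on 32-bit), so one card word - a uint32_t holding
// 32 card bits - covers 32 * card_size bytes: two OS pages on 64-bit and one on
// 32-bit. gcard_of() below simply divides an address by card_size to find its card.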
4548
4549 inline
4550 size_t card_word (size_t card)
4551 {
4552     return card / card_word_width;
4553 }
4554
4555 inline
4556 unsigned card_bit (size_t card)
4557 {
4558     return (unsigned)(card % card_word_width);
4559 }
4560
4561 inline
4562 size_t gcard_of (uint8_t* object)
4563 {
4564     return (size_t)(object) / card_size;
4565 }