1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 // optimize for speed
5
6
7 #ifndef _DEBUG
8 #ifdef _MSC_VER
9 #pragma optimize( "t", on )
10 #endif
11 #endif
12 #define inline __forceinline
13
14 #include "gc.h"
15
16 //#define DT_LOG
17
18 #include "gcrecord.h"
19
20 #ifdef _MSC_VER
21 #pragma warning(disable:4293)
22 #pragma warning(disable:4477)
23 #endif //_MSC_VER
24
25 inline void FATAL_GC_ERROR()
26 {
27 #ifndef DACCESS_COMPILE
28     GCToOSInterface::DebugBreak();
29 #endif // DACCESS_COMPILE
30     _ASSERTE(!"Fatal Error in GC.");
31     GCToEEInterface::HandleFatalError(COR_E_EXECUTIONENGINE);
32 }
33
34 #ifdef _MSC_VER
35 #pragma inline_depth(20)
36 #endif
37
38 /* the following section defines the optional features */
39
40 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
41 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
42 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much 
43 // work to make FEATURE_STRUCTALIGN not apply to LOH so they can be both
44 // turned on.
45 #define FEATURE_LOH_COMPACTION
46
47 #ifdef FEATURE_64BIT_ALIGNMENT
48 // We need the following feature as part of keeping 64-bit types aligned in the GC heap.
49 #define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during
50                                 //relocation
51 #endif //FEATURE_64BIT_ALIGNMENT
52
53 #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
54
55 #ifdef SHORT_PLUGS
56 #define DESIRED_PLUG_LENGTH (1000)
57 #endif //SHORT_PLUGS
58
59 #define FEATURE_PREMORTEM_FINALIZATION
60 #define GC_HISTORY
61
62 #ifndef FEATURE_REDHAWK
63 #define HEAP_ANALYZE
64 #define COLLECTIBLE_CLASS
65 #endif // !FEATURE_REDHAWK
66
67 #ifdef HEAP_ANALYZE
68 #define initial_internal_roots        (1024*16)
69 #endif // HEAP_ANALYZE
70
71 #define MARK_LIST         //used sorted list to speed up plan phase
72
73 #define BACKGROUND_GC   //concurrent background GC (requires WRITE_WATCH)
74
75 #ifdef SERVER_GC
76 #define MH_SC_MARK //scalable marking
77 //#define SNOOP_STATS //diagnostic
78 #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
79 #endif //SERVER_GC
80
81 //This is used to mark some types volatile only when scalable marking is used.
82 #if defined (SERVER_GC) && defined (MH_SC_MARK)
83 #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
84 #else //SERVER_GC&&MH_SC_MARK
85 #define SERVER_SC_MARK_VOLATILE(x) x
86 #endif //SERVER_GC&&MH_SC_MARK
87
88 //#define MULTIPLE_HEAPS         //Allow multiple heaps for servers
89
90 #define INTERIOR_POINTERS   //Allow interior pointers in the code manager
91
92 #define CARD_BUNDLE         //enable card bundle feature.(requires WRITE_WATCH)
93
94 // If this is defined we use a map for segments in order to find the heap for 
95 // a segment fast. But it does use more memory as we have to cover the whole
96 // heap range and for each entry we allocate a struct of 5 ptr-size words
97 // (3 for WKS as there's only one heap). 
98 #define SEG_MAPPING_TABLE
99
100 // If allocating the heap mapping table for the available VA consumes too
101 // much memory, you can enable this to allocate only the portion that
102 // corresponds to rw segments and grow it when needed in grow_brick_card_table.
103 // However in heap_of you will need to always compare the address with
104 // g_lowest/highest before you can look at the heap mapping table.
105 #define GROWABLE_SEG_MAPPING_TABLE
106
107 #ifdef BACKGROUND_GC
108 #define MARK_ARRAY      //Mark bit in an array
109 #endif //BACKGROUND_GC
110
111 #if defined(BACKGROUND_GC) || defined (CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
112 #define WRITE_WATCH     //Write Watch feature
114 #endif //BACKGROUND_GC || CARD_BUNDLE || FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
114
115 #ifdef WRITE_WATCH
116 #define array_size 100
117 #endif //WRITE_WATCH
118
119 //#define SHORT_PLUGS           //keep plug short
120
121 #define FFIND_OBJECT        //faster find_object, slower allocation
122 #define FFIND_DECAY  7      //Number of GC for which fast find will be active
123
124 //#define NO_WRITE_BARRIER  //no write barrier, use Write Watch feature
125
126 //#define DEBUG_WRITE_WATCH //Additional debug for write watch
127
128 //#define STRESS_PINNING    //Stress pinning by pinning randomly
129
130 //#define TRACE_GC          //debug trace gc operation
131 //#define SIMPLE_DPRINTF
132
133 //#define TIME_GC           //time allocation and garbage collection
134 //#define TIME_WRITE_WATCH  //time GetWriteWatch and ResetWriteWatch calls
135 //#define COUNT_CYCLES  //Use cycle counter for timing
136 //#define JOIN_STATS         //amount of time spent in the join
137 //also, see TIME_SUSPEND in switches.h.
138
139 //#define SYNCHRONIZATION_STATS
140 //#define SEG_REUSE_STATS
141
142 #if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
143 #define BEGIN_TIMING(x) \
144     int64_t x##_start; \
145     x##_start = GCToOSInterface::QueryPerformanceCounter()
146
147 #define END_TIMING(x) \
148     int64_t x##_end; \
149     x##_end = GCToOSInterface::QueryPerformanceCounter(); \
150     x += x##_end - x##_start
151
152 #else
153 #define BEGIN_TIMING(x)
154 #define END_TIMING(x)
155 #define BEGIN_TIMING_CYCLES(x)
156 #define END_TIMING_CYCLES(x)
157 #endif //SYNCHRONIZATION_STATS || STAGE_STATS
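// Illustrative usage sketch (assumption, not part of this header): BEGIN_TIMING/END_TIMING expect
// an int64_t accumulator whose name matches the tag passed to the macros, e.g.:
#if 0
int64_t suspend_time = 0;          // accumulator, in QueryPerformanceCounter ticks
void timed_work_example()
{
    BEGIN_TIMING (suspend_time);   // declares suspend_time_start and samples the counter
    // ... work being timed ...
    END_TIMING (suspend_time);     // samples the counter again and adds the delta to suspend_time
}
#endif // illustrative example only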
158
159 /* End of optional features */
160
161 #ifdef GC_CONFIG_DRIVEN
162 void GCLogConfig (const char *fmt, ... );
163 #define cprintf(x) {GCLogConfig x;}
164 #endif //GC_CONFIG_DRIVEN
165
166 #ifdef _DEBUG
167 #define TRACE_GC
168 #endif
169
170 // For the bestfit algorithm when we relocate ephemeral generations into an 
171 // existing gen2 segment.
172 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
173 #define MIN_INDEX_POWER2 6
174
175 #ifdef SERVER_GC
176
177 #ifdef BIT64
178 #define MAX_INDEX_POWER2 30
179 #else
180 #define MAX_INDEX_POWER2 26
181 #endif  // BIT64
182
183 #else //SERVER_GC
184
185 #ifdef BIT64
186 #define MAX_INDEX_POWER2 28
187 #else
188 #define MAX_INDEX_POWER2 24
189 #endif  // BIT64
190
191 #endif //SERVER_GC
192
193 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
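// Worked example: on 64-bit server GC this is 30 - 6 + 1 = 25 buckets (sizes 2^6 through 2^30),
// matching the "25 sizes total" note above; on 32-bit workstation GC it is 24 - 6 + 1 = 19 buckets.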
194
195 #define MAX_NUM_FREE_SPACES 200 
196 #define MIN_NUM_FREE_SPACES 5 
197
198 //Please leave these definitions intact.
199
200 // hosted api
201 #ifdef memcpy
202 #undef memcpy
203 #endif //memcpy
204
205 #ifdef FEATURE_STRUCTALIGN
206 #define REQD_ALIGN_DCL ,int requiredAlignment
207 #define REQD_ALIGN_ARG ,requiredAlignment
208 #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
209 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
210 #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
211 #else // FEATURE_STRUCTALIGN
212 #define REQD_ALIGN_DCL
213 #define REQD_ALIGN_ARG
214 #define REQD_ALIGN_AND_OFFSET_DCL
215 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
216 #define REQD_ALIGN_AND_OFFSET_ARG
217 #endif // FEATURE_STRUCTALIGN
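// Illustrative sketch (hypothetical signature, not from this header): the *_DCL/*_ARG macros let
// one declaration and its call site compile with or without FEATURE_STRUCTALIGN, e.g.:
#if 0
uint8_t* allocate_in_condemned_example (size_t size, uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL);
// call site:
//   allocate_in_condemned_example (size, old_loc REQD_ALIGN_AND_OFFSET_ARG);
#endif // illustrative example only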
218
219 #ifdef MULTIPLE_HEAPS
220 #define THREAD_NUMBER_DCL ,int thread
221 #define THREAD_NUMBER_ARG ,thread
222 #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
223 #define THREAD_FROM_HEAP  int thread = heap_number;
224 #define HEAP_FROM_THREAD  gc_heap* hpt = gc_heap::g_heaps[thread];
225 #else
226 #define THREAD_NUMBER_DCL
227 #define THREAD_NUMBER_ARG
228 #define THREAD_NUMBER_FROM_CONTEXT
229 #define THREAD_FROM_HEAP
230 #define HEAP_FROM_THREAD  gc_heap* hpt = 0;
231 #endif //MULTIPLE_HEAPS
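// Illustrative sketch (hypothetical function): the THREAD_*/HEAP_* macros let one body compile for
// both workstation (single heap) and server (multiple heap) builds, e.g.:
#if 0
void mark_object_example (uint8_t* o, ScanContext* sc)
{
    THREAD_NUMBER_FROM_CONTEXT; // int thread = sc->thread_number; (empty for workstation GC)
    HEAP_FROM_THREAD;           // gc_heap* hpt = gc_heap::g_heaps[thread]; (0 for workstation GC)
    // ... per-heap work via hpt, or via the single global heap ...
}
#endif // illustrative example only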
232
233 //These constants are ordered
234 const int policy_sweep = 0;
235 const int policy_compact = 1;
236 const int policy_expand  = 2;
237
238 #ifdef TRACE_GC
239
240
241 extern int     print_level;
242 extern BOOL    trace_gc;
243 extern int    gc_trace_fac;
244
245
246 class hlet
247 {
248     static hlet* bindings;
249     int prev_val;
250     int* pval;
251     hlet* prev_let;
252 public:
253     hlet (int& place, int value)
254     {
255         prev_val = place;
256         pval = &place;
257         place = value;
258         prev_let = bindings;
259         bindings = this;
260     }
261     ~hlet ()
262     {
263         *pval = prev_val;
264         bindings = prev_let;
265     }
266 };
267
268
269 #define let(p,v) hlet __x = hlet (p, v);
270
271 #else //TRACE_GC
272
273 #define gc_count    -1
274 #define let(s,v)
275
276 #endif //TRACE_GC
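// Illustrative usage sketch (assumption): under TRACE_GC, "let" temporarily rebinds an int for the
// enclosing scope; the hlet destructor restores the previous value on scope exit, e.g.:
#if 0
void verbose_phase_example()
{
    let (print_level, 5);   // raise trace verbosity for this scope only
    // ... traced work ...
}                           // print_level restored here by ~hlet
#endif // illustrative example only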
277
278 #ifdef TRACE_GC
279 #define SEG_REUSE_LOG_0 7
280 #define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
281 #define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
282 #define BGC_LOG (DT_LOG_0 + 1)
283 #define GTC_LOG (DT_LOG_0 + 2)
284 #define GC_TABLE_LOG (DT_LOG_0 + 3)
285 #define JOIN_LOG (DT_LOG_0 + 4)
286 #define SPINLOCK_LOG (DT_LOG_0 + 5)
287 #define SNOOP_LOG (DT_LOG_0 + 6)
288
289 #ifndef DACCESS_COMPILE
290
291 #ifdef SIMPLE_DPRINTF
292
293 //#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
294 void GCLog (const char *fmt, ... );
295 //#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
296 //#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
297 //#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
298 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
299 //#define dprintf(l,x) {if ((l == 1) || (l == 2222)) {GCLog x;}}
300 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
301 //#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
302 //#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
303 //#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
304 #else //SIMPLE_DPRINTF
305
306 // The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
307 // reg key GCTraceFacility is set.  The stress log can only take a format string and 4 numbers or
308 // string literals.
309 #define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \
310       if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \
311       else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \
312       else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}}
313
314 #endif //SIMPLE_DPRINTF
315
316 #else //DACCESS_COMPILE
317 #define dprintf(l,x)
318 #endif //DACCESS_COMPILE
319 #else //TRACE_GC
320 #define dprintf(l,x)
321 #endif //TRACE_GC
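// Usage note (descriptive, based on the macro shapes above): dprintf takes a level and a
// parenthesized printf-style argument list, e.g. dprintf (GTC_LOG, ("h%d: condemned gen%d",
// heap_number, n)); the extra parentheses are required because the macro forwards x verbatim.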
322
323 #ifndef FEATURE_REDHAWK
324 #undef  assert
325 #define assert _ASSERTE
326 #undef  ASSERT
327 #define ASSERT _ASSERTE
328 #endif // FEATURE_REDHAWK
329
330 #ifdef _DEBUG
331
332 struct GCDebugSpinLock {
333     VOLATILE(int32_t) lock;                   // -1 if free, 0 if held
334     VOLATILE(Thread *) holding_thread;     // -1 if no thread holds the lock.
335     VOLATILE(BOOL) released_by_gc_p;       // a GC thread released the lock.
336
337     GCDebugSpinLock()
338         : lock(-1), holding_thread((Thread*) -1)
339     {
340     }
341 };
342 typedef GCDebugSpinLock GCSpinLock;
343
344 #elif defined (SYNCHRONIZATION_STATS)
345
346 struct GCSpinLockInstru {
347     VOLATILE(int32_t) lock;
348     // number of times we went into SwitchToThread in enter_spin_lock.
349     unsigned int num_switch_thread;
350     // number of times we went into WaitLonger.
351     unsigned int num_wait_longer;
352     // number of times we ended up calling SwitchToThread in WaitLonger.
353     unsigned int num_switch_thread_w;
354     // number of times we ended up calling DisablePreemptiveGC in WaitLonger.
355     unsigned int num_disable_preemptive_w;
356
357     GCSpinLockInstru()
358         : lock(-1), num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
359     {
360     }
361
362     void init()
363     {
364         num_switch_thread = 0;
365         num_wait_longer = 0;
366         num_switch_thread_w = 0;
367         num_disable_preemptive_w = 0;
368     }
369 };
370
371 typedef GCSpinLockInstru GCSpinLock;
372
373 #else
374
375 struct GCDebugSpinLock {
376     VOLATILE(int32_t) lock;                   // -1 if free, 0 if held
377
378     GCDebugSpinLock()
379         : lock(-1)
380     {
381     }
382 };
383 typedef GCDebugSpinLock GCSpinLock;
384
385 #endif
386
387 class mark;
388 class heap_segment;
389 class CObjectHeader;
390 class l_heap;
391 class sorted_table;
392 class c_synchronize;
393 class seg_free_spaces;
394 class gc_heap;
395
396 #ifdef BACKGROUND_GC
397 class exclusive_sync;
398 class recursive_gc_sync;
399 #endif //BACKGROUND_GC
400
401 // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
402 // make sure you change that one if you change this one!
403 enum gc_pause_mode
404 {
405     pause_batch = 0, //We are not concerned about pause length
406     pause_interactive = 1,     //We are running an interactive app
407     pause_low_latency = 2,     //short pauses are essential
408     //avoid long pauses from blocking full GCs unless running out of memory
409     pause_sustained_low_latency = 3,
410     pause_no_gc = 4
411 };
412
413 enum gc_loh_compaction_mode
414 {
415     loh_compaction_default = 1, // the default mode, don't compact LOH.
416     loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
417     loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
418 };
419
420 enum set_pause_mode_status
421 {
422     set_pause_mode_success = 0,
423     set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
424 };
425
426 enum gc_tuning_point
427 {
428     tuning_deciding_condemned_gen,
429     tuning_deciding_full_gc,
430     tuning_deciding_compaction,
431     tuning_deciding_expansion,
432     tuning_deciding_promote_ephemeral
433 };
434
435 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
436 static const char * const str_bgc_state[] =
437 {
438     "not_in_process",
439     "mark_handles",
440     "mark_stack",
441     "revisit_soh",
442     "revisit_loh",
443     "overflow_soh",
444     "overflow_loh",
445     "final_marking",
446     "sweep_soh",
447     "sweep_loh",
448     "plan_phase"
449 };
450 #endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
451
452 enum allocation_state
453 {
454     a_state_start = 0,
455     a_state_can_allocate,
456     a_state_cant_allocate,
457     a_state_try_fit,
458     a_state_try_fit_new_seg,
459     a_state_try_fit_new_seg_after_cg,
460     a_state_try_fit_no_seg,
461     a_state_try_fit_after_cg,
462     a_state_try_fit_after_bgc,
463     a_state_try_free_full_seg_in_bgc, 
464     a_state_try_free_after_bgc,
465     a_state_try_seg_end,
466     a_state_acquire_seg,
467     a_state_acquire_seg_after_cg,
468     a_state_acquire_seg_after_bgc,
469     a_state_check_and_wait_for_bgc,
470     a_state_trigger_full_compact_gc,
471     a_state_trigger_ephemeral_gc,
472     a_state_trigger_2nd_ephemeral_gc,
473     a_state_check_retry_seg,
474     a_state_max
475 };
476
477 enum gc_type
478 {
479     gc_type_compacting = 0,
480     gc_type_blocking = 1,
481 #ifdef BACKGROUND_GC
482     gc_type_background = 2,
483 #endif //BACKGROUND_GC
484     gc_type_max = 3
485 };
486
487 #define v_high_memory_load_th 97
488
489 //encapsulates the mechanism for the current gc
490 class gc_mechanisms
491 {
492 public:
493     VOLATILE(size_t) gc_index; // starts from 1 for the first GC, like dd_collection_count
494     int condemned_generation;
495     BOOL promotion;
496     BOOL compaction;
497     BOOL loh_compaction;
498     BOOL heap_expansion;
499     uint32_t concurrent;
500     BOOL demotion;
501     BOOL card_bundles;
502     int  gen0_reduction_count;
503     BOOL should_lock_elevation;
504     int elevation_locked_count;
505     BOOL elevation_reduced;
506     BOOL minimal_gc;
507     gc_reason reason;
508     gc_pause_mode pause_mode;
509     BOOL found_finalizers;
510
511 #ifdef BACKGROUND_GC
512     BOOL background_p;
513     bgc_state b_state;
514     BOOL allocations_allowed;
515 #endif //BACKGROUND_GC
516
517 #ifdef STRESS_HEAP
518     BOOL stress_induced;
519 #endif // STRESS_HEAP
520
521     uint32_t entry_memory_load;
522
523     void init_mechanisms(); //for each GC
524     void first_init(); // for the life of the EE
525
526     void record (gc_history_global* history);
527 };
528
529 // This is a compact version of gc_mechanisms that we use to save in the history.
530 class gc_mechanisms_store
531 {
532 public:
533     size_t gc_index; 
534     bool promotion;
535     bool compaction;
536     bool loh_compaction;
537     bool heap_expansion;
538     bool concurrent;
539     bool demotion;
540     bool card_bundles;
541     bool should_lock_elevation;
542     int condemned_generation   : 8; 
543     int gen0_reduction_count   : 8;
544     int elevation_locked_count : 8;
545     gc_reason reason           : 8;
546     gc_pause_mode pause_mode   : 8;
547 #ifdef BACKGROUND_GC
548     bgc_state b_state          : 8;
549 #endif //BACKGROUND_GC
550     bool found_finalizers;
551
552 #ifdef BACKGROUND_GC
553     bool background_p;
554 #endif //BACKGROUND_GC
555
556 #ifdef STRESS_HEAP
557     bool stress_induced;
558 #endif // STRESS_HEAP
559
560 #ifdef BIT64
561     uint32_t entry_memory_load;
562 #endif // BIT64
563
564     void store (gc_mechanisms* gm)
565     {
566         gc_index                = gm->gc_index; 
567         condemned_generation    = gm->condemned_generation;
568         promotion               = (gm->promotion != 0);
569         compaction              = (gm->compaction != 0);
570         loh_compaction          = (gm->loh_compaction != 0);
571         heap_expansion          = (gm->heap_expansion != 0);
572         concurrent              = (gm->concurrent != 0);
573         demotion                = (gm->demotion != 0);
574         card_bundles            = (gm->card_bundles != 0);
575         gen0_reduction_count    = gm->gen0_reduction_count;
576         should_lock_elevation   = (gm->should_lock_elevation != 0);
577         elevation_locked_count  = gm->elevation_locked_count;
578         reason                  = gm->reason;
579         pause_mode              = gm->pause_mode;
580         found_finalizers        = (gm->found_finalizers != 0);
581
582 #ifdef BACKGROUND_GC
583         background_p            = (gm->background_p != 0);
584         b_state                 = gm->b_state;
585 #endif //BACKGROUND_GC
586
587 #ifdef STRESS_HEAP
588         stress_induced          = (gm->stress_induced != 0);
589 #endif // STRESS_HEAP
590
591 #ifdef BIT64
592         entry_memory_load       = gm->entry_memory_load;
593 #endif // BIT64        
594     }
595 };
596
597 #ifdef GC_STATS
598
599 // GC specific statistics, tracking counts and timings for GCs occurring in the system.
600 // This writes the statistics to a file every 60 seconds, if a file is specified in
601 // COMPlus_GcMixLog
602
603 struct GCStatistics
604     : public StatisticsBase
605 {
606     // initialized to the contents of COMPlus_GcMixLog, or NULL, if not present
607     static TCHAR* logFileName;
608     static FILE*  logFile;
609
610     // number of times we executed a background GC, a foreground GC, or a
611     // non-concurrent GC
612     int cntBGC, cntFGC, cntNGC;
613
614     // min, max, and total time spent performing BGCs, FGCs, NGCs
615     // (BGC time includes everything between the moment the BGC starts until 
616     // it completes, i.e. the times of all FGCs occuring concurrently)
617     MinMaxTot bgc, fgc, ngc;
618
619     // number of times we executed a compacting GC (sweeping counts can be derived)
620     int cntCompactNGC, cntCompactFGC;
621
622     // count of reasons
623     int cntReasons[reason_max];
624
625     // count of condemned generation, by NGC and FGC:
626     int cntNGCGen[max_generation+1];
627     int cntFGCGen[max_generation];
628     
629     ///////////////////////////////////////////////////////////////////////////////////////////////
630     // Internal mechanism:
631
632     virtual void Initialize();
633     virtual void DisplayAndUpdate();
634
635     // Public API
636
637     static BOOL Enabled()
638     { return logFileName != NULL; }
639
640     void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
641 };
642
643 extern GCStatistics g_GCStatistics;
644 extern GCStatistics g_LastGCStatistics;
645
646 #endif // GC_STATS
647
648
649 typedef DPTR(class heap_segment)               PTR_heap_segment;
650 typedef DPTR(class gc_heap)                    PTR_gc_heap;
651 typedef DPTR(PTR_gc_heap)                      PTR_PTR_gc_heap;
652 #ifdef FEATURE_PREMORTEM_FINALIZATION
653 typedef DPTR(class CFinalize)                  PTR_CFinalize;
654 #endif // FEATURE_PREMORTEM_FINALIZATION
655
656 //-------------------------------------
657 //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size 
658 //and doubling each time. The last bucket (index == num_buckets) is for largest sizes with no limit
659
660 #define MAX_BUCKET_COUNT (13)//Max number of buckets for the small generations. 
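// Worked example (illustrative numbers): with num_buckets == 3 and first_bucket_size == 256,
// bucket 0 holds free items smaller than 256 bytes, bucket 1 holds items in [256, 512), and the
// last bucket holds every larger size with no upper limit.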
661 class alloc_list 
662 {
663     uint8_t* head;
664     uint8_t* tail;
665
666     size_t damage_count;
667 public:
668 #ifdef FL_VERIFICATION
669     size_t item_count;
670 #endif //FL_VERIFICATION
671
672     uint8_t*& alloc_list_head () { return head;}
673     uint8_t*& alloc_list_tail () { return tail;}
674     size_t& alloc_list_damage_count(){ return damage_count; }
675     alloc_list()
676     {
677         head = 0; 
678         tail = 0; 
679         damage_count = 0;
680     }
681 };
682
683
684 class allocator 
685 {
686     size_t num_buckets;
687     size_t frst_bucket_size;
688     alloc_list first_bucket;
689     alloc_list* buckets;
690     alloc_list& alloc_list_of (unsigned int bn);
691     size_t& alloc_list_damage_count_of (unsigned int bn);
692
693 public:
694     allocator (unsigned int num_b, size_t fbs, alloc_list* b);
695     allocator()
696     {
697         num_buckets = 1;
698         frst_bucket_size = SIZE_T_MAX;
699     }
700     unsigned int number_of_buckets() {return (unsigned int)num_buckets;}
701
702     size_t first_bucket_size() {return frst_bucket_size;}
703     uint8_t*& alloc_list_head_of (unsigned int bn)
704     {
705         return alloc_list_of (bn).alloc_list_head();
706     }
707     uint8_t*& alloc_list_tail_of (unsigned int bn)
708     {
709         return alloc_list_of (bn).alloc_list_tail();
710     }
711     void clear();
712     BOOL discard_if_no_fit_p()
713     {
714         return (num_buckets == 1);
715     }
716
717     // This is when we know there's nothing to repair because this free
718     // list has never gone through plan phase. Right now it's only used
719     // by the background ephemeral sweep when we copy the local free list
720     // to gen0's free list.
721     //
722     // We copy head and tail manually (vs together like copy_to_alloc_list)
723     // since we need to copy tail first because when we get the free items off
724     // of each bucket we check head first. We also need to copy the
725     // smaller buckets first so when gen0 allocation needs to thread
726     // smaller items back that bucket is guaranteed to have been full
727     // copied.
728     void copy_with_no_repair (allocator* allocator_to_copy)
729     {
730         assert (num_buckets == allocator_to_copy->number_of_buckets());
731         for (unsigned int i = 0; i < num_buckets; i++)
732         {
733             alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
734             alloc_list_tail_of(i) = al->alloc_list_tail();
735             alloc_list_head_of(i) = al->alloc_list_head();
736         }
737     }
738
739     void unlink_item (unsigned int bucket_number, uint8_t* item, uint8_t* previous_item, BOOL use_undo_p);
740     void thread_item (uint8_t* item, size_t size);
741     void thread_item_front (uint8_t* item, size_t size);
742     void thread_free_item (uint8_t* free_item, uint8_t*& head, uint8_t*& tail);
743     void copy_to_alloc_list (alloc_list* toalist);
744     void copy_from_alloc_list (alloc_list* fromalist);
745     void commit_alloc_list_changes();
746 };
747
748 #define NUM_GEN_POWER2 (20)
749 #define BASE_GEN_SIZE (1*512)
750
751 // group the frequently used ones together (need instrumentation on accessors)
752 class generation
753 {
754 public:
755     // Don't move these first two fields without adjusting the references
756     // from the __asm in jitinterface.cpp.
757     alloc_context   allocation_context;
758     PTR_heap_segment start_segment;
759     uint8_t*        allocation_start;
760     heap_segment*   allocation_segment;
761     uint8_t*        allocation_context_start_region;
762     allocator       free_list_allocator;
763     size_t          free_list_allocated;
764     size_t          end_seg_allocated;
765     BOOL            allocate_end_seg_p;
766     size_t          condemned_allocated;
767     size_t          free_list_space;
768     size_t          free_obj_space;
769     size_t          allocation_size;
770     uint8_t*        plan_allocation_start;
771     size_t          plan_allocation_start_size;
772
773     // this is the pinned plugs that got allocated into this gen.
774     size_t          pinned_allocated;
775     size_t          pinned_allocation_compact_size;
776     size_t          pinned_allocation_sweep_size;
777     int             gen_num;
778
779 #ifdef FREE_USAGE_STATS
780     size_t          gen_free_spaces[NUM_GEN_POWER2];
781     // these are non pinned plugs only
782     size_t          gen_plugs[NUM_GEN_POWER2];
783     size_t          gen_current_pinned_free_spaces[NUM_GEN_POWER2];
784     size_t          pinned_free_obj_space;
785     // this is what got allocated into the pinned free spaces.
786     size_t          allocated_in_pinned_free;
787     size_t          allocated_since_last_pin;
788 #endif //FREE_USAGE_STATS
789 };
790
791 static_assert(offsetof(dac_generation, allocation_context) == offsetof(generation, allocation_context), "DAC generation offset mismatch");
792 static_assert(offsetof(dac_generation, start_segment) == offsetof(generation, start_segment), "DAC generation offset mismatch");
793 static_assert(offsetof(dac_generation, allocation_start) == offsetof(generation, allocation_start), "DAC generation offset mismatch");
794
795
796 // The dynamic data fields are grouped into 3 categories:
797 //
798 // calculated logical data (like desired_allocation)
799 // physical data (like fragmentation)
800 // const data (like min_gc_size), initialized at the beginning
801 class dynamic_data
802 {
803 public:
804     ptrdiff_t new_allocation;
805     ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
806     float     surv;
807     size_t    desired_allocation;
808
809     // # of bytes taken by objects (ie, not free space) at the beginning
810     // of the GC.
811     size_t    begin_data_size;
812     // # of bytes taken by survived objects after mark.
813     size_t    survived_size;
814     // # of bytes taken by survived pinned plugs after mark.
815     size_t    pinned_survived_size;
816     size_t    artificial_pinned_survived_size;
817     size_t    added_pinned_size;
818
819 #ifdef SHORT_PLUGS
820     size_t    padding_size;
821 #endif //SHORT_PLUGS
822 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
823     // # of plugs that are not pinned plugs.
824     size_t    num_npinned_plugs;
825 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
826     //total object size after a GC, ie, doesn't include fragmentation
827     size_t    current_size; 
828     size_t    collection_count;
829     size_t    promoted_size;
830     size_t    freach_previous_promotion;
831     size_t    fragmentation;    //fragmentation when we don't compact
832     size_t    gc_clock;         //gc# when last GC happened
833     size_t    time_clock;       //time when last gc started
834     size_t    gc_elapsed_time;  // Time it took for the gc to complete
835     float     gc_speed;         //  speed in bytes/msec for the gc to complete
836
837     // min_size is always the same as min_gc_size.
838     size_t    min_gc_size;
839     size_t    max_size;
840     size_t    min_size;
841     size_t    default_new_allocation;
842     size_t    fragmentation_limit;
843     float     fragmentation_burden_limit;
844     float     limit;
845     float     max_limit;
846 };
847
848 #define ro_in_entry 0x1
849
850 #ifdef SEG_MAPPING_TABLE
851 // Note that I am storing both h0 and seg0, even though in Server GC you can get to 
852 // the heap* from the segment info. This is because heap_of needs to be really fast
853 // and we would not want yet another indirection.
854 struct seg_mapping
855 {
856     // if an address is > boundary it belongs to h1; else h0.
857     // since we init h0 and h1 to 0, if we get 0 it means that
858     // address doesn't exist on managed segments. And heap_of 
859     // would just return heap0 which is what it does now.
860     uint8_t* boundary;
861 #ifdef MULTIPLE_HEAPS
862     gc_heap* h0;
863     gc_heap* h1;
864 #endif //MULTIPLE_HEAPS
865     // You could have an address that's in between 2 segments and
866     // this would return a seg; the caller will then use
867     // in_range_for_segment to determine if it's on that seg.
868     heap_segment* seg0; // this is what the seg for h0 is.
869     heap_segment* seg1; // this is what the seg for h1 is.
870     // Note that when frozen objects are used we mask seg1
871     // with 0x1 to indicate that there is a ro segment for
872     // this entry.
873 };
874 #endif //SEG_MAPPING_TABLE
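// Illustrative sketch (simplified assumption of how a heap_of-style lookup can use this entry):
#if 0
gc_heap* heap_of_example (uint8_t* address, seg_mapping* entry)
{
    // per the comment above: addresses above the boundary belong to h1, the rest to h0;
    // both are 0 when the address is not covered by any managed segment.
    return (address > entry->boundary) ? entry->h1 : entry->h0;
}
#endif // illustrative example only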
875
876 // alignment helpers
877 //Alignment constant for allocation
878 #define ALIGNCONST (DATA_ALIGNMENT-1)
879
880 inline
881 size_t Align (size_t nbytes, int alignment=ALIGNCONST)
882 {
883     return (nbytes + alignment) & ~alignment;
884 }
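// Worked example: with DATA_ALIGNMENT == 8 (so ALIGNCONST == 7), Align (13) == (13 + 7) & ~7 == 16,
// and Align (16) stays 16 since it is already aligned.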
885
886 //return alignment constant for small object heap vs large object heap
887 inline
888 int get_alignment_constant (BOOL small_object_p)
889 {
890 #ifdef FEATURE_STRUCTALIGN
891     // If any objects on the large object heap require 8-byte alignment,
892     // the compiler will tell us so.  Let's not guess an alignment here.
893     return ALIGNCONST;
894 #else // FEATURE_STRUCTALIGN
895     return small_object_p ? ALIGNCONST : 7;
896 #endif // FEATURE_STRUCTALIGN
897 }
898
899 struct etw_opt_info
900 {
901     size_t desired_allocation;
902     size_t new_allocation;
903     int    gen_number;
904 };
905
906 enum alloc_wait_reason
907 {
908     // When we don't care about firing an event for
909     // this.
910     awr_ignored = -1,
911
912     // when we detect we are in low memory
913     awr_low_memory = 0,
914
915     // when we detect the ephemeral segment is too full
916     awr_low_ephemeral = 1,
917
918     // we've given out too much budget for gen0.
919     awr_gen0_alloc = 2,
920
921     // we've given out too much budget for loh.
922     awr_loh_alloc = 3,
923
924     // this event is really obsolete - it's for pre-XP
925     // OSs where low mem notification is not supported.
926     awr_alloc_loh_low_mem = 4,
927
928     // we ran out of VM space to reserve on loh.
929     awr_loh_oos = 5, 
930
931     // ran out of space when allocating a small object
932     awr_gen0_oos_bgc = 6,
933
934     // ran out of space when allocating a large object
935     awr_loh_oos_bgc = 7,
936
937     // waiting for BGC to let FGC happen
938     awr_fgc_wait_for_bgc = 8,
939
940     // wait for bgc to finish to get loh seg.
941     awr_get_loh_seg = 9,
942
943     // we don't allow loh allocation during bgc planning.
944     awr_loh_alloc_during_plan = 10,
945
946     // we don't allow too much loh allocation during bgc.
947     awr_loh_alloc_during_bgc = 11
948 };
949
950 struct alloc_thread_wait_data
951 {
952     int awr;
953 };
954
955 enum msl_take_state
956 {
957     mt_get_large_seg,
958     mt_wait_bgc_plan,
959     mt_wait_bgc,
960     mt_block_gc,
961     mt_clr_mem,
962     mt_clr_large_mem,
963     mt_t_eph_gc,
964     mt_t_full_gc,
965     mt_alloc_small,
966     mt_alloc_large,
967     mt_alloc_small_cant,
968     mt_alloc_large_cant,
969     mt_try_alloc,
970     mt_try_budget
971 };
972
973 enum msl_enter_state
974 {
975     me_acquire,
976     me_release
977 };
978
979 struct spinlock_info
980 {
981     msl_enter_state enter_state;
982     msl_take_state take_state;
983     EEThreadId thread_id;
984 };
985
986 const unsigned HS_CACHE_LINE_SIZE = 128;
987
988 #ifdef SNOOP_STATS
989 struct snoop_stats_data
990 {
991     int heap_index;
992
993     // total number of objects that we called
994     // gc_mark on.
995     size_t objects_checked_count;
996     // total number of times we called gc_mark
997     // on a 0 reference.
998     size_t zero_ref_count;
999     // total objects actually marked.
1000     size_t objects_marked_count;
1001     // number of objects written to the mark stack because
1002     // of mark_stolen.
1003     size_t stolen_stack_count;
1004     // number of objects pushed onto the mark stack because
1005     // of the partial mark code path.
1006     size_t partial_stack_count;
1007     // number of objects pushed onto the mark stack because
1008     // of the non partial mark code path.
1009     size_t normal_stack_count;
1010     // number of references marked without mark stack.
1011     size_t non_stack_count;
1012
1013     // number of times we detect next heap's mark stack
1014     // is not busy.
1015     size_t stack_idle_count;
1016
1017     // number of times we do switch to thread.
1018     size_t switch_to_thread_count;
1019
1020     // number of times we are checking if the next heap's
1021     // mark stack is busy.
1022     size_t check_level_count;
1023     // number of times next stack is busy and level is 
1024     // at the bottom.
1025     size_t busy_count;
1026     // how many interlocked exchange operations we did
1027     size_t interlocked_count;
1028     // number of times parent objects were stolen
1029     size_t partial_mark_parent_count;
1030     // number of times we look at a normal stolen entry, 
1031     // or the beginning/ending PM pair.
1032     size_t stolen_or_pm_count; 
1033     // number of times we see 2 for the entry.
1034     size_t stolen_entry_count; 
1035     // number of times we see a PM entry that's not ready.
1036     size_t pm_not_ready_count; 
1037     // number of stolen normal marked objects and partial mark children.
1038     size_t normal_count;
1039     // number of times the bottom of mark stack was cleared.
1040     size_t stack_bottom_clear_count;
1041 };
1042 #endif //SNOOP_STATS
1043
1044 struct no_gc_region_info
1045 {
1046     size_t soh_allocation_size;
1047     size_t loh_allocation_size;
1048     size_t started;
1049     size_t num_gcs;
1050     size_t num_gcs_induced;
1051     start_no_gc_region_status start_status;
1052     gc_pause_mode saved_pause_mode;
1053     size_t saved_gen0_min_size;
1054     size_t saved_gen3_min_size;
1055     BOOL minimal_gc_p;
1056 };
1057
1058 // if you change these, make sure you update them for sos (strike.cpp) as well.
1059 // 
1060 // !!!NOTE!!!
1061 // Right now I am only recording data from blocking GCs. When recording from BGC,
1062 // it should have its own copy just like gc_data_per_heap.
1063 // for BGCs we will have a very different set of datapoints to record.
1064 enum interesting_data_point
1065 {
1066     idp_pre_short = 0,
1067     idp_post_short = 1,
1068     idp_merged_pin = 2,
1069     idp_converted_pin = 3,
1070     idp_pre_pin = 4,
1071     idp_post_pin = 5,
1072     idp_pre_and_post_pin = 6,
1073     idp_pre_short_padded = 7,
1074     idp_post_short_padded = 8,
1075     max_idp_count
1076 };
1077
1078 //class definition of the internal class
1079 class gc_heap
1080 {
1081     friend struct ::_DacGlobals;
1082 #ifdef DACCESS_COMPILE
1083     friend class ::ClrDataAccess;
1084     friend class ::DacHeapWalker;
1085 #endif //DACCESS_COMPILE
1086
1087     friend class GCHeap;
1088 #ifdef FEATURE_PREMORTEM_FINALIZATION
1089     friend class CFinalize;
1090 #endif // FEATURE_PREMORTEM_FINALIZATION
1091     friend struct ::alloc_context;
1092     friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
1093     friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1094     friend class t_join;
1095     friend class gc_mechanisms;
1096     friend class seg_free_spaces;
1097
1098 #ifdef BACKGROUND_GC
1099     friend class exclusive_sync;
1100     friend class recursive_gc_sync;
1101 #endif //BACKGROUND_GC
1102
1103 #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1104     friend void checkGCWriteBarrier();
1105     friend void initGCShadow();
1106 #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1107
1108     friend void PopulateDacVars(GcDacVars *gcDacVars);
1109
1110 #ifdef MULTIPLE_HEAPS
1111     typedef void (gc_heap::* card_fn) (uint8_t**, int);
1112 #define call_fn(fn) (this->*fn)
1113 #define __this this
1114 #else
1115     typedef void (* card_fn) (uint8_t**);
1116 #define call_fn(fn) (*fn)
1117 #define __this (gc_heap*)0
1118 #endif
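// Usage note (descriptive): inside a gc_heap member function, a card function is invoked as
// call_fn(fn) (poo THREAD_NUMBER_ARG), which expands to (this->*fn)(poo, thread) for server GC
// and to (*fn)(poo) for workstation GC.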
1119
1120 public:
1121
1122 #ifdef TRACE_GC
1123     PER_HEAP
1124     void print_free_list (int gen, heap_segment* seg);
1125 #endif // TRACE_GC
1126
1127 #ifdef SYNCHRONIZATION_STATS
1128
1129     PER_HEAP_ISOLATED
1130     void init_sync_stats()
1131     {
1132 #ifdef MULTIPLE_HEAPS
1133         for (int i = 0; i < gc_heap::n_heaps; i++)
1134         {
1135             gc_heap::g_heaps[i]->init_heap_sync_stats();
1136         }
1137 #else  //MULTIPLE_HEAPS
1138         init_heap_sync_stats();
1139 #endif  //MULTIPLE_HEAPS
1140     }
1141
1142     PER_HEAP_ISOLATED
1143     void print_sync_stats(unsigned int gc_count_during_log)
1144     {
1145         // bad/good gl acquire is accumulative during the log interval (because the numbers are too small)
1146         // min/max msl_acquire is the min/max during the log interval, not each GC.
1147         // Threads is the number of allocation threads for the last GC.
1148         // num of msl acquired, avg_msl, high and low are all for each GC.
1149         printf("%2s%2s%10s%10s%12s%6s%4s%8s(  st,  wl, stw, dpw)\n",
1150             "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
1151
1152 #ifdef MULTIPLE_HEAPS
1153         for (int i = 0; i < gc_heap::n_heaps; i++)
1154         {
1155             gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
1156         }
1157 #else  //MULTIPLE_HEAPS
1158         print_heap_sync_stats(0, gc_count_during_log);
1159 #endif  //MULTIPLE_HEAPS
1160     }
1161
1162 #endif //SYNCHRONIZATION_STATS
1163
1164     PER_HEAP
1165     void verify_soh_segment_list();
1166     PER_HEAP
1167     void verify_mark_array_cleared (heap_segment* seg);
1168     PER_HEAP
1169     void verify_mark_array_cleared();
1170     PER_HEAP
1171     void verify_seg_end_mark_array_cleared();
1172     PER_HEAP
1173     void verify_partial();
1174
1175 #ifdef VERIFY_HEAP
1176     PER_HEAP
1177     void verify_free_lists(); 
1178     PER_HEAP
1179     void verify_heap (BOOL begin_gc_p);
1180 #endif //VERIFY_HEAP
1181
1182     PER_HEAP_ISOLATED
1183     void fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num);
1184
1185     PER_HEAP_ISOLATED
1186     void fire_pevents();
1187
1188 #ifdef FEATURE_BASICFREEZE
1189     static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
1190 #endif
1191
1192     static
1193     heap_segment* make_heap_segment (uint8_t* new_pages,
1194                                      size_t size, 
1195                                      int h_number);
1196     static
1197     l_heap* make_large_heap (uint8_t* new_pages, size_t size, BOOL managed);
1198
1199     static
1200     gc_heap* make_gc_heap(
1201 #if defined (MULTIPLE_HEAPS)
1202         GCHeap* vm_heap,
1203         int heap_number
1204 #endif //MULTIPLE_HEAPS
1205         );
1206
1207     static
1208     void destroy_gc_heap(gc_heap* heap);
1209
1210     static
1211     HRESULT initialize_gc  (size_t segment_size,
1212                             size_t heap_size
1213 #ifdef MULTIPLE_HEAPS
1214                             , unsigned number_of_heaps
1215 #endif //MULTIPLE_HEAPS
1216         );
1217
1218     static
1219     void shutdown_gc();
1220
1221     PER_HEAP
1222     CObjectHeader* allocate (size_t jsize,
1223                              alloc_context* acontext);
1224
1225 #ifdef MULTIPLE_HEAPS
1226     static void balance_heaps (alloc_context* acontext);
1227     static 
1228     gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
1229     static
1230     void gc_thread_stub (void* arg);
1231 #endif //MULTIPLE_HEAPS
1232
1233     CObjectHeader* try_fast_alloc (size_t jsize);
1234
1235     // For LOH allocations we only update the alloc_bytes_loh in allocation
1236     // context - we don't actually use the ptr/limit from it so I am
1237     // making this explicit by not passing in the alloc_context.
1238     PER_HEAP
1239     CObjectHeader* allocate_large_object (size_t size, int64_t& alloc_bytes);
1240
1241 #ifdef FEATURE_STRUCTALIGN
1242     PER_HEAP
1243     uint8_t* pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size);
1244 #endif // FEATURE_STRUCTALIGN
1245
1246     PER_HEAP_ISOLATED
1247     void do_pre_gc();
1248
1249     PER_HEAP_ISOLATED
1250     void do_post_gc();
1251
1252     PER_HEAP
1253     BOOL expand_soh_with_minimal_gc();
1254
1255     // EE is always suspended when this method is called.
1256     // returning FALSE means we actually didn't do a GC. This happens
1257     // when we figured that we needed to do a BGC.
1258     PER_HEAP
1259     int garbage_collect (int n);
1260
1261     PER_HEAP
1262     void init_records();
1263
1264     static 
1265     uint32_t* make_card_table (uint8_t* start, uint8_t* end);
1266
1267     static
1268     void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
1269
1270     static
1271     int grow_brick_card_tables (uint8_t* start,
1272                                 uint8_t* end,
1273                                 size_t size,
1274                                 heap_segment* new_seg, 
1275                                 gc_heap* hp,
1276                                 BOOL loh_p);
1277
1278     PER_HEAP
1279     BOOL is_mark_set (uint8_t* o);
1280
1281 #ifdef FEATURE_BASICFREEZE
1282     PER_HEAP_ISOLATED
1283     bool frozen_object_p(Object* obj);
1284 #endif // FEATURE_BASICFREEZE
1285
1286 protected:
1287
1288     PER_HEAP_ISOLATED
1289     void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1290
1291     PER_HEAP
1292     void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1293
1294     struct walk_relocate_args
1295     {
1296         uint8_t* last_plug;
1297         BOOL is_shortened;
1298         mark* pinned_plug_entry;
1299         void* profiling_context;
1300         record_surv_fn fn;
1301     };
1302
1303     PER_HEAP
1304     void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
1305
1306     PER_HEAP
1307     void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
1308                     walk_relocate_args* args);
1309
1310     PER_HEAP
1311     void walk_relocation (void* profiling_context, record_surv_fn fn);
1312
1313     PER_HEAP
1314     void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
1315
1316     PER_HEAP
1317     void walk_finalize_queue (fq_walk_fn fn);
1318
1319 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1320     PER_HEAP
1321     void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
1322 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1323
1324     // used in blocking GCs after plan phase so this walks the plugs.
1325     PER_HEAP
1326     void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
1327     PER_HEAP
1328     void walk_survivors_for_loh (void* profiling_context, record_surv_fn fn);
1329
1330     PER_HEAP
1331     int generation_to_condemn (int n, 
1332                                BOOL* blocking_collection_p,
1333                                BOOL* elevation_requested_p,
1334                                BOOL check_only_p);
1335
1336     PER_HEAP_ISOLATED
1337     int joined_generation_to_condemn (BOOL should_evaluate_elevation, int n_initial, BOOL* blocking_collection
1338                                         STRESS_HEAP_ARG(int n_original));
1339
1340     PER_HEAP
1341     size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
1342
1343     PER_HEAP_ISOLATED
1344     uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
1345
1346     PER_HEAP
1347     void concurrent_print_time_delta (const char* msg);
1348     PER_HEAP
1349     void free_list_info (int gen_num, const char* msg);
1350
1351     // in svr GC on entry and exit of this method, the GC threads are not 
1352     // synchronized
1353     PER_HEAP
1354     void gc1();
1355
1356     PER_HEAP_ISOLATED
1357     void save_data_for_no_gc();
1358
1359     PER_HEAP_ISOLATED
1360     void restore_data_for_no_gc();
1361
1362     PER_HEAP_ISOLATED
1363     void update_collection_counts_for_no_gc();
1364
1365     PER_HEAP_ISOLATED
1366     BOOL should_proceed_with_gc();
1367
1368     PER_HEAP_ISOLATED
1369     void record_gcs_during_no_gc();
1370
1371     PER_HEAP
1372     BOOL find_loh_free_for_no_gc();
1373
1374     PER_HEAP
1375     BOOL find_loh_space_for_no_gc();
1376
1377     PER_HEAP
1378     BOOL commit_loh_for_no_gc (heap_segment* seg);
1379
1380     PER_HEAP_ISOLATED
1381     start_no_gc_region_status prepare_for_no_gc_region (uint64_t total_size,
1382                                                         BOOL loh_size_known,
1383                                                         uint64_t loh_size,
1384                                                         BOOL disallow_full_blocking);
1385
1386     PER_HEAP
1387     BOOL loh_allocated_for_no_gc();
1388
1389     PER_HEAP_ISOLATED
1390     void release_no_gc_loh_segments();    
1391
1392     PER_HEAP_ISOLATED
1393     void thread_no_gc_loh_segments();
1394
1395     PER_HEAP
1396     void check_and_set_no_gc_oom();
1397
1398     PER_HEAP
1399     void allocate_for_no_gc_after_gc();
1400
1401     PER_HEAP
1402     void set_loh_allocations_for_no_gc();
1403
1404     PER_HEAP
1405     void set_soh_allocations_for_no_gc();
1406
1407     PER_HEAP
1408     void prepare_for_no_gc_after_gc();
1409
1410     PER_HEAP_ISOLATED
1411     void set_allocations_for_no_gc();
1412
1413     PER_HEAP_ISOLATED
1414     BOOL should_proceed_for_no_gc();
1415
1416     PER_HEAP_ISOLATED
1417     start_no_gc_region_status get_start_no_gc_region_status();
1418
1419     PER_HEAP_ISOLATED
1420     end_no_gc_region_status end_no_gc_region();
1421
1422     PER_HEAP_ISOLATED
1423     void handle_failure_for_no_gc();
1424
1425     PER_HEAP
1426     void fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address);
1427
1428     PER_HEAP
1429     void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
1430
1431     PER_HEAP
1432     size_t limit_from_size (size_t size, size_t room, int gen_number,
1433                             int align_const);
1434     PER_HEAP
1435     int try_allocate_more_space (alloc_context* acontext, size_t jsize,
1436                                  int alloc_generation_number);
1437     PER_HEAP
1438     BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
1439                               int alloc_generation_number);
1440
1441     PER_HEAP
1442     size_t get_full_compact_gc_count();
1443
1444     PER_HEAP
1445     BOOL short_on_end_of_seg (int gen_number,
1446                               heap_segment* seg,
1447                               int align_const);
1448
1449     PER_HEAP
1450     BOOL a_fit_free_list_p (int gen_number, 
1451                             size_t size, 
1452                             alloc_context* acontext,
1453                             int align_const);
1454
1455 #ifdef BACKGROUND_GC
1456     PER_HEAP
1457     void wait_for_background (alloc_wait_reason awr);
1458
1459     PER_HEAP
1460     void wait_for_bgc_high_memory (alloc_wait_reason awr);
1461
1462     PER_HEAP
1463     void bgc_loh_alloc_clr (uint8_t* alloc_start,
1464                             size_t size, 
1465                             alloc_context* acontext,
1466                             int align_const, 
1467                             int lock_index,
1468                             BOOL check_used_p,
1469                             heap_segment* seg);
1470 #endif //BACKGROUND_GC
1471     
1472 #ifdef BACKGROUND_GC
1473     PER_HEAP
1474     void wait_for_background_planning (alloc_wait_reason awr);
1475
1476     PER_HEAP
1477     BOOL bgc_loh_should_allocate();
1478 #endif //BACKGROUND_GC
1479
1480 #define max_saved_spinlock_info 48
1481
1482 #ifdef SPINLOCK_HISTORY
1483     PER_HEAP
1484     int spinlock_info_index;
1485
1486     PER_HEAP
1487     spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
1488 #endif //SPINLOCK_HISTORY
1489
1490     PER_HEAP
1491     void add_saved_spinlock_info (
1492             msl_enter_state enter_state, 
1493             msl_take_state take_state);
1494
1495     PER_HEAP
1496     BOOL a_fit_free_list_large_p (size_t size, 
1497                                   alloc_context* acontext,
1498                                   int align_const);
1499
1500     PER_HEAP
1501     BOOL a_fit_segment_end_p (int gen_number,
1502                               heap_segment* seg,
1503                               size_t size, 
1504                               alloc_context* acontext,
1505                               int align_const,
1506                               BOOL* commit_failed_p);
1507     PER_HEAP
1508     BOOL loh_a_fit_segment_end_p (int gen_number,
1509                                   size_t size, 
1510                                   alloc_context* acontext,
1511                                   int align_const,
1512                                   BOOL* commit_failed_p,
1513                                   oom_reason* oom_r);
1514     PER_HEAP
1515     BOOL loh_get_new_seg (generation* gen,
1516                           size_t size,
1517                           int align_const,
1518                           BOOL* commit_failed_p,
1519                           oom_reason* oom_r);
1520
1521     PER_HEAP_ISOLATED
1522     size_t get_large_seg_size (size_t size);
1523
1524     PER_HEAP
1525     BOOL retry_full_compact_gc (size_t size);
1526
1527     PER_HEAP
1528     BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
1529                                  BOOL* did_full_compact_gc);
1530
1531     PER_HEAP
1532     BOOL trigger_full_compact_gc (gc_reason gr, 
1533                                   oom_reason* oom_r);
1534
1535     PER_HEAP
1536     BOOL trigger_ephemeral_gc (gc_reason gr);
1537
1538     PER_HEAP
1539     BOOL soh_try_fit (int gen_number,
1540                       size_t size, 
1541                       alloc_context* acontext,
1542                       int align_const,
1543                       BOOL* commit_failed_p,
1544                       BOOL* short_seg_end_p);
1545     PER_HEAP
1546     BOOL loh_try_fit (int gen_number,
1547                       size_t size, 
1548                       alloc_context* acontext,
1549                       int align_const,
1550                       BOOL* commit_failed_p,
1551                       oom_reason* oom_r);
1552
1553     PER_HEAP
1554     BOOL allocate_small (int gen_number,
1555                          size_t size, 
1556                          alloc_context* acontext,
1557                          int align_const);
1558
1559 #ifdef RECORD_LOH_STATE
1560     #define max_saved_loh_states 12
1561     PER_HEAP
1562     int loh_state_index;
1563
1564     struct loh_state_info
1565     {
1566         allocation_state alloc_state;
1567         EEThreadId thread_id;
1568     };
1569
1570     PER_HEAP
1571     loh_state_info last_loh_states[max_saved_loh_states];
1572     PER_HEAP
1573     void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
1574 #endif //RECORD_LOH_STATE
1575     PER_HEAP
1576     BOOL allocate_large (int gen_number,
1577                          size_t size, 
1578                          alloc_context* acontext,
1579                          int align_const);
1580
1581     PER_HEAP_ISOLATED
1582     int init_semi_shared();
1583     PER_HEAP
1584     int init_gc_heap (int heap_number);
1585     PER_HEAP
1586     void self_destroy();
1587     PER_HEAP_ISOLATED
1588     void destroy_semi_shared();
1589     PER_HEAP
1590     void repair_allocation_contexts (BOOL repair_p);
1591     PER_HEAP
1592     void fix_allocation_contexts (BOOL for_gc_p);
1593     PER_HEAP
1594     void fix_youngest_allocation_area (BOOL for_gc_p);
1595     PER_HEAP
1596     void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
1597                                  int align_const);
1598     PER_HEAP
1599     void fix_large_allocation_area (BOOL for_gc_p);
1600     PER_HEAP
1601     void fix_older_allocation_area (generation* older_gen);
1602     PER_HEAP
1603     void set_allocation_heap_segment (generation* gen);
1604     PER_HEAP
1605     void reset_allocation_pointers (generation* gen, uint8_t* start);
1606     PER_HEAP
1607     int object_gennum (uint8_t* o);
1608     PER_HEAP
1609     int object_gennum_plan (uint8_t* o);
1610     PER_HEAP_ISOLATED
1611     void init_heap_segment (heap_segment* seg);
1612     PER_HEAP
1613     void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
1614 #ifdef FEATURE_BASICFREEZE
1615     PER_HEAP
1616     BOOL insert_ro_segment (heap_segment* seg);
1617     PER_HEAP
1618     void remove_ro_segment (heap_segment* seg);
1619 #endif //FEATURE_BASICFREEZE
1620     PER_HEAP
1621     BOOL set_ro_segment_in_range (heap_segment* seg);
1622     PER_HEAP
1623     BOOL unprotect_segment (heap_segment* seg);
1624     PER_HEAP
1625     heap_segment* soh_get_segment_to_expand();
1626     PER_HEAP
1627     heap_segment* get_segment (size_t size, BOOL loh_p);
1628     PER_HEAP_ISOLATED
1629     void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
1630     PER_HEAP_ISOLATED
1631     void seg_mapping_table_remove_segment (heap_segment* seg);
1632     PER_HEAP
1633     heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc);
1634     PER_HEAP
1635     void thread_loh_segment (heap_segment* new_seg);
1636     PER_HEAP_ISOLATED
1637     heap_segment* get_segment_for_loh (size_t size
1638 #ifdef MULTIPLE_HEAPS
1639                                       , gc_heap* hp
1640 #endif //MULTIPLE_HEAPS
1641                                       );
1642     PER_HEAP
1643     void reset_heap_segment_pages (heap_segment* seg);
1644     PER_HEAP
1645     void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1646     PER_HEAP
1647     void decommit_heap_segment (heap_segment* seg);
1648     PER_HEAP
1649     void clear_gen0_bricks();
1650 #ifdef BACKGROUND_GC
1651     PER_HEAP
1652     void rearrange_small_heap_segments();
1653 #endif //BACKGROUND_GC
1654     PER_HEAP
1655     void rearrange_large_heap_segments();
1656     PER_HEAP
1657     void rearrange_heap_segments(BOOL compacting);
1658
1659     PER_HEAP_ISOLATED
1660     void reset_write_watch_for_gc_heap(void* base_address, size_t region_size);
1661     PER_HEAP_ISOLATED
1662     void get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended);
1663
1664     PER_HEAP
1665     void switch_one_quantum();
1666     PER_HEAP
1667     void reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size);
1668     PER_HEAP
1669     void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1670     PER_HEAP
1671     void reset_write_watch (BOOL concurrent_p);
1672     PER_HEAP
1673     void adjust_ephemeral_limits ();
1674     PER_HEAP
1675     void make_generation (generation& gen, heap_segment* seg,
1676                           uint8_t* start, uint8_t* pointer);
1677
1678
1679 #define USE_PADDING_FRONT 1
1680 #define USE_PADDING_TAIL  2
1681
1682     PER_HEAP
1683     BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1684                      uint8_t* old_loc=0, int use_padding=USE_PADDING_TAIL);
1685     PER_HEAP
1686     BOOL a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1687                        int align_const);
1688
1689     PER_HEAP
1690     void handle_oom (int heap_num, oom_reason reason, size_t alloc_size, 
1691                      uint8_t* allocated, uint8_t* reserved);
1692
1693     PER_HEAP
1694     size_t card_of ( uint8_t* object);
1695     PER_HEAP
1696     uint8_t* brick_address (size_t brick);
1697     PER_HEAP
1698     size_t brick_of (uint8_t* add);
1699     PER_HEAP
1700     uint8_t* card_address (size_t card);
1701     PER_HEAP
1702     size_t card_to_brick (size_t card);
1703     PER_HEAP
1704     void clear_card (size_t card);
1705     PER_HEAP
1706     void set_card (size_t card);
1707     PER_HEAP
1708     BOOL  card_set_p (size_t card);
1709     PER_HEAP
1710     void card_table_set_bit (uint8_t* location);
1711
1712 #ifdef CARD_BUNDLE
1713     PER_HEAP
1714     void update_card_table_bundle();
1715     PER_HEAP
1716     void reset_card_table_write_watch();
1717     PER_HEAP
1718     void card_bundle_clear(size_t cardb);
1719     PER_HEAP
1720     void card_bundle_set (size_t cardb);
1721     PER_HEAP
1722     void card_bundles_set (size_t start_cardb, size_t end_cardb);
1723     PER_HEAP
1724     void verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word);
1725     PER_HEAP
1726     void verify_card_bundles();
1727     PER_HEAP
1728     BOOL card_bundle_set_p (size_t cardb);
1729     PER_HEAP
1730     BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1731     PER_HEAP
1732     void enable_card_bundles();
1733     PER_HEAP_ISOLATED
1734     BOOL card_bundles_enabled();
1735
1736 #endif //CARD_BUNDLE
1737
1738     PER_HEAP
1739     BOOL find_card (uint32_t* card_table, size_t& card,
1740                     size_t card_word_end, size_t& end_card);
1741     PER_HEAP
1742     BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address);
1743     PER_HEAP
1744     int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1745     PER_HEAP
1746     void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
1747                                 short* old_brick_table,
1748                                 heap_segment* seg,
1749                                 uint8_t* start, uint8_t* end);
1750     PER_HEAP
1751     void init_brick_card_range (heap_segment* seg);
1752     PER_HEAP
1753     void copy_brick_card_table_l_heap ();
1754     PER_HEAP
1755     void copy_brick_card_table();
1756     PER_HEAP
1757     void clear_brick_table (uint8_t* from, uint8_t* end);
1758     PER_HEAP
1759     void set_brick (size_t index, ptrdiff_t val);
1760     PER_HEAP
1761     int brick_entry (size_t index);
1762 #ifdef MARK_ARRAY
1763     PER_HEAP
1764     unsigned int mark_array_marked (uint8_t* add);
1765     PER_HEAP
1766     void mark_array_set_marked (uint8_t* add);
1767     PER_HEAP
1768     BOOL is_mark_bit_set (uint8_t* add);
1769     PER_HEAP
1770     void gmark_array_set_marked (uint8_t* add);
1771     PER_HEAP
1772     void set_mark_array_bit (size_t mark_bit);
1773     PER_HEAP
1774     BOOL mark_array_bit_set (size_t mark_bit);
1775     PER_HEAP
1776     void mark_array_clear_marked (uint8_t* add);
1777     PER_HEAP
1778     void clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only=TRUE
1779 #ifdef FEATURE_BASICFREEZE
1780         , BOOL read_only=FALSE
1781 #endif // FEATURE_BASICFREEZE
1782         );
1783 #ifdef BACKGROUND_GC
1784     PER_HEAP
1785     void seg_clear_mark_array_bits_soh (heap_segment* seg);
1786     PER_HEAP
1787     void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1788     PER_HEAP
1789     void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1790     PER_HEAP
1791     void clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p);
1792 #ifdef VERIFY_HEAP
1793     PER_HEAP
1794     void set_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1795     PER_HEAP
1796     void check_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1797 #endif //VERIFY_HEAP
1798 #endif //BACKGROUND_GC
1799 #endif //MARK_ARRAY
1800
1801     PER_HEAP
1802     BOOL large_object_marked (uint8_t* o, BOOL clearp);
1803
1804 #ifdef BACKGROUND_GC
1805     PER_HEAP
1806     BOOL background_allowed_p();
1807 #endif //BACKGROUND_GC
1808
1809     PER_HEAP_ISOLATED
1810     void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1811
1812     PER_HEAP
1813     void check_for_full_gc (int gen_num, size_t size);
1814
1815     PER_HEAP
1816     void adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
1817                        int gen_number);
1818     PER_HEAP
1819     void adjust_limit_clr (uint8_t* start, size_t limit_size,
1820                            alloc_context* acontext, heap_segment* seg,
1821                            int align_const, int gen_number);
1822     PER_HEAP
1823     void  leave_allocation_segment (generation* gen);
1824
1825     PER_HEAP
1826     void init_free_and_plug();
1827
1828     PER_HEAP
1829     void print_free_and_plug (const char* msg);
1830
1831     PER_HEAP
1832     void add_gen_plug (int gen_number, size_t plug_size);
1833
1834     PER_HEAP
1835     void add_gen_free (int gen_number, size_t free_size);
1836
1837     PER_HEAP
1838     void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1839     
1840     PER_HEAP
1841     void remove_gen_free (int gen_number, size_t free_size);
1842
1843     PER_HEAP
1844     uint8_t* allocate_in_older_generation (generation* gen, size_t size,
1845                                         int from_gen_number,
1846                                         uint8_t* old_loc=0
1847                                         REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1848     PER_HEAP
1849     generation*  ensure_ephemeral_heap_segment (generation* consing_gen);
1850     PER_HEAP
1851     uint8_t* allocate_in_condemned_generations (generation* gen,
1852                                              size_t size,
1853                                              int from_gen_number,
1854 #ifdef SHORT_PLUGS
1855                                              BOOL* convert_to_pinned_p=NULL,
1856                                              uint8_t* next_pinned_plug=0,
1857                                              heap_segment* current_seg=0,
1858 #endif //SHORT_PLUGS
1859                                              uint8_t* old_loc=0
1860                                              REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1861 #ifdef INTERIOR_POINTERS
1862     // Verifies that interior is actually in the range of seg; otherwise 
1863     // returns 0.
1864     PER_HEAP_ISOLATED
1865     heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p);
1866
1867     PER_HEAP
1868     heap_segment* find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p);
1869
1870     PER_HEAP
1871     uint8_t* find_object_for_relocation (uint8_t* o, uint8_t* low, uint8_t* high);
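    // A hedged usage sketch of the interior-pointer declarations above: resolve
    // the owning segment first, then walk back to the containing object. The
    // helper below is hypothetical (not a real gc_heap member), and passing the
    // heap's lowest/highest addresses as the scan bounds is an assumption.
    static uint8_t* sketch_object_from_interior (uint8_t* interior)
    {
        // not a GC address at all if no segment contains it
        heap_segment* seg = find_segment (interior, FALSE /*small_segment_only_p*/);
        if (seg == 0)
            return 0;

        // let the owning heap scan back from the interior pointer to the
        // start of the object that contains it
        gc_heap* hp = heap_of (interior);
        return hp->find_object_for_relocation (interior, hp->lowest_address, hp->highest_address);
    }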
1872 #endif //INTERIOR_POINTERS
1873
1874     PER_HEAP_ISOLATED
1875     gc_heap* heap_of (uint8_t* object);
1876
1877     PER_HEAP_ISOLATED
1878     gc_heap* heap_of_gc (uint8_t* object);
1879
1880     PER_HEAP_ISOLATED
1881     size_t&  promoted_bytes (int);
1882
1883     PER_HEAP
1884     uint8_t* find_object (uint8_t* o, uint8_t* low);
1885
1886     PER_HEAP
1887     dynamic_data* dynamic_data_of (int gen_number);
1888     PER_HEAP
1889     ptrdiff_t  get_desired_allocation (int gen_number);
1890     PER_HEAP
1891     ptrdiff_t  get_new_allocation (int gen_number);
1892     PER_HEAP
1893     ptrdiff_t  get_allocation (int gen_number);
1894     PER_HEAP
1895     bool new_allocation_allowed (int gen_number);
1896 #ifdef BACKGROUND_GC
1897     PER_HEAP_ISOLATED
1898     void allow_new_allocation (int gen_number);
1899     PER_HEAP_ISOLATED
1900     void disallow_new_allocation (int gen_number);
1901 #endif //BACKGROUND_GC
1902     PER_HEAP
1903     void reset_pinned_queue();
1904     PER_HEAP
1905     void reset_pinned_queue_bos();
1906     PER_HEAP
1907     void set_allocator_next_pin (generation* gen);
1908     PER_HEAP
1909     void set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit);
1910     PER_HEAP
1911     void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len);
1912     PER_HEAP
1913     void enque_pinned_plug (uint8_t* plug,
1914                             BOOL save_pre_plug_info_p,
1915                             uint8_t* last_object_in_last_plug);
1916     PER_HEAP
1917     void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size);
1918     PER_HEAP
1919     void set_pinned_info (uint8_t* last_pinned_plug,
1920                           size_t plug_len,
1921                           uint8_t* alloc_pointer,
1922                           uint8_t*& alloc_limit);
1923     PER_HEAP
1924     void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen);
1925     PER_HEAP
1926     void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug);
1927     PER_HEAP
1928     size_t deque_pinned_plug ();
1929     PER_HEAP
1930     mark* pinned_plug_of (size_t bos);
1931     PER_HEAP
1932     mark* oldest_pin ();
1933     PER_HEAP
1934     mark* before_oldest_pin();
1935     PER_HEAP
1936     BOOL pinned_plug_que_empty_p ();
1937     PER_HEAP
1938     void make_mark_stack (mark* arr);
1939 #ifdef MH_SC_MARK
1940     PER_HEAP
1941     int& mark_stack_busy();
1942     PER_HEAP
1943     VOLATILE(uint8_t*)& ref_mark_stack (gc_heap* hp, int index);
1944 #endif //MH_SC_MARK
1945 #ifdef BACKGROUND_GC
1946     PER_HEAP_ISOLATED
1947     size_t&  bpromoted_bytes (int);
1948     PER_HEAP
1949     void make_background_mark_stack (uint8_t** arr);
1950     PER_HEAP
1951     void make_c_mark_list (uint8_t** arr);
1952 #endif //BACKGROUND_GC
1953     PER_HEAP
1954     generation* generation_of (int  n);
1955     PER_HEAP
1956     BOOL gc_mark1 (uint8_t* o);
1957     PER_HEAP
1958     BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high);
1959     PER_HEAP
1960     uint8_t* mark_object(uint8_t* o THREAD_NUMBER_DCL);
1961 #ifdef HEAP_ANALYZE
1962     PER_HEAP
1963     void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
1964 #endif //HEAP_ANALYZE
1965     PER_HEAP
1966     void mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
1967     PER_HEAP
1968     void mark_object_simple1 (uint8_t* o, uint8_t* start THREAD_NUMBER_DCL);
1969
1970 #ifdef MH_SC_MARK
1971     PER_HEAP
1972     void mark_steal ();
1973 #endif //MH_SC_MARK
1974
1975 #ifdef BACKGROUND_GC
1976
1977     PER_HEAP
1978     BOOL background_marked (uint8_t* o);
1979     PER_HEAP
1980     BOOL background_mark1 (uint8_t* o);
1981     PER_HEAP
1982     BOOL background_mark (uint8_t* o, uint8_t* low, uint8_t* high);
1983     PER_HEAP
1984     uint8_t* background_mark_object (uint8_t* o THREAD_NUMBER_DCL);
1985     PER_HEAP
1986     void background_mark_simple (uint8_t* o THREAD_NUMBER_DCL);
1987     PER_HEAP
1988     void background_mark_simple1 (uint8_t* o THREAD_NUMBER_DCL);
1989     PER_HEAP_ISOLATED
1990     void background_promote (Object**, ScanContext* , uint32_t);
1991     PER_HEAP
1992     BOOL background_object_marked (uint8_t* o, BOOL clearp);
1993     PER_HEAP
1994     void init_background_gc();
1995     PER_HEAP
1996     uint8_t* background_next_end (heap_segment*, BOOL);
1997     PER_HEAP
1998     void generation_delete_heap_segment (generation*, 
1999                                          heap_segment*, heap_segment*, heap_segment*);
2000     PER_HEAP
2001     void set_mem_verify (uint8_t*, uint8_t*, uint8_t);
2002     PER_HEAP
2003     void process_background_segment_end (heap_segment*, generation*, uint8_t*,
2004                                      heap_segment*, BOOL*);
2005     PER_HEAP
2006     void process_n_background_segments (heap_segment*, heap_segment*, generation* gen);
2007     PER_HEAP
2008     BOOL fgc_should_consider_object (uint8_t* o,
2009                                      heap_segment* seg,
2010                                      BOOL consider_bgc_mark_p,
2011                                      BOOL check_current_sweep_p,
2012                                      BOOL check_saved_sweep_p);
2013     PER_HEAP
2014     void should_check_bgc_mark (heap_segment* seg, 
2015                                 BOOL* consider_bgc_mark_p, 
2016                                 BOOL* check_current_sweep_p,
2017                                 BOOL* check_saved_sweep_p);
2018     PER_HEAP
2019     void background_ephemeral_sweep();
2020     PER_HEAP
2021     void background_sweep ();
2022     PER_HEAP
2023     void background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL);
2024     PER_HEAP
2025     uint8_t* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2026     PER_HEAP
2027     uint8_t* background_first_overflow (uint8_t* min_add,
2028                                      heap_segment* seg,
2029                                      BOOL concurrent_p, 
2030                                      BOOL small_object_p);
2031     PER_HEAP
2032     void background_process_mark_overflow_internal (int condemned_gen_number,
2033                                                     uint8_t* min_add, uint8_t* max_add,
2034                                                     BOOL concurrent_p);
2035     PER_HEAP
2036     BOOL background_process_mark_overflow (BOOL concurrent_p);
2037
2038     // for foreground GC to get hold of background structures containing refs
2039     PER_HEAP
2040     void
2041     scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2042
2043     PER_HEAP
2044     BOOL bgc_mark_array_range (heap_segment* seg, 
2045                                BOOL whole_seg_p,
2046                                uint8_t** range_beg,
2047                                uint8_t** range_end);
2048     PER_HEAP
2049     void bgc_verify_mark_array_cleared (heap_segment* seg);
2050     PER_HEAP
2051     void verify_mark_bits_cleared (uint8_t* obj, size_t s);
2052     PER_HEAP
2053     void clear_all_mark_array();
2054 #endif //BACKGROUND_GC
2055
2056     PER_HEAP
2057     uint8_t* next_end (heap_segment* seg, uint8_t* f);
2058     PER_HEAP
2059     void fix_card_table ();
2060     PER_HEAP
2061     void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2062     PER_HEAP
2063     BOOL process_mark_overflow (int condemned_gen_number);
2064     PER_HEAP
2065     void process_mark_overflow_internal (int condemned_gen_number,
2066                                          uint8_t* min_address, uint8_t* max_address);
2067
2068 #ifdef SNOOP_STATS
2069     PER_HEAP
2070     void print_snoop_stat();
2071 #endif //SNOOP_STATS
2072
2073 #ifdef MH_SC_MARK
2074
2075     PER_HEAP
2076     BOOL check_next_mark_stack (gc_heap* next_heap);
2077
2078 #endif //MH_SC_MARK
2079
2080     PER_HEAP
2081     void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2082
2083     PER_HEAP
2084     void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2085
2086     PER_HEAP
2087     void pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high);
2088
2089 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
2090     PER_HEAP_ISOLATED
2091     size_t get_total_pinned_objects();
2092 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
2093
2094     PER_HEAP
2095     void reset_mark_stack ();
2096     PER_HEAP
2097     uint8_t* insert_node (uint8_t* new_node, size_t sequence_number,
2098                        uint8_t* tree, uint8_t* last_node);
2099     PER_HEAP
2100     size_t update_brick_table (uint8_t* tree, size_t current_brick,
2101                                uint8_t* x, uint8_t* plug_end);
2102
2103     PER_HEAP
2104     void plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate);
2105
2106     PER_HEAP
2107     void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2108
2109     PER_HEAP
2110     void plan_generation_starts (generation*& consing_gen);
2111
2112     PER_HEAP
2113     void advance_pins_for_demotion (generation* gen);
2114
2115     PER_HEAP
2116     void process_ephemeral_boundaries(uint8_t* x, int& active_new_gen_number,
2117                                       int& active_old_gen_number,
2118                                       generation*& consing_gen,
2119                                       BOOL& allocate_in_condemned);
2120     PER_HEAP
2121     void seg_clear_mark_bits (heap_segment* seg);
2122     PER_HEAP
2123     void sweep_ro_segments (heap_segment* start_seg);
2124     PER_HEAP
2125     void convert_to_pinned_plug (BOOL& last_npinned_plug_p, 
2126                                  BOOL& last_pinned_plug_p, 
2127                                  BOOL& pinned_plug_p,
2128                                  size_t ps,
2129                                  size_t& artificial_pinned_size);
2130     PER_HEAP
2131     void store_plug_gap_info (uint8_t* plug_start,
2132                               uint8_t* plug_end,
2133                               BOOL& last_npinned_plug_p, 
2134                               BOOL& last_pinned_plug_p, 
2135                               uint8_t*& last_pinned_plug,
2136                               BOOL& pinned_plug_p,
2137                               uint8_t* last_object_in_last_plug,
2138                               BOOL& merge_with_last_pin_p,
2139                               // this is only for verification purposes
2140                               size_t last_plug_len);
2141     PER_HEAP
2142     void plan_phase (int condemned_gen_number);
2143
2144     PER_HEAP
2145     void record_interesting_data_point (interesting_data_point idp);
2146
2147 #ifdef GC_CONFIG_DRIVEN
2148     PER_HEAP
2149     void record_interesting_info_per_heap();
2150     PER_HEAP_ISOLATED
2151     void record_global_mechanisms();
2152     PER_HEAP_ISOLATED
2153     BOOL should_do_sweeping_gc (BOOL compact_p);
2154 #endif //GC_CONFIG_DRIVEN
2155
2156 #ifdef FEATURE_LOH_COMPACTION
2157     // plan_loh can allocate memory so it can fail. If it fails, we will
2158     // fall back to sweeping.  
2159     PER_HEAP
2160     BOOL plan_loh();
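    // A minimal sketch of the fall-back described above; the helper below is
    // hypothetical and skips the bookkeeping the real code does around it.
    static void sketch_plan_loh_or_sweep (gc_heap* hp)
    {
        // remember the outcome; the compact vs. sweep decision for LOH
        // follows from whether planning succeeded
        hp->loh_compacted_p = hp->plan_loh();
        if (!hp->loh_compacted_p)
        {
            hp->sweep_large_objects();  // planning ran out of memory - sweep instead
        }
    }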
2161
2162     PER_HEAP
2163     void compact_loh();
2164
2165     PER_HEAP
2166     void relocate_in_loh_compact();
2167
2168     PER_HEAP
2169     void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn);
2170
2171     PER_HEAP
2172     BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
2173
2174     PER_HEAP
2175     void loh_set_allocator_next_pin();
2176
2177     PER_HEAP
2178     BOOL loh_pinned_plug_que_empty_p();
2179
2180     PER_HEAP
2181     size_t loh_deque_pinned_plug();
2182
2183     PER_HEAP
2184     mark* loh_pinned_plug_of (size_t bos);
2185
2186     PER_HEAP
2187     mark* loh_oldest_pin();
2188
2189     PER_HEAP
2190     BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit);
2191
2192     PER_HEAP
2193     uint8_t* loh_allocate_in_condemned (uint8_t* old_loc, size_t size);
2194
2195     PER_HEAP_ISOLATED
2196     BOOL loh_object_p (uint8_t* o);
2197
2198     PER_HEAP_ISOLATED
2199     BOOL should_compact_loh();
2200
2201     // If the LOH compaction mode is just to compact once,
2202     // we need to see if we should reset it back to not compact.
2203     // We would only reset if every heap's LOH was compacted.
2204     PER_HEAP_ISOLATED
2205     void check_loh_compact_mode  (BOOL all_heaps_compacted_p);
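    // A hedged sketch of the "compact once" reset rule described above;
    // compact_once_p and the helper itself are hypothetical stand-ins for the
    // real loh_compaction_mode handling.
    static void sketch_reset_compact_once (BOOL& compact_once_p, BOOL all_heaps_compacted_p)
    {
        // the one-shot request is only disarmed when every heap's LOH actually
        // got compacted this GC; otherwise it stays set for the next attempt
        if (compact_once_p && all_heaps_compacted_p)
        {
            compact_once_p = FALSE;
        }
    }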
2206 #endif //FEATURE_LOH_COMPACTION
2207
2208     PER_HEAP
2209     void decommit_ephemeral_segment_pages (int condemned_gen_number);
2210     PER_HEAP
2211     void fix_generation_bounds (int condemned_gen_number,
2212                                 generation* consing_gen);
2213     PER_HEAP
2214     uint8_t* generation_limit (int gen_number);
2215
2216     struct make_free_args
2217     {
2218         int free_list_gen_number;
2219         uint8_t* current_gen_limit;
2220         generation* free_list_gen;
2221         uint8_t* highest_plug;
2222     };
2223     PER_HEAP
2224     uint8_t* allocate_at_end (size_t size);
2225     PER_HEAP
2226     BOOL ensure_gap_allocation (int condemned_gen_number);
2227     // make_free_lists is only called by blocking GCs.
2228     PER_HEAP
2229     void make_free_lists (int condemned_gen_number);
2230     PER_HEAP
2231     void make_free_list_in_brick (uint8_t* tree, make_free_args* args);
2232     PER_HEAP
2233     void thread_gap (uint8_t* gap_start, size_t size, generation*  gen);
2234     PER_HEAP
2235     void loh_thread_gap_front (uint8_t* gap_start, size_t size, generation*  gen);
2236     PER_HEAP
2237     void make_unused_array (uint8_t* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2238     PER_HEAP
2239     void clear_unused_array (uint8_t* x, size_t size);
2240     PER_HEAP
2241     void relocate_address (uint8_t** old_address THREAD_NUMBER_DCL);
2242     struct relocate_args
2243     {
2244         uint8_t* last_plug;
2245         uint8_t* low;
2246         uint8_t* high;
2247         BOOL is_shortened;
2248         mark* pinned_plug_entry;
2249     };
2250
2251     PER_HEAP
2252     void reloc_survivor_helper (uint8_t** pval);
2253     PER_HEAP
2254     void check_class_object_demotion (uint8_t* obj);
2255     PER_HEAP
2256     void check_class_object_demotion_internal (uint8_t* obj);
2257
2258     PER_HEAP 
2259     void check_demotion_helper (uint8_t** pval, uint8_t* parent_obj);
2260
2261     PER_HEAP
2262     void relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end);
2263
2264     PER_HEAP
2265     void verify_pins_with_post_plug_info (const char* msg);
2266
2267 #ifdef COLLECTIBLE_CLASS
2268     PER_HEAP
2269     void unconditional_set_card_collectible (uint8_t* obj);
2270 #endif //COLLECTIBLE_CLASS
2271
2272     PER_HEAP
2273     void relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry);
2274     
2275     PER_HEAP
2276     void relocate_obj_helper (uint8_t* x, size_t s);
2277
2278     PER_HEAP
2279     void reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc);
2280
2281     PER_HEAP
2282     void relocate_pre_plug_info (mark* pinned_plug_entry);
2283
2284     PER_HEAP
2285     void relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned);
2286
2287     PER_HEAP
2288     void relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
2289                                      BOOL check_last_object_p, 
2290                                      mark* pinned_plug_entry);
2291     PER_HEAP
2292     void relocate_survivors_in_brick (uint8_t* tree, relocate_args* args);
2293
2294     PER_HEAP
2295     void update_oldest_pinned_plug();
2296
2297     PER_HEAP
2298     void relocate_survivors (int condemned_gen_number,
2299                              uint8_t* first_condemned_address );
2300     PER_HEAP
2301     void relocate_phase (int condemned_gen_number,
2302                          uint8_t* first_condemned_address);
2303
2304     struct compact_args
2305     {
2306         BOOL copy_cards_p;
2307         uint8_t* last_plug;
2308         ptrdiff_t last_plug_relocation;
2309         uint8_t* before_last_plug;
2310         size_t current_compacted_brick;
2311         BOOL is_shortened;
2312         mark* pinned_plug_entry;
2313         BOOL check_gennum_p;
2314         int src_gennum;
2315
2316         void print()
2317         {
2318             dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2319                 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2320         }
2321     };
2322
2323     PER_HEAP
2324     void copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2325     PER_HEAP
2326     void  gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2327     PER_HEAP
2328     void compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2329     PER_HEAP
2330     void compact_in_brick (uint8_t* tree, compact_args* args);
2331
2332     PER_HEAP
2333     mark* get_next_pinned_entry (uint8_t* tree,
2334                                  BOOL* has_pre_plug_info_p,
2335                                  BOOL* has_post_plug_info_p,
2336                                  BOOL deque_p=TRUE);
2337
2338     PER_HEAP
2339     mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2340
2341     PER_HEAP
2342     void recover_saved_pinned_info();
2343
2344     PER_HEAP
2345     void compact_phase (int condemned_gen_number, uint8_t*
2346                         first_condemned_address, BOOL clear_cards);
2347     PER_HEAP
2348     void clear_cards (size_t start_card, size_t end_card);
2349     PER_HEAP
2350     void clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address);
2351     PER_HEAP
2352     void copy_cards (size_t dst_card, size_t src_card,
2353                      size_t end_card, BOOL nextp);
2354     PER_HEAP
2355     void copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2356
2357 #ifdef BACKGROUND_GC
2358     PER_HEAP
2359     void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2360     PER_HEAP
2361     void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2362 #endif //BACKGROUND_GC
2363
2364
2365     PER_HEAP
2366     BOOL ephemeral_pointer_p (uint8_t* o);
2367     PER_HEAP
2368     void fix_brick_to_highest (uint8_t* o, uint8_t* next_o);
2369     PER_HEAP
2370     uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object);
2371     PER_HEAP
2372     uint8_t* compute_next_boundary (uint8_t* low, int gen_number, BOOL relocating);
2373     PER_HEAP
2374     void keep_card_live (uint8_t* o, size_t& n_gen,
2375                          size_t& cg_pointers_found);
2376     PER_HEAP
2377     void mark_through_cards_helper (uint8_t** poo, size_t& ngen,
2378                                     size_t& cg_pointers_found,
2379                                     card_fn fn, uint8_t* nhigh,
2380                                     uint8_t* next_boundary);
2381
2382     PER_HEAP
2383     BOOL card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
2384                                size_t& cg_pointers_found, 
2385                                size_t& n_eph, size_t& n_card_set,
2386                                size_t& card, size_t& end_card,
2387                                BOOL& foundp, uint8_t*& start_address,
2388                                uint8_t*& limit, size_t& n_cards_cleared);
2389     PER_HEAP
2390     void mark_through_cards_for_segments (card_fn fn, BOOL relocating);
2391
2392     PER_HEAP
2393     void repair_allocation_in_expanded_heap (generation* gen);
2394     PER_HEAP
2395     BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2396     PER_HEAP
2397     BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2398     PER_HEAP
2399     BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2400 #ifdef SEG_REUSE_STATS
2401     PER_HEAP
2402     size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2403 #endif //SEG_REUSE_STATS
2404     PER_HEAP
2405     void build_ordered_free_spaces (heap_segment* seg);
2406     PER_HEAP
2407     void count_plug (size_t last_plug_size, uint8_t*& last_plug);
2408     PER_HEAP
2409     void count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug);
2410     PER_HEAP
2411     void build_ordered_plug_indices ();
2412     PER_HEAP
2413     void init_ordered_free_space_indices ();
2414     PER_HEAP
2415     void trim_free_spaces_indices ();
2416     PER_HEAP
2417     BOOL try_best_fit (BOOL end_of_segment_p);
2418     PER_HEAP
2419     BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2420     PER_HEAP
2421     BOOL process_free_space (heap_segment* seg, 
2422                              size_t free_space,
2423                              size_t min_free_size, 
2424                              size_t min_cont_size,
2425                              size_t* total_free_space,
2426                              size_t* largest_free_space);
2427     PER_HEAP
2428     size_t compute_eph_gen_starts_size();
2429     PER_HEAP
2430     void compute_new_ephemeral_size();
2431     PER_HEAP
2432     BOOL expand_reused_seg_p();
2433     PER_HEAP
2434     BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2435                             size_t min_cont_size, allocator* al);
2436     PER_HEAP
2437     uint8_t* allocate_in_expanded_heap (generation* gen, size_t size,
2438                                      BOOL& adjacentp, uint8_t* old_loc,
2439 #ifdef SHORT_PLUGS
2440                                      BOOL set_padding_on_saved_p,
2441                                      mark* pinned_plug_entry,
2442 #endif //SHORT_PLUGS
2443                                      BOOL consider_bestfit, int active_new_gen_number
2444                                      REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2445     PER_HEAP
2446     void realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
2447                        generation* gen, uint8_t* start_address,
2448                        unsigned int& active_new_gen_number,
2449                        uint8_t*& last_pinned_gap, BOOL& leftp,
2450                        BOOL shortened_p
2451 #ifdef SHORT_PLUGS
2452                        , mark* pinned_plug_entry
2453 #endif //SHORT_PLUGS
2454                        );
2455     PER_HEAP
2456     void realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address,
2457                            generation* gen,
2458                            unsigned int& active_new_gen_number,
2459                            uint8_t*& last_pinned_gap, BOOL& leftp);
2460     PER_HEAP
2461     void realloc_plugs (generation* consing_gen, heap_segment* seg,
2462                         uint8_t* start_address, uint8_t* end_address,
2463                         unsigned active_new_gen_number);
2464
2465     PER_HEAP
2466     void set_expand_in_full_gc (int condemned_gen_number);
2467
2468     PER_HEAP
2469     void verify_no_pins (uint8_t* start, uint8_t* end);
2470
2471     PER_HEAP
2472     generation* expand_heap (int condemned_generation,
2473                              generation* consing_gen,
2474                              heap_segment* new_heap_segment);
2475
2476     PER_HEAP
2477     void save_ephemeral_generation_starts();
2478
2479     PER_HEAP
2480     bool init_dynamic_data ();
2481     PER_HEAP
2482     float surv_to_growth (float cst, float limit, float max_limit);
2483     PER_HEAP
2484     size_t desired_new_allocation (dynamic_data* dd, size_t out,
2485                                    int gen_number, int pass);
2486
2487     PER_HEAP
2488     void trim_youngest_desired_low_memory();
2489
2490     PER_HEAP
2491     void decommit_ephemeral_segment_pages();
2492
2493 #ifdef BIT64
2494     PER_HEAP_ISOLATED
2495     size_t trim_youngest_desired (uint32_t memory_load,
2496                                   size_t total_new_allocation,
2497                                   size_t total_min_allocation);
2498     PER_HEAP_ISOLATED
2499     size_t joined_youngest_desired (size_t new_allocation);
2500 #endif // BIT64
2501     PER_HEAP_ISOLATED
2502     size_t get_total_heap_size ();
2503     PER_HEAP_ISOLATED
2504     size_t get_total_committed_size();
2505
2506     PER_HEAP_ISOLATED
2507     void get_memory_info (uint32_t* memory_load, 
2508                           uint64_t* available_physical=NULL,
2509                           uint64_t* available_page_file=NULL);
2510     PER_HEAP
2511     size_t generation_size (int gen_number);
2512     PER_HEAP_ISOLATED
2513     size_t get_total_survived_size();
2514     PER_HEAP
2515     size_t get_current_allocated();
2516     PER_HEAP_ISOLATED
2517     size_t get_total_allocated();
2518     PER_HEAP
2519     size_t current_generation_size (int gen_number);
2520     PER_HEAP
2521     size_t generation_plan_size (int gen_number);
2522     PER_HEAP
2523     void  compute_promoted_allocation (int gen_number);
2524     PER_HEAP
2525     size_t  compute_in (int gen_number);
2526     PER_HEAP
2527     void compute_new_dynamic_data (int gen_number);
2528     PER_HEAP
2529     gc_history_per_heap* get_gc_data_per_heap();
2530     PER_HEAP
2531     size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2532     PER_HEAP
2533     size_t generation_fragmentation (generation* gen,
2534                                      generation* consing_gen,
2535                                      uint8_t* end);
2536     PER_HEAP
2537     size_t generation_sizes (generation* gen);
2538     PER_HEAP
2539     size_t committed_size();
2540     PER_HEAP
2541     size_t approximate_new_allocation();
2542     PER_HEAP
2543     size_t end_space_after_gc();
2544     PER_HEAP
2545     BOOL decide_on_compacting (int condemned_gen_number,
2546                                size_t fragmentation,
2547                                BOOL& should_expand);
2548     PER_HEAP
2549     BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2550     PER_HEAP
2551     void reset_large_object (uint8_t* o);
2552     PER_HEAP
2553     void sweep_large_objects ();
2554     PER_HEAP
2555     void relocate_in_large_objects ();
2556     PER_HEAP
2557     void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating);
2558     PER_HEAP
2559     void descr_segment (heap_segment* seg);
2560     PER_HEAP
2561     void descr_card_table ();
2562     PER_HEAP
2563     void descr_generations (BOOL begin_gc_p);
2564
2565     PER_HEAP_ISOLATED
2566     void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2567
2568     /*------------ Multiple non isolated heaps ----------------*/
2569 #ifdef MULTIPLE_HEAPS
2570     PER_HEAP_ISOLATED
2571     BOOL   create_thread_support (unsigned number_of_heaps);
2572     PER_HEAP_ISOLATED
2573     void destroy_thread_support ();
2574     PER_HEAP
2575     bool create_gc_thread();
2576     PER_HEAP
2577     void gc_thread_function();
2578 #ifdef MARK_LIST
2579 #ifdef PARALLEL_MARK_LIST_SORT
2580     PER_HEAP
2581     void sort_mark_list();
2582     PER_HEAP
2583     void merge_mark_lists();
2584     PER_HEAP
2585     void append_to_mark_list(uint8_t **start, uint8_t **end);
2586 #else //PARALLEL_MARK_LIST_SORT
2587     PER_HEAP_ISOLATED
2588     void combine_mark_lists();
2589 #endif //PARALLEL_MARK_LIST_SORT
2590 #endif //MARK_LIST
2591 #endif //MULTIPLE_HEAPS
2592
2593     /*------------ End of Multiple non isolated heaps ---------*/
2594
2595 #ifndef SEG_MAPPING_TABLE
2596     PER_HEAP_ISOLATED
2597     heap_segment* segment_of (uint8_t* add,  ptrdiff_t & delta,
2598                               BOOL verify_p = FALSE);
2599 #endif //SEG_MAPPING_TABLE
2600
2601 #ifdef BACKGROUND_GC
2602
2603     // This is called by the revisit_written_pages path below.
2604     PER_HEAP
2605     uint8_t* high_page (heap_segment* seg, BOOL concurrent_p);
2606
2607     PER_HEAP
2608     void revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p,
2609                                heap_segment* seg,  uint8_t*& last_page,
2610                                uint8_t*& last_object, BOOL large_objects_p,
2611                                size_t& num_marked_objects);
2612     PER_HEAP
2613     void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2614
2615     PER_HEAP
2616     void concurrent_scan_dependent_handles (ScanContext *sc);
2617
2618     PER_HEAP_ISOLATED
2619     void suspend_EE ();
2620
2621     PER_HEAP_ISOLATED
2622     void bgc_suspend_EE ();
2623
2624     PER_HEAP_ISOLATED
2625     void restart_EE ();
2626
2627     PER_HEAP
2628     void background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags);
2629
2630     PER_HEAP
2631     void background_scan_dependent_handles (ScanContext *sc);
2632
2633     PER_HEAP
2634     void allow_fgc();
2635
2636     // Restores BGC settings if necessary.
2637     PER_HEAP_ISOLATED
2638     void recover_bgc_settings();
2639
2640     PER_HEAP
2641     void save_bgc_data_per_heap();
2642
2643     PER_HEAP
2644     BOOL should_commit_mark_array();
2645
2646     PER_HEAP
2647     void clear_commit_flag();
2648
2649     PER_HEAP_ISOLATED
2650     void clear_commit_flag_global();
2651
2652     PER_HEAP_ISOLATED
2653     void verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr);
2654
2655     PER_HEAP_ISOLATED
2656     void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr);
2657
2658     PER_HEAP_ISOLATED
2659     BOOL commit_mark_array_by_range (uint8_t* begin,
2660                                      uint8_t* end,
2661                                      uint32_t* mark_array_addr);
2662
2663     PER_HEAP_ISOLATED
2664     BOOL commit_mark_array_new_seg (gc_heap* hp, 
2665                                     heap_segment* seg,
2666                                     uint32_t* new_card_table = 0,
2667                                     uint8_t* new_lowest_address = 0);
2668
2669     PER_HEAP_ISOLATED
2670     BOOL commit_mark_array_with_check (heap_segment* seg, uint32_t* mark_array_addr);
2671
2672     // commit the portion of the mark array that corresponds to 
2673     // this segment (from beginning to reserved).
2674     // seg and heap_segment_reserved (seg) are guaranteed to be 
2675     // page aligned.
2676     PER_HEAP_ISOLATED
2677     BOOL commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr);
2678
2679     // During BGC init, we commit the mark array for all in range
2680     // segments whose mark array hasn't been committed or fully
2681     // committed. All rw segments are in range, only ro segments
2682     // can be partial in range.
2683     PER_HEAP
2684     BOOL commit_mark_array_bgc_init (uint32_t* mark_array_addr);
2685
2686     PER_HEAP
2687     BOOL commit_new_mark_array (uint32_t* new_mark_array);
2688
2689     // We need to commit all segments that intersect with the bgc
2690     // range. If a segment is only partially in range, we still
2691     // should commit the mark array for the whole segment as 
2692     // we will set the mark array commit flag for this segment.
2693     PER_HEAP_ISOLATED
2694     BOOL commit_new_mark_array_global (uint32_t* new_mark_array);
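    // A small sketch of the "intersects with the bgc range" test mentioned
    // above, for a segment spanning [seg_start, seg_end) against
    // [bgc_lowest, bgc_highest); the helper is illustrative only.
    static bool sketch_seg_intersects_bgc_range (uint8_t* seg_start, uint8_t* seg_end,
                                                 uint8_t* bgc_lowest, uint8_t* bgc_highest)
    {
        // standard half-open interval overlap test; a partially covered segment
        // still gets its whole mark array committed, per the comment above.
        return ((seg_start < bgc_highest) && (seg_end > bgc_lowest));
    }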
2695
2696     // We can't decommit the first and the last page in the mark array
2697     // if the beginning and ending don't happen to be page aligned.
2698     PER_HEAP
2699     void decommit_mark_array_by_seg (heap_segment* seg);
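    // A sketch of the page-alignment constraint described above: only fully
    // covered pages of the mark array range can be decommitted, so the start is
    // rounded up and the end rounded down. The helper is hypothetical and
    // OS_PAGE_SIZE as the page-size constant is an assumption.
    static void sketch_decommittable_range (uint8_t* committed_start, uint8_t* committed_end,
                                            uint8_t** decommit_start, uint8_t** decommit_end)
    {
        size_t page_mask = (size_t)OS_PAGE_SIZE - 1;
        // a partially used first or last page has to stay committed
        *decommit_start = (uint8_t*)(((size_t)committed_start + page_mask) & ~page_mask);
        *decommit_end   = (uint8_t*)((size_t)committed_end & ~page_mask);
    }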
2700
2701     PER_HEAP
2702     void background_mark_phase();
2703
2704     PER_HEAP
2705     void background_drain_mark_list (int thread);
2706
2707     PER_HEAP
2708     void background_grow_c_mark_list();
2709
2710     PER_HEAP_ISOLATED
2711     void background_promote_callback(Object** object, ScanContext* sc, uint32_t flags);
2712
2713     PER_HEAP
2714     void mark_absorb_new_alloc();
2715
2716     PER_HEAP
2717     void restart_vm();
2718
2719     PER_HEAP
2720     BOOL prepare_bgc_thread(gc_heap* gh);
2721     PER_HEAP
2722     BOOL create_bgc_thread(gc_heap* gh);
2723     PER_HEAP_ISOLATED
2724     BOOL create_bgc_threads_support (int number_of_heaps);
2725     PER_HEAP
2726     BOOL create_bgc_thread_support();
2727     PER_HEAP_ISOLATED
2728     int check_for_ephemeral_alloc();
2729     PER_HEAP_ISOLATED
2730     void wait_to_proceed();
2731     PER_HEAP_ISOLATED
2732     void fire_alloc_wait_event_begin (alloc_wait_reason awr);
2733     PER_HEAP_ISOLATED
2734     void fire_alloc_wait_event_end (alloc_wait_reason awr);
2735     PER_HEAP
2736     void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored);
2737     PER_HEAP
2738     uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
2739     PER_HEAP_ISOLATED
2740     void start_c_gc();
2741     PER_HEAP
2742     void kill_gc_thread();
2743     PER_HEAP
2744     uint32_t bgc_thread_function();
2745     PER_HEAP_ISOLATED
2746     void do_background_gc();
2747     static
2748     uint32_t __stdcall bgc_thread_stub (void* arg);
2749
2750 #endif //BACKGROUND_GC
2751  
2752 public:
2753
2754     PER_HEAP_ISOLATED
2755     VOLATILE(bool) internal_gc_done;
2756
2757 #ifdef BACKGROUND_GC
2758     PER_HEAP_ISOLATED
2759     uint32_t cm_in_progress;
2760
2761     // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
2762     // we do right before the bgc starts.
2763     PER_HEAP_ISOLATED
2764     BOOL     dont_restart_ee_p;
2765
2766     PER_HEAP_ISOLATED
2767     GCEvent bgc_start_event;
2768 #endif //BACKGROUND_GC
2769
2770     // The variables in this block are known to the DAC and must come first
2771     // in the gc_heap class.
2772
2773     // Keeps track of the highest address allocated by Alloc
2774     PER_HEAP
2775     uint8_t* alloc_allocated;
2776
2777     // The ephemeral heap segment
2778     PER_HEAP
2779     heap_segment* ephemeral_heap_segment;
2780
2781     // The finalize queue.
2782     PER_HEAP
2783     CFinalize* finalize_queue;
2784
2785     // OOM info.
2786     PER_HEAP
2787     oom_history oom_info;
2788
2789     // Interesting data, recorded per-heap.
2790     PER_HEAP
2791     size_t interesting_data_per_heap[max_idp_count];
2792
2793     PER_HEAP
2794     size_t compact_reasons_per_heap[max_compact_reasons_count];
2795
2796     PER_HEAP
2797     size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
2798
2799     PER_HEAP
2800     size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
2801
2802     PER_HEAP
2803     uint8_t** internal_root_array;
2804
2805     PER_HEAP
2806     size_t internal_root_array_index;
2807
2808     PER_HEAP
2809     BOOL heap_analyze_success;
2810
2811     // The generation table. Must always be last.
2812     PER_HEAP
2813     generation generation_table [NUMBERGENERATIONS + 1];
2814
2815     // End DAC zone
2816
2817     PER_HEAP
2818     BOOL expanded_in_fgc;
2819
2820     PER_HEAP_ISOLATED
2821     uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
2822
2823     // Returns TRUE if the thread used to be in cooperative mode 
2824     // before calling this function.
2825     PER_HEAP_ISOLATED
2826     BOOL enable_preemptive (Thread* current_thread);
2827     PER_HEAP_ISOLATED
2828     void disable_preemptive (Thread* current_thread, BOOL restore_cooperative);
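    // A hedged usage sketch of the pair above: remember the mode, switch to
    // preemptive around a wait, then restore. The helper and its use of a
    // GCEvent are illustrative only.
    static void sketch_wait_preemptively (Thread* current_thread, GCEvent* event)
    {
        BOOL cooperative_mode = enable_preemptive (current_thread);  // TRUE if we were cooperative
        event->Wait (INFINITE, FALSE);                               // wait without holding up the GC
        disable_preemptive (current_thread, cooperative_mode);       // restore the original mode
    }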
2829
2830     /* ------------------- per heap members --------------------------*/
2831
2832     PER_HEAP
2833 #ifndef MULTIPLE_HEAPS
2834     GCEvent gc_done_event;
2835 #else // MULTIPLE_HEAPS
2836     GCEvent gc_done_event;
2837 #endif // MULTIPLE_HEAPS
2838
2839     PER_HEAP
2840     VOLATILE(int32_t) gc_done_event_lock;
2841
2842     PER_HEAP
2843     VOLATILE(bool) gc_done_event_set;
2844
2845     PER_HEAP 
2846     void set_gc_done();
2847
2848     PER_HEAP 
2849     void reset_gc_done();
2850
2851     PER_HEAP
2852     void enter_gc_done_event_lock();
2853
2854     PER_HEAP
2855     void exit_gc_done_event_lock();
2856
2857     PER_HEAP
2858     uint8_t*  ephemeral_low;      //lowest ephemeral address
2859
2860     PER_HEAP
2861     uint8_t*  ephemeral_high;     //highest ephemeral address
2862
2863     PER_HEAP
2864     uint32_t* card_table;
2865
2866     PER_HEAP
2867     short* brick_table;
2868
2869 #ifdef MARK_ARRAY
2870     PER_HEAP
2871     uint32_t* mark_array;
2872 #endif //MARK_ARRAY
2873
2874 #ifdef CARD_BUNDLE
2875     PER_HEAP
2876     uint32_t* card_bundle_table;
2877 #endif //CARD_BUNDLE
2878
2879 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2880     PER_HEAP_ISOLATED
2881     sorted_table* seg_table;
2882 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2883
2884     PER_HEAP_ISOLATED
2885     VOLATILE(BOOL) gc_started;
2886
2887     // The following 2 events are there to support the gen2 
2888     // notification feature which is only enabled if concurrent
2889     // GC is disabled.
2890     PER_HEAP_ISOLATED
2891     GCEvent full_gc_approach_event;
2892
2893     PER_HEAP_ISOLATED
2894     GCEvent full_gc_end_event;
2895
2896     // Full GC Notification percentages.
2897     PER_HEAP_ISOLATED
2898     uint32_t fgn_maxgen_percent;
2899
2900     PER_HEAP_ISOLATED
2901     uint32_t fgn_loh_percent;
2902
2903     PER_HEAP_ISOLATED
2904     VOLATILE(bool) full_gc_approach_event_set;
2905
2906 #ifdef BACKGROUND_GC
2907     PER_HEAP_ISOLATED
2908     BOOL fgn_last_gc_was_concurrent;
2909 #endif //BACKGROUND_GC
2910
2911     PER_HEAP
2912     size_t fgn_last_alloc;
2913
2914     static uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
2915
2916     static wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms);
2917
2918     PER_HEAP
2919     uint8_t* demotion_low;
2920
2921     PER_HEAP
2922     uint8_t* demotion_high;
2923
2924     PER_HEAP
2925     BOOL demote_gen1_p;
2926
2927     PER_HEAP
2928     uint8_t* last_gen1_pin_end;
2929
2930     PER_HEAP
2931     gen_to_condemn_tuning gen_to_condemn_reasons;
2932
2933     PER_HEAP
2934     size_t etw_allocation_running_amount[2];
2935
2936     PER_HEAP
2937     int gc_policy;  //sweep, compact, expand
2938
2939 #ifdef MULTIPLE_HEAPS
2940     PER_HEAP_ISOLATED
2941     bool gc_thread_no_affinitize_p;
2942
2943     PER_HEAP_ISOLATED
2944     GCEvent gc_start_event;
2945
2946     PER_HEAP_ISOLATED
2947     GCEvent ee_suspend_event;
2948
2949     PER_HEAP
2950     heap_segment* new_heap_segment;
2951
2952 #define alloc_quantum_balance_units (16)
2953
2954     PER_HEAP_ISOLATED
2955     size_t min_balance_threshold;
2956 #else //MULTIPLE_HEAPS
2957
2958     PER_HEAP
2959     size_t allocation_running_time;
2960
2961     PER_HEAP
2962     size_t allocation_running_amount;
2963
2964 #endif //MULTIPLE_HEAPS
2965
2966     PER_HEAP_ISOLATED
2967     gc_mechanisms settings;
2968
2969     PER_HEAP_ISOLATED
2970     gc_history_global gc_data_global;
2971
2972     PER_HEAP_ISOLATED
2973     size_t gc_last_ephemeral_decommit_time;
2974
2975     PER_HEAP_ISOLATED
2976     size_t gc_gen0_desired_high;
2977
2978     PER_HEAP
2979     size_t gen0_big_free_spaces;
2980
2981 #ifdef SHORT_PLUGS
2982     PER_HEAP_ISOLATED
2983     double short_plugs_pad_ratio;
2984 #endif //SHORT_PLUGS
2985
2986 #ifdef BIT64
2987     PER_HEAP_ISOLATED
2988     size_t youngest_gen_desired_th;
2989 #endif //BIT64
2990
2991     PER_HEAP_ISOLATED
2992     uint32_t high_memory_load_th;
2993
2994     PER_HEAP_ISOLATED
2995     uint64_t mem_one_percent;
2996
2997     PER_HEAP_ISOLATED
2998     uint64_t total_physical_mem;
2999
3000     PER_HEAP_ISOLATED
3001     uint64_t entry_available_physical_mem;
3002
3003     PER_HEAP_ISOLATED
3004     size_t last_gc_index;
3005
3006     PER_HEAP_ISOLATED
3007     size_t min_segment_size;
3008
3009     PER_HEAP
3010     uint8_t* lowest_address;
3011
3012     PER_HEAP
3013     uint8_t* highest_address;
3014
3015     PER_HEAP
3016     BOOL ephemeral_promotion;
3017     PER_HEAP
3018     uint8_t* saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
3019     PER_HEAP
3020     size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
3021
3022 protected:
3023 #ifdef MULTIPLE_HEAPS
3024     PER_HEAP
3025     GCHeap* vm_heap;
3026     PER_HEAP
3027     int heap_number;
3028     PER_HEAP
3029     VOLATILE(int) alloc_context_count;
3030 #else //MULTIPLE_HEAPS
3031 #define vm_heap ((GCHeap*) g_theGCHeap)
3032 #define heap_number (0)
3033 #endif //MULTIPLE_HEAPS
3034
3035     PER_HEAP
3036     size_t time_bgc_last;
3037
3038     PER_HEAP
3039     uint8_t*       gc_low; // lowest address being condemned
3040
3041     PER_HEAP
3042     uint8_t*       gc_high; //highest address being condemned
3043
3044     PER_HEAP
3045     size_t      mark_stack_tos;
3046
3047     PER_HEAP
3048     size_t      mark_stack_bos;
3049
3050     PER_HEAP
3051     size_t      mark_stack_array_length;
3052
3053     PER_HEAP
3054     mark*       mark_stack_array;
3055
3056     PER_HEAP
3057     BOOL        verify_pinned_queue_p;
3058
3059     PER_HEAP
3060     uint8_t*    oldest_pinned_plug;
3061
3062 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
3063     PER_HEAP
3064     size_t      num_pinned_objects;
3065 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
3066
3067 #ifdef FEATURE_LOH_COMPACTION
3068     PER_HEAP
3069     size_t      loh_pinned_queue_tos;
3070
3071     PER_HEAP
3072     size_t      loh_pinned_queue_bos;
3073
3074     PER_HEAP
3075     size_t      loh_pinned_queue_length;
3076
3077     PER_HEAP_ISOLATED
3078     int         loh_pinned_queue_decay;
3079
3080     PER_HEAP
3081     mark*       loh_pinned_queue;
3082
3083     // This is for forced LOH compaction via the complus env var
3084     PER_HEAP_ISOLATED
3085     BOOL        loh_compaction_always_p;
3086
3087     // This is set by the user.
3088     PER_HEAP_ISOLATED
3089     gc_loh_compaction_mode loh_compaction_mode;
3090
3091     // We may not compact LOH on every heap if we can't
3092     // grow the pinned queue. This is to indicate whether
3093     // this heap's LOH is compacted or not. So even if
3094     // settings.loh_compaction is TRUE this may not be TRUE.
3095     PER_HEAP
3096     BOOL        loh_compacted_p;
3097 #endif //FEATURE_LOH_COMPACTION
3098
3099 #ifdef BACKGROUND_GC
3100
3101     PER_HEAP
3102     EEThreadId bgc_thread_id;
3103
3104 #ifdef WRITE_WATCH
3105     PER_HEAP
3106     uint8_t* background_written_addresses [array_size+2];
3107 #endif //WRITE_WATCH
3108
3109     PER_HEAP_ISOLATED
3110     VOLATILE(c_gc_state) current_c_gc_state;     //tells the large object allocator to
3111                                                  //mark the object as new since the start of gc.
3112
3113     PER_HEAP_ISOLATED
3114     gc_mechanisms saved_bgc_settings;
3115
3116     PER_HEAP
3117     gc_history_per_heap bgc_data_per_heap;
3118
3119     PER_HEAP
3120     BOOL bgc_thread_running; // gc thread is in its main loop
3121
3122     PER_HEAP_ISOLATED
3123     BOOL keep_bgc_threads_p;
3124
3125     // This event is used by BGC threads to do something on 
3126     // one specific thread while other BGC threads have to 
3127     // wait. This is different from a join because you can't
3128     // specify which thread should be doing some task
3129     // while other threads have to wait.
3130     // For example, to make the BGC threads managed threads 
3131     // we need to create them on the thread that called 
3132     // SuspendEE which is heap 0.
3133     PER_HEAP_ISOLATED
3134     GCEvent bgc_threads_sync_event;
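    // A minimal sketch of the "one thread works, the others wait" pattern the
    // event above supports; how the designated thread is chosen and the missing
    // event reset are simplifications, and the helper is hypothetical.
    static void sketch_do_on_one_bgc_thread (BOOL designated_thread_p, void (*task)())
    {
        if (designated_thread_p)
        {
            task();                                         // only the designated BGC thread runs the task
            bgc_threads_sync_event.Set();                   // then releases the other BGC threads
        }
        else
        {
            bgc_threads_sync_event.Wait (INFINITE, FALSE);  // everyone else waits for it to finish
        }
    }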
3135
3136     PER_HEAP
3137     Thread* bgc_thread;
3138
3139     PER_HEAP
3140     CLRCriticalSection bgc_threads_timeout_cs;
3141
3142     PER_HEAP_ISOLATED
3143     GCEvent background_gc_done_event;
3144
3145     PER_HEAP_ISOLATED
3146     GCEvent ee_proceed_event;
3147
3148     PER_HEAP
3149     GCEvent gc_lh_block_event;
3150
3151     PER_HEAP_ISOLATED
3152     bool gc_can_use_concurrent;
3153
3154     PER_HEAP_ISOLATED
3155     bool temp_disable_concurrent_p;
3156
3157     PER_HEAP_ISOLATED
3158     BOOL do_ephemeral_gc_p;
3159
3160     PER_HEAP_ISOLATED
3161     BOOL do_concurrent_p;
3162
3163     PER_HEAP
3164     VOLATILE(bgc_state) current_bgc_state;
3165
3166     struct gc_history
3167     {
3168         size_t gc_index;
3169         bgc_state current_bgc_state;
3170         uint32_t gc_time_ms;
3171         // This is in bytes per ms; consider breaking it 
3172         // into the efficiency per phase.
3173         size_t gc_efficiency; 
3174         uint8_t* eph_low;
3175         uint8_t* gen0_start;
3176         uint8_t* eph_high;
3177         uint8_t* bgc_highest;
3178         uint8_t* bgc_lowest;
3179         uint8_t* fgc_highest;
3180         uint8_t* fgc_lowest;
3181         uint8_t* g_highest;
3182         uint8_t* g_lowest;
3183     };
3184
3185 #define max_history_count 64
3186
3187     PER_HEAP
3188     int gchist_index_per_heap;
3189
3190     PER_HEAP
3191     gc_history gchist_per_heap[max_history_count];
3192
3193     PER_HEAP_ISOLATED
3194     int gchist_index;
3195
3196     PER_HEAP_ISOLATED
3197     gc_mechanisms_store gchist[max_history_count];
3198
3199     PER_HEAP
3200     void add_to_history_per_heap();
3201
3202     PER_HEAP_ISOLATED
3203     void add_to_history();
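    // The history arrays above are fixed-size; a hedged sketch of how a
    // recorder would advance its index, assuming it wraps after
    // max_history_count entries so the newest record overwrites the oldest.
    static int sketch_next_history_index (int current_index)
    {
        return (current_index + 1) % max_history_count;  // 0..63, then back to 0
    }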
3204
3205     PER_HEAP
3206     size_t total_promoted_bytes;
3207
3208     PER_HEAP
3209     size_t     bgc_overflow_count;
3210
3211     PER_HEAP
3212     size_t     bgc_begin_loh_size;
3213     PER_HEAP
3214     size_t     end_loh_size;
3215
3216     // We need to throttle the LOH allocations during BGC since we can't
3217     // collect LOH when BGC is in progress. 
3218     // We allow the LOH heap size to double during a BGC. So for every
3219     // 10% increase we will have the LOH allocating thread sleep for one more
3220     // ms. So if we are already 30% over the original heap size, the thread will
3221     // sleep for 3ms.
3222     PER_HEAP
3223     uint32_t   bgc_alloc_spin_loh;
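    // The throttle described above as arithmetic: one extra millisecond of
    // sleep per 10% of LOH growth since the BGC began, so 30% growth maps to
    // 3ms. A sketch only; the helper name and parameters are hypothetical.
    static uint32_t sketch_loh_alloc_spin_ms (size_t begin_loh_size, size_t current_loh_size)
    {
        if ((begin_loh_size == 0) || (current_loh_size <= begin_loh_size))
            return 0;
        size_t growth = current_loh_size - begin_loh_size;
        // growth expressed in tenths of the starting size: 10% -> 1ms, 30% -> 3ms
        return (uint32_t)((growth * 10) / begin_loh_size);
    }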
3224
3225     // This includes what we allocate at the end of the segment - allocating
3226     // in the free list doesn't increase the heap size.
3227     PER_HEAP
3228     size_t     bgc_loh_size_increased;
3229
3230     PER_HEAP
3231     size_t     bgc_loh_allocated_in_free;
3232
3233     PER_HEAP
3234     size_t     background_soh_alloc_count;
3235
3236     PER_HEAP
3237     size_t     background_loh_alloc_count;
3238
3239     PER_HEAP
3240     uint8_t**  background_mark_stack_tos;
3241
3242     PER_HEAP
3243     uint8_t**  background_mark_stack_array;
3244
3245     PER_HEAP
3246     size_t    background_mark_stack_array_length;
3247
3248     PER_HEAP
3249     uint8_t*  background_min_overflow_address;
3250
3251     PER_HEAP
3252     uint8_t*  background_max_overflow_address;
3253
3254     // We can't process the soh range concurrently so we
3255     // wait till final mark to process it.
3256     PER_HEAP
3257     BOOL      processed_soh_overflow_p;
3258
3259     PER_HEAP
3260     uint8_t*  background_min_soh_overflow_address;
3261
3262     PER_HEAP
3263     uint8_t*  background_max_soh_overflow_address;
3264
3265     PER_HEAP
3266     heap_segment* saved_overflow_ephemeral_seg;
3267
3268     PER_HEAP
3269     heap_segment* saved_sweep_ephemeral_seg;
3270
3271     PER_HEAP
3272     uint8_t* saved_sweep_ephemeral_start;
3273
3274     PER_HEAP
3275     uint8_t* background_saved_lowest_address;
3276
3277     PER_HEAP
3278     uint8_t* background_saved_highest_address;
3279
3280     // This is used for synchronization between the bgc thread
3281     // for this heap and the user threads allocating on this
3282     // heap.
3283     PER_HEAP
3284     exclusive_sync* bgc_alloc_lock;
3285
3286 #ifdef SNOOP_STATS
3287     PER_HEAP
3288     snoop_stats_data snoop_stat;
3289 #endif //SNOOP_STATS
3290
3291
3292     PER_HEAP
3293     uint8_t**          c_mark_list;
3294
3295     PER_HEAP
3296     size_t          c_mark_list_length;
3297
3298     PER_HEAP
3299     size_t          c_mark_list_index;
3300 #endif //BACKGROUND_GC
3301
3302 #ifdef MARK_LIST
3303     PER_HEAP
3304     uint8_t** mark_list;
3305
3306     PER_HEAP_ISOLATED
3307     size_t mark_list_size;
3308
3309     PER_HEAP
3310     uint8_t** mark_list_end;
3311
3312     PER_HEAP
3313     uint8_t** mark_list_index;
3314
3315     PER_HEAP_ISOLATED
3316     uint8_t** g_mark_list;
3317 #ifdef PARALLEL_MARK_LIST_SORT
3318     PER_HEAP_ISOLATED
3319     uint8_t** g_mark_list_copy;
3320     PER_HEAP
3321     uint8_t*** mark_list_piece_start;
3322     uint8_t*** mark_list_piece_end;
3323 #endif //PARALLEL_MARK_LIST_SORT
3324 #endif //MARK_LIST
3325
3326     PER_HEAP
3327     uint8_t*  min_overflow_address;
3328
3329     PER_HEAP
3330     uint8_t*  max_overflow_address;
3331
3332     PER_HEAP
3333     uint8_t*  shigh; //keeps track of the highest marked object
3334
3335     PER_HEAP
3336     uint8_t*  slow; //keeps track of the lowest marked object
3337
3338     PER_HEAP
3339     size_t allocation_quantum;
3340
3341     PER_HEAP
3342     size_t alloc_contexts_used;
3343
3344     PER_HEAP_ISOLATED
3345     no_gc_region_info current_no_gc_region_info;
3346
3347     PER_HEAP
3348     size_t soh_allocation_no_gc;
3349
3350     PER_HEAP
3351     size_t loh_allocation_no_gc;
3352
3353     PER_HEAP
3354     bool no_gc_oom_p;
3355
3356     PER_HEAP
3357     heap_segment* saved_loh_segment_no_gc;
3358
3359     PER_HEAP_ISOLATED
3360     BOOL proceed_with_gc_p;
3361
3362 #define youngest_generation (generation_of (0))
3363 #define large_object_generation (generation_of (max_generation+1))
3364
3365     // The more_space_lock and gc_lock are used for 3 purposes:
3366     //
3367     // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock)
3368     // 2) to synchronize allocations of large objects (more_space_lock)
3369     // 3) to synchronize the GC itself (gc_lock)
3370     //
3371     PER_HEAP_ISOLATED
3372     GCSpinLock gc_lock; //lock while doing GC
3373
3374     PER_HEAP
3375     GCSpinLock more_space_lock; //lock while allocating more space
3376
3377 #ifdef SYNCHRONIZATION_STATS
3378
3379     PER_HEAP
3380     unsigned int good_suspension;
3381
3382     PER_HEAP
3383     unsigned int bad_suspension;
3384
3385     // Number of times when msl_acquire is > 200 cycles.
3386     PER_HEAP
3387     unsigned int num_high_msl_acquire;
3388
3389     // Number of times when msl_acquire is < 200 cycles.
3390     PER_HEAP
3391     unsigned int num_low_msl_acquire;
3392
3393     // Number of times the more_space_lock is acquired.
3394     PER_HEAP
3395     unsigned int num_msl_acquired;
3396
3397     // Total cycles it takes to acquire the more_space_lock.
3398     PER_HEAP
3399     uint64_t total_msl_acquire;
3400
3401     PER_HEAP
3402     void init_heap_sync_stats()
3403     {
3404         good_suspension = 0;
3405         bad_suspension = 0;
3406         num_msl_acquired = 0;
3407         total_msl_acquire = 0;
3408         num_high_msl_acquire = 0;
3409         num_low_msl_acquire = 0;
3410         more_space_lock.init();
3411         gc_lock.init();
3412     }
3413
3414     PER_HEAP
3415     void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3416     {
3417         printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3418             heap_num,
3419             (int)alloc_contexts_used, // size_t - cast to match the %2d specifier
3420             good_suspension,
3421             bad_suspension,
3422             (unsigned int)(total_msl_acquire / gc_count_during_log),
3423             num_high_msl_acquire / gc_count_during_log,
3424             num_low_msl_acquire / gc_count_during_log,
3425             num_msl_acquired / gc_count_during_log,
3426             more_space_lock.num_switch_thread / gc_count_during_log,
3427             more_space_lock.num_wait_longer / gc_count_during_log,
3428             more_space_lock.num_switch_thread_w / gc_count_during_log,
3429             more_space_lock.num_disable_preemptive_w / gc_count_during_log);
3430     }
3431
3432 #endif //SYNCHRONIZATION_STATS
3433
3434 #define NUM_LOH_ALIST (7)
3435 #define BASE_LOH_ALIST (64*1024)
3436     PER_HEAP 
3437     alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
3438
3439 #define NUM_GEN2_ALIST (12)
3440 #ifdef BIT64
3441 #define BASE_GEN2_ALIST (1*256)
3442 #else
3443 #define BASE_GEN2_ALIST (1*128)
3444 #endif // BIT64
3445     PER_HEAP
3446     alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
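    // Illustrative sketch only, under the assumption that these buckets are
    // sized in powers of two starting at the BASE_* value (so the gen2 buckets
    // would cover < 256, < 512, < 1024, ... bytes on 64-bit); picking a bucket
    // for a free item of a given size could then look like:
    //
    //   unsigned int bucket = 0;
    //   size_t limit = BASE_GEN2_ALIST;
    //   while ((bucket < (NUM_GEN2_ALIST - 1)) && (size >= limit))
    //   {
    //       limit *= 2;
    //       bucket++;
    //   }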
3447
3448 //------------------------------------------    
3449
3450     PER_HEAP
3451     dynamic_data dynamic_data_table [NUMBERGENERATIONS+1];
3452
3453     PER_HEAP
3454     gc_history_per_heap gc_data_per_heap;
3455
3456     PER_HEAP
3457     size_t maxgen_pinned_compact_before_advance;
3458
3459     // dynamic tuning.
3460     PER_HEAP
3461     BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
3462     // if elevate_p is FALSE, it means we are determining fragmentation for a generation
3463     // to see if we should condemn this gen; otherwise it means we are determining if
3464     // we should elevate to doing max_gen from an ephemeral gen.
3465     PER_HEAP
3466     BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
3467     PER_HEAP
3468     BOOL 
3469     dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
3470     PER_HEAP
3471     BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
3472     PER_HEAP
3473     BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
3474
3475     PER_HEAP
3476     int generation_skip_ratio;//in %
3477
3478     PER_HEAP
3479     BOOL gen0_bricks_cleared;
3480 #ifdef FFIND_OBJECT
3481     PER_HEAP
3482     int gen0_must_clear_bricks;
3483 #endif //FFIND_OBJECT
3484     
3485     PER_HEAP_ISOLATED
3486     size_t full_gc_counts[gc_type_max];
3487
3488     // the # of LOH bytes allocated since the last full compacting GC.
3489     PER_HEAP
3490     uint64_t loh_alloc_since_cg;
3491
3492     PER_HEAP
3493     BOOL elevation_requested;
3494
3495     // if this is TRUE, we should always guarantee that we do a 
3496     // full compacting GC before we OOM.
3497     PER_HEAP
3498     BOOL last_gc_before_oom;
3499
3500     PER_HEAP_ISOLATED
3501     BOOL should_expand_in_full_gc;
3502
3503 #ifdef BACKGROUND_GC
3504     PER_HEAP_ISOLATED
3505     size_t ephemeral_fgc_counts[max_generation];
3506
3507     PER_HEAP_ISOLATED
3508     BOOL alloc_wait_event_p;
3509
3510     PER_HEAP
3511     uint8_t* next_sweep_obj;
3512
3513     PER_HEAP
3514     uint8_t* current_sweep_pos;
3515
3516 #endif //BACKGROUND_GC
3517
3518     PER_HEAP
3519     fgm_history fgm_result;
3520
3521     PER_HEAP_ISOLATED
3522     size_t eph_gen_starts_size;
3523
3524 #ifdef GC_CONFIG_DRIVEN
3525     PER_HEAP_ISOLATED
3526     size_t time_init;
3527
3528     PER_HEAP_ISOLATED
3529     size_t time_since_init;
3530
3531     // 0 stores compacting GCs;
3532     // 1 stores sweeping GCs;
3533     PER_HEAP_ISOLATED
3534     size_t compact_or_sweep_gcs[2];
3535
3536     PER_HEAP
3537     size_t interesting_data_per_gc[max_idp_count];
3538
3539 #endif //GC_CONFIG_DRIVEN
3540
3541     PER_HEAP
3542     BOOL        ro_segments_in_range;
3543
3544 #ifdef BACKGROUND_GC
3545     PER_HEAP
3546     heap_segment* freeable_small_heap_segment;
3547 #endif //BACKGROUND_GC
3548
3549     PER_HEAP
3550     heap_segment* freeable_large_heap_segment;
3551
3552     PER_HEAP_ISOLATED
3553     heap_segment* segment_standby_list;
3554
3555     PER_HEAP
3556     size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
3557
3558     PER_HEAP
3559     size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
3560
3561     PER_HEAP
3562     size_t ordered_plug_indices[MAX_NUM_BUCKETS];
3563
3564     PER_HEAP
3565     size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
3566
3567     PER_HEAP
3568     BOOL ordered_plug_indices_init;
3569
3570     PER_HEAP
3571     BOOL use_bestfit;
3572
3573     PER_HEAP
3574     uint8_t* bestfit_first_pin;
3575
3576     PER_HEAP
3577     BOOL commit_end_of_seg;
3578
3579     PER_HEAP
3580     size_t max_free_space_items; // dynamically adjusted.
3581
3582     PER_HEAP
3583     size_t free_space_buckets;
3584
3585     PER_HEAP
3586     size_t free_space_items;
3587
3588     // -1 means we are using all the free
3589     // spaces we have (not including
3590     // end of seg space).
3591     PER_HEAP
3592     int trimmed_free_space_index;
3593
3594     PER_HEAP
3595     size_t total_ephemeral_plugs;
3596
3597     PER_HEAP
3598     seg_free_spaces* bestfit_seg;
3599
3600     // Note: we know this from the plan phase.
3601     // total_ephemeral_plugs actually has the same value,
3602     // but while calculating this one we also record
3603     // how big the plugs are for best fit, which we
3604     // don't do in the plan phase.
3605     // TODO: get rid of total_ephemeral_plugs.
3606     PER_HEAP
3607     size_t total_ephemeral_size;
3608
3609 public:
3610
3611 #ifdef HEAP_ANALYZE
3612
3613     PER_HEAP_ISOLATED
3614     BOOL heap_analyze_enabled;
3615
3616     PER_HEAP
3617     size_t internal_root_array_length;
3618
3619     // next two fields are used to optimize the search for the object 
3620     // enclosing the current reference handled by ha_mark_object_simple.
3621     PER_HEAP
3622     uint8_t*  current_obj;
3623
3624     PER_HEAP
3625     size_t current_obj_size;
3626
3627 #endif //HEAP_ANALYZE
3628
3629     /* ----------------------- global members ----------------------- */
3630 public:
3631
3632     PER_HEAP
3633     int         condemned_generation_num;
3634
3635     PER_HEAP
3636     BOOL        blocking_collection;
3637
3638 #ifdef MULTIPLE_HEAPS
3639     static
3640     int n_heaps;
3641
3642     static
3643     gc_heap** g_heaps;
3644
3645     static
3646     size_t*   g_promoted;
3647 #ifdef BACKGROUND_GC
3648     static
3649     size_t*   g_bpromoted;
3650 #endif //BACKGROUND_GC
3651 #ifdef MH_SC_MARK
3652     PER_HEAP_ISOLATED
3653     int*  g_mark_stack_busy;
3654 #endif //MH_SC_MARK
3655 #else
3656     static
3657     size_t    g_promoted;
3658 #ifdef BACKGROUND_GC
3659     static
3660     size_t    g_bpromoted;
3661 #endif //BACKGROUND_GC
3662 #endif //MULTIPLE_HEAPS
3663     
3664     static
3665     size_t reserved_memory;
3666     static
3667     size_t reserved_memory_limit;
3668     static
3669     BOOL      g_low_memory_status;
3670
3671 protected:
3672     PER_HEAP
3673     void update_collection_counts ();
3674
3675 }; // class gc_heap
3676
3677 #define ASSERT_OFFSETS_MATCH(field) \
3678   static_assert_no_msg(offsetof(dac_gc_heap, field) == offsetof(gc_heap, field))
3679
3680 #ifdef MULTIPLE_HEAPS
3681 ASSERT_OFFSETS_MATCH(alloc_allocated);
3682 ASSERT_OFFSETS_MATCH(ephemeral_heap_segment);
3683 ASSERT_OFFSETS_MATCH(finalize_queue);
3684 ASSERT_OFFSETS_MATCH(oom_info);
3685 ASSERT_OFFSETS_MATCH(interesting_data_per_heap);
3686 ASSERT_OFFSETS_MATCH(compact_reasons_per_heap);
3687 ASSERT_OFFSETS_MATCH(expand_mechanisms_per_heap);
3688 ASSERT_OFFSETS_MATCH(interesting_mechanism_bits_per_heap);
3689 ASSERT_OFFSETS_MATCH(internal_root_array);
3690 ASSERT_OFFSETS_MATCH(internal_root_array_index);
3691 ASSERT_OFFSETS_MATCH(heap_analyze_success);
3692 ASSERT_OFFSETS_MATCH(generation_table);
3693 #endif // MULTIPLE_HEAPS
3694
3695 #ifdef FEATURE_PREMORTEM_FINALIZATION
3696 class CFinalize
3697 {
3698 #ifdef DACCESS_COMPILE
3699     friend class ::ClrDataAccess;
3700 #endif // DACCESS_COMPILE
3701
3702     friend class CFinalizeStaticAsserts;
3703
3704 private:
3705
3706     // To add a segment, adjust ExtraSegCount and add a constant for the new segment.
3707     static const int ExtraSegCount = 2;
3708     static const int FinalizerListSeg = NUMBERGENERATIONS+1;
3709     static const int CriticalFinalizerListSeg = NUMBERGENERATIONS;
3710     //Does not correspond to a segment
3711     static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
3712
3713     PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
3714     PTR_PTR_Object m_Array;
3715     PTR_PTR_Object m_EndArray;
3716     size_t   m_PromotedCount;
3717     
3718     VOLATILE(int32_t) lock;
3719 #ifdef _DEBUG
3720     EEThreadId lockowner_threadid;
3721 #endif // _DEBUG
3722
3723     BOOL GrowArray();
3724     void MoveItem (Object** fromIndex,
3725                    unsigned int fromSeg,
3726                    unsigned int toSeg);
3727
3728     inline PTR_PTR_Object& SegQueue (unsigned int Seg)
3729     {
3730         return (Seg ? m_FillPointers [Seg-1] : m_Array);
3731     }
3732     inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
3733     {
3734         return m_FillPointers [Seg];
3735     }
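    // Layout implied by the two accessors above: segment Seg occupies the
    // half-open range [SegQueue (Seg), SegQueueLimit (Seg)), i.e. segment 0
    // runs from m_Array to m_FillPointers[0], segment 1 from m_FillPointers[0]
    // to m_FillPointers[1], and so on.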
3736
3737     BOOL IsSegEmpty (unsigned int i)
3738     {
3739         ASSERT ((int)i < FreeList);
3740         return (SegQueueLimit(i) == SegQueue (i));
3742     }
3743
3744     BOOL FinalizeSegForAppDomain (AppDomain *pDomain, 
3745                                   BOOL fRunFinalizers, 
3746                                   unsigned int Seg);
3747
3748 public:
3749     ~CFinalize();
3750     bool Initialize();
3751     void EnterFinalizeLock();
3752     void LeaveFinalizeLock();
3753     bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
3754     Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
3755     BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
3756     void RelocateFinalizationData (int gen, gc_heap* hp);
3757     void WalkFReachableObjects (fq_walk_fn fn);
3758     void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
3759     void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
3760     size_t GetPromotedCount();
3761
3762     //Methods used by the shutdown code to call every finalizer
3763     void SetSegForShutDown(BOOL fHasLock);
3764     size_t GetNumberFinalizableObjects();
3765     void DiscardNonCriticalObjects();
3766
3767     //Methods used by the app domain unloading call to finalize objects in an app domain
3768     bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers);
3769
3770     void CheckFinalizerObjects();
3771
3772 };
3773
3774 class CFinalizeStaticAsserts {
3775     static_assert(dac_finalize_queue::ExtraSegCount == CFinalize::ExtraSegCount, "ExtraSegCount mismatch");
3776     static_assert(offsetof(dac_finalize_queue, m_FillPointers) == offsetof(CFinalize, m_FillPointers), "CFinalize layout mismatch");
3777 };
3778
3779
3780 #endif // FEATURE_PREMORTEM_FINALIZATION
3781
3782 inline
3783  size_t& dd_begin_data_size (dynamic_data* inst)
3784 {
3785   return inst->begin_data_size;
3786 }
3787 inline
3788  size_t& dd_survived_size (dynamic_data* inst)
3789 {
3790   return inst->survived_size;
3791 }
3792 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
3793 inline
3794  size_t& dd_num_npinned_plugs(dynamic_data* inst)
3795 {
3796   return inst->num_npinned_plugs;
3797 }
3798 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
3799 inline
3800 size_t& dd_pinned_survived_size (dynamic_data* inst)
3801 {
3802   return inst->pinned_survived_size;
3803 }
3804 inline
3805 size_t& dd_added_pinned_size (dynamic_data* inst)
3806 {
3807   return inst->added_pinned_size;
3808 }
3809 inline
3810 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
3811 {
3812   return inst->artificial_pinned_survived_size;
3813 }
3814 #ifdef SHORT_PLUGS
3815 inline
3816 size_t& dd_padding_size (dynamic_data* inst)
3817 {
3818   return inst->padding_size;
3819 }
3820 #endif //SHORT_PLUGS
3821 inline
3822  size_t& dd_current_size (dynamic_data* inst)
3823 {
3824   return inst->current_size;
3825 }
3826 inline
3827 float& dd_surv (dynamic_data* inst)
3828 {
3829   return inst->surv;
3830 }
3831 inline
3832 size_t& dd_freach_previous_promotion (dynamic_data* inst)
3833 {
3834   return inst->freach_previous_promotion;
3835 }
3836 inline
3837 size_t& dd_desired_allocation (dynamic_data* inst)
3838 {
3839   return inst->desired_allocation;
3840 }
3841 inline
3842 size_t& dd_collection_count (dynamic_data* inst)
3843 {
3844     return inst->collection_count;
3845 }
3846 inline
3847 size_t& dd_promoted_size (dynamic_data* inst)
3848 {
3849     return inst->promoted_size;
3850 }
3851 inline
3852 float& dd_limit (dynamic_data* inst)
3853 {
3854   return inst->limit;
3855 }
3856 inline
3857 float& dd_max_limit (dynamic_data* inst)
3858 {
3859   return inst->max_limit;
3860 }
3861 inline
3862 size_t& dd_min_gc_size (dynamic_data* inst)
3863 {
3864   return inst->min_gc_size;
3865 }
3866 inline
3867 size_t& dd_max_size (dynamic_data* inst)
3868 {
3869   return inst->max_size;
3870 }
3871 inline
3872 size_t& dd_min_size (dynamic_data* inst)
3873 {
3874   return inst->min_size;
3875 }
3876 inline
3877 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
3878 {
3879   return inst->new_allocation;
3880 }
3881 inline
3882 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
3883 {
3884   return inst->gc_new_allocation;
3885 }
3886 inline
3887 size_t& dd_default_new_allocation (dynamic_data* inst)
3888 {
3889   return inst->default_new_allocation;
3890 }
3891 inline
3892 size_t& dd_fragmentation_limit (dynamic_data* inst)
3893 {
3894   return inst->fragmentation_limit;
3895 }
3896 inline
3897 float& dd_fragmentation_burden_limit (dynamic_data* inst)
3898 {
3899   return inst->fragmentation_burden_limit;
3900 }
3901 inline
3902 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
3903 {
3904   return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
3905 }
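// Worked example for dd_v_fragmentation_burden_limit (illustrative numbers only):
// a fragmentation_burden_limit of 0.3 yields min (0.6, 0.75) = 0.6, while 0.5
// yields min (1.0, 0.75) = 0.75, i.e. the value is capped at 0.75.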
3906 inline
3907 size_t& dd_fragmentation (dynamic_data* inst)
3908 {
3909   return inst->fragmentation;
3910 }
3911
3912 inline
3913 size_t& dd_gc_clock (dynamic_data* inst)
3914 {
3915   return inst->gc_clock;
3916 }
3917 inline
3918 size_t& dd_time_clock (dynamic_data* inst)
3919 {
3920   return inst->time_clock;
3921 }
3922
3923 inline
3924 size_t& dd_gc_elapsed_time (dynamic_data* inst)
3925 {
3926     return inst->gc_elapsed_time;
3927 }
3928
3929 inline
3930 float& dd_gc_speed (dynamic_data* inst)
3931 {
3932     return inst->gc_speed;
3933 }
3934
3935 inline
3936 alloc_context* generation_alloc_context (generation* inst)
3937 {
3938     return &(inst->allocation_context);
3939 }
3940
3941 inline
3942 uint8_t*& generation_allocation_start (generation* inst)
3943 {
3944   return inst->allocation_start;
3945 }
3946 inline
3947 uint8_t*& generation_allocation_pointer (generation* inst)
3948 {
3949   return inst->allocation_context.alloc_ptr;
3950 }
3951 inline
3952 uint8_t*& generation_allocation_limit (generation* inst)
3953 {
3954   return inst->allocation_context.alloc_limit;
3955 }
3956 inline 
3957 allocator* generation_allocator (generation* inst)
3958 {
3959     return &inst->free_list_allocator;
3960 }
3961
3962 inline
3963 PTR_heap_segment& generation_start_segment (generation* inst)
3964 {
3965   return inst->start_segment;
3966 }
3967 inline
3968 heap_segment*& generation_allocation_segment (generation* inst)
3969 {
3970   return inst->allocation_segment;
3971 }
3972 inline
3973 uint8_t*& generation_plan_allocation_start (generation* inst)
3974 {
3975   return inst->plan_allocation_start;
3976 }
3977 inline
3978 size_t& generation_plan_allocation_start_size (generation* inst)
3979 {
3980   return inst->plan_allocation_start_size;
3981 }
3982 inline
3983 uint8_t*& generation_allocation_context_start_region (generation* inst)
3984 {
3985   return inst->allocation_context_start_region;
3986 }
3987 inline
3988 size_t& generation_free_list_space (generation* inst)
3989 {
3990   return inst->free_list_space;
3991 }
3992 inline
3993 size_t& generation_free_obj_space (generation* inst)
3994 {
3995   return inst->free_obj_space;
3996 }
3997 inline
3998 size_t& generation_allocation_size (generation* inst)
3999 {
4000   return inst->allocation_size;
4001 }
4002
4003 inline
4004 size_t& generation_pinned_allocated (generation* inst)
4005 {
4006     return inst->pinned_allocated;
4007 }
4008 inline
4009 size_t& generation_pinned_allocation_sweep_size (generation* inst)
4010 {
4011     return inst->pinned_allocation_sweep_size;
4012 }
4013 inline
4014 size_t& generation_pinned_allocation_compact_size (generation* inst)
4015 {
4016     return inst->pinned_allocation_compact_size;
4017 }
4018 inline
4019 size_t&  generation_free_list_allocated (generation* inst)
4020 {
4021     return inst->free_list_allocated;
4022 }
4023 inline
4024 size_t&  generation_end_seg_allocated (generation* inst)
4025 {
4026     return inst->end_seg_allocated;
4027 }
4028 inline
4029 BOOL&  generation_allocate_end_seg_p (generation* inst)
4030 {
4031     return inst->allocate_end_seg_p;
4032 }
4033 inline
4034 size_t& generation_condemned_allocated (generation* inst)
4035 {
4036     return inst->condemned_allocated;
4037 }
4038 #ifdef FREE_USAGE_STATS
4039 inline
4040 size_t& generation_pinned_free_obj_space (generation* inst)
4041 {
4042     return inst->pinned_free_obj_space;
4043 }
4044 inline
4045 size_t& generation_allocated_in_pinned_free (generation* inst)
4046 {
4047     return inst->allocated_in_pinned_free;
4048 }
4049 inline
4050 size_t& generation_allocated_since_last_pin (generation* inst)
4051 {
4052     return inst->allocated_since_last_pin;
4053 }
4054 #endif //FREE_USAGE_STATS
4055 inline 
4056 float generation_allocator_efficiency (generation* inst)
4057 {
4058     if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4059     {
4060         return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4061     }
4062     else
4063         return 0;
4064 }
4065 inline
4066 size_t generation_unusable_fragmentation (generation* inst)
4067 {
4068     return (size_t)(generation_free_obj_space (inst) + 
4069                     (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
4070 }
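// Worked example for the two functions above (illustrative numbers only): with
// free_list_allocated = 80, free_obj_space = 20 and free_list_space = 100, the
// allocator efficiency is 80 / (80 + 20) = 0.8, so the unusable fragmentation
// is 20 + (1 - 0.8) * 100 = 40.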
4071
4072 #define plug_skew           sizeof(ObjHeader)
4073 // We always use USE_PADDING_TAIL when fitting so items on the free list should be
4074 // twice the min_obj_size.
4075 #define min_free_list       (2*min_obj_size)
4076 struct plug
4077 {
4078     uint8_t *  skew[plug_skew / sizeof(uint8_t *)];
4079 };
4080
4081 class pair
4082 {
4083 public:
4084     short left;
4085     short right;
4086 };
4087
4088 //Note that these encode the fact that plug_skew is a multiple of uint8_t*.
4089 // Each new field is prepended to the prior struct.
4090
4091 struct plug_and_pair
4092 {
4093     pair        m_pair;
4094     plug        m_plug;
4095 };
4096
4097 struct plug_and_reloc
4098 {
4099     ptrdiff_t   reloc;
4100     pair        m_pair;
4101     plug        m_plug;
4102 };
4103
4104 struct plug_and_gap
4105 {
4106     ptrdiff_t   gap;
4107     ptrdiff_t   reloc;
4108     union
4109     {
4110         pair    m_pair;
4111         int     lr;  //for clearing the entire pair in one instruction
4112     };
4113     plug        m_plug;
4114 };
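// A sketch of the access pattern the prepended layout above is meant to allow
// (illustrative only; plug_address is a hypothetical pointer to the start of a
// plug): the fields stored in front of a plug can be reached by indexing
// backwards from it, e.g.
//
//   plug_and_reloc* pr = &((plug_and_reloc*)plug_address)[-1];
//   ptrdiff_t reloc = pr->reloc;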
4115
4116 struct gap_reloc_pair
4117 {
4118     size_t gap;
4119     size_t   reloc;
4120     pair        m_pair;
4121 };
4122
4123 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4124
4125 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4126 {
4127     plug_and_gap plugandgap;
4128 };
4129
4130 struct loh_obj_and_pad
4131 {
4132     ptrdiff_t   reloc;    
4133     plug        m_plug;
4134 };
4135
4136 struct loh_padding_obj
4137 {
4138     uint8_t*    mt;
4139     size_t      len;
4140     ptrdiff_t   reloc;
4141     plug        m_plug;
4142 };
4143 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4144
4145 //flags description
4146 #define heap_segment_flags_readonly     1
4147 #define heap_segment_flags_inrange      2
4148 #define heap_segment_flags_unmappable   4
4149 #define heap_segment_flags_loh          8
4150 #ifdef BACKGROUND_GC
4151 #define heap_segment_flags_swept        16
4152 #define heap_segment_flags_decommitted  32
4153 #define heap_segment_flags_ma_committed 64
4154 // for segments whose mark array is only partially committed.
4155 #define heap_segment_flags_ma_pcommitted 128
4156 #endif //BACKGROUND_GC
4157
4158 // Need to be careful to keep enough pad items to fit a relocation node,
4159 // padded to QuadWord, before the plug_skew.
4160
4161 class heap_segment
4162 {
4163 public:
4164     uint8_t*        allocated;
4165     uint8_t*        committed;
4166     uint8_t*        reserved;
4167     uint8_t*        used;
4168     uint8_t*        mem;
4169     size_t          flags;
4170     PTR_heap_segment next;
4171     uint8_t*        background_allocated;
4172 #ifdef MULTIPLE_HEAPS
4173     gc_heap*        heap;
4174 #endif //MULTIPLE_HEAPS
4175     uint8_t*        plan_allocated;
4176     uint8_t*        saved_bg_allocated;
4177
4178 #ifdef _MSC_VER
4179 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4180 #pragma warning(disable:4324)  // structure was padded due to __declspec(align())
4181 #endif
4182     aligned_plug_and_gap padandplug;
4183 #ifdef _MSC_VER
4184 #pragma warning(default:4324)  // structure was padded due to __declspec(align())
4185 #endif
4186 };
4187
4188 static_assert(offsetof(dac_heap_segment, allocated) == offsetof(heap_segment, allocated), "DAC heap segment layout mismatch");
4189 static_assert(offsetof(dac_heap_segment, committed) == offsetof(heap_segment, committed), "DAC heap segment layout mismatch");
4190 static_assert(offsetof(dac_heap_segment, reserved) == offsetof(heap_segment, reserved), "DAC heap segment layout mismatch");
4191 static_assert(offsetof(dac_heap_segment, used) == offsetof(heap_segment, used), "DAC heap segment layout mismatch");
4192 static_assert(offsetof(dac_heap_segment, mem) == offsetof(heap_segment, mem), "DAC heap segment layout mismatch");
4193 static_assert(offsetof(dac_heap_segment, flags) == offsetof(heap_segment, flags), "DAC heap segment layout mismatch");
4194 static_assert(offsetof(dac_heap_segment, next) == offsetof(heap_segment, next), "DAC heap segment layout mismatch");
4195 static_assert(offsetof(dac_heap_segment, background_allocated) == offsetof(heap_segment, background_allocated), "DAC heap segment layout mismatch");
4196 #ifdef MULTIPLE_HEAPS
4197 static_assert(offsetof(dac_heap_segment, heap) == offsetof(heap_segment, heap), "DAC heap segment layout mismatch");
4198 #endif // MULTIPLE_HEAPS
4199
4200 inline
4201 uint8_t*& heap_segment_reserved (heap_segment* inst)
4202 {
4203   return inst->reserved;
4204 }
4205 inline
4206 uint8_t*& heap_segment_committed (heap_segment* inst)
4207 {
4208   return inst->committed;
4209 }
4210 inline
4211 uint8_t*& heap_segment_used (heap_segment* inst)
4212 {
4213   return inst->used;
4214 }
4215 inline
4216 uint8_t*& heap_segment_allocated (heap_segment* inst)
4217 {
4218   return inst->allocated;
4219 }
4220
4221 inline
4222 BOOL heap_segment_read_only_p (heap_segment* inst)
4223 {
4224     return ((inst->flags & heap_segment_flags_readonly) != 0);
4225 }
4226
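// Note on the next two predicates: for segments that are not read-only they
// always return TRUE; only read-only segments actually consult the
// heap_segment_flags_inrange / heap_segment_flags_unmappable bits.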
4227 inline
4228 BOOL heap_segment_in_range_p (heap_segment* inst)
4229 {
4230     return (!(inst->flags & heap_segment_flags_readonly) ||
4231             ((inst->flags & heap_segment_flags_inrange) != 0));
4232 }
4233
4234 inline
4235 BOOL heap_segment_unmappable_p (heap_segment* inst)
4236 {
4237     return (!(inst->flags & heap_segment_flags_readonly) ||
4238             ((inst->flags & heap_segment_flags_unmappable) != 0));
4239 }
4240
4241 inline
4242 BOOL heap_segment_loh_p (heap_segment * inst)
4243 {
4244     return !!(inst->flags & heap_segment_flags_loh);
4245 }
4246
4247 #ifdef BACKGROUND_GC
4248 inline
4249 BOOL heap_segment_decommitted_p (heap_segment * inst)
4250 {
4251     return !!(inst->flags & heap_segment_flags_decommitted);
4252 }
4253 #endif //BACKGROUND_GC
4254
4255 inline
4256 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4257 {
4258   return inst->next;
4259 }
4260 inline
4261 uint8_t*& heap_segment_mem (heap_segment* inst)
4262 {
4263   return inst->mem;
4264 }
4265 inline
4266 uint8_t*& heap_segment_plan_allocated (heap_segment* inst)
4267 {
4268   return inst->plan_allocated;
4269 }
4270
4271 #ifdef BACKGROUND_GC
4272 inline
4273 uint8_t*& heap_segment_background_allocated (heap_segment* inst)
4274 {
4275   return inst->background_allocated;
4276 }
4277 inline
4278 uint8_t*& heap_segment_saved_bg_allocated (heap_segment* inst)
4279 {
4280   return inst->saved_bg_allocated;
4281 }
4282 #endif //BACKGROUND_GC
4283
4284 #ifdef MULTIPLE_HEAPS
4285 inline
4286 gc_heap*& heap_segment_heap (heap_segment* inst)
4287 {
4288     return inst->heap;
4289 }
4290 #endif //MULTIPLE_HEAPS
4291
4292 inline
4293 generation* gc_heap::generation_of (int  n)
4294 {
4295     assert (((n <= max_generation+1) && (n >= 0)));
4296     return &generation_table [ n ];
4297 }
4298
4299 inline
4300 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4301 {
4302     return &dynamic_data_table [ gen_number ];
4303 }
4304
4305 #define card_word_width ((size_t)32)
4306
4307 //
4308 // The value of card_size is determined empirically according to the average size of an object
4309 // In the code we also rely on the assumption that one card_table entry (uint32_t) covers an entire os page
4310 //
4311 #if defined (BIT64)
4312 #define card_size ((size_t)(2*OS_PAGE_SIZE/card_word_width))
4313 #else
4314 #define card_size ((size_t)(OS_PAGE_SIZE/card_word_width))
4315 #endif // BIT64
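// Worked example (assuming a 4 KB OS page): on 64-bit, card_size is
// 2 * 4096 / 32 = 256 bytes, so one 32-bit card word covers 32 * 256 = 8 KB;
// on 32-bit, card_size is 4096 / 32 = 128 bytes and one card word covers
// exactly one 4 KB page.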
4316
4317 // Returns the index of the card word a card is in
4318 inline
4319 size_t card_word (size_t card)
4320 {
4321     return card / card_word_width;
4322 }
4323
4324 // Returns the index of a card within its card word
4325 inline
4326 unsigned card_bit (size_t card)
4327 {
4328     return (unsigned)(card % card_word_width);
4329 }
4330
4331 inline
4332 size_t gcard_of (uint8_t* object)
4333 {
4334     return (size_t)(object) / card_size;
4335 }
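// Worked example (64-bit, assuming card_size == 256): for an object at address
// 0x10100, gcard_of returns 0x10100 / 256 = 257; that card lives in card word
// card_word (257) = 8 and at bit card_bit (257) = 1 within that word.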
4336