1 //
2 // Copyright (c) Microsoft. All rights reserved.
3 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
4 //
5 // optimize for speed
6
7
8 #ifndef _DEBUG
9 #ifdef _MSC_VER
10 #pragma optimize( "t", on )
11 #endif
12 #endif
13 #define inline __forceinline
14
15 #include "gc.h"
16
17 //#define DT_LOG
18
19 #include "gcrecord.h"
20
21 inline void FATAL_GC_ERROR()
22 {
23     DebugBreak();
24     _ASSERTE(!"Fatal Error in GC.");
25     EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
26 }
27
28 #ifdef _MSC_VER
29 #pragma inline_depth(20)
30 #endif
31
32 /* the following section defines the optional features */
33
34 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
35 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
36 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much 
37 // work to make FEATURE_STRUCTALIGN not apply to LOH so they can both be
38 // turned on.
39 #define FEATURE_LOH_COMPACTION
40
41 #ifdef FEATURE_64BIT_ALIGNMENT
42 // We need the following feature as part of keeping 64-bit types aligned in the GC heap.
43 #define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during
44                                 //relocation
45 #endif //FEATURE_64BIT_ALIGNMENT
46
47 #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
48 #ifdef SHORT_PLUGS
49 #define DESIRED_PLUG_LENGTH (1000)
50 #endif //SHORT_PLUGS
51
52 #define FEATURE_PREMORTEM_FINALIZATION
53 #define GC_HISTORY
54
55 #ifndef FEATURE_REDHAWK
56 #define HEAP_ANALYZE
57 #define COLLECTIBLE_CLASS
58 #endif // !FEATURE_REDHAWK
59
60 #ifdef HEAP_ANALYZE
61 #define initial_internal_roots        (1024*16)
62 #endif // HEAP_ANALYZE
63
64 #define MARK_LIST         //used sorted list to speed up plan phase
65
66 #define BACKGROUND_GC   //concurrent background GC (requires WRITE_WATCH)
67
68 #ifdef SERVER_GC
69 #define MH_SC_MARK //scalable marking
70 //#define SNOOP_STATS //diagnostic
71 #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
72 #endif //SERVER_GC
73
74 //This is used to mark some types as volatile only when scalable marking is used.
75 #if defined (SERVER_GC) && defined (MH_SC_MARK)
76 #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
77 #else //SERVER_GC&&MH_SC_MARK
78 #define SERVER_SC_MARK_VOLATILE(x) x
79 #endif //SERVER_GC&&MH_SC_MARK
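// Illustrative expansion of the macro above (a sketch; the field name is hypothetical):
//   SERVER_SC_MARK_VOLATILE(BYTE*) next_free;
// becomes VOLATILE(BYTE*) next_free; when SERVER_GC and MH_SC_MARK are both defined,
// and a plain BYTE* next_free; otherwise.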
80
81 //#define MULTIPLE_HEAPS         //Allow multiple heaps for servers
82
83 #define INTERIOR_POINTERS   //Allow interior pointers in the code manager
84
85 #define CARD_BUNDLE         //enable card bundle feature.(requires WRITE_WATCH)
86
87 // If this is defined we use a map for segments in order to find the heap for 
88 // a segment fast. But it does use more memory as we have to cover the whole
89 // heap range and for each entry we allocate a struct of 5 ptr-size words
90 // (3 for WKS as there's only one heap). 
91 #define SEG_MAPPING_TABLE
92
93 // If allocating the heap mapping table for the available VA consumes too
94 // much memory, you can enable this to allocate only the portion that
95 // corresponds to rw segments and grow it when needed in grow_brick_card_table.
96 // However in heap_of you will need to always compare the address with
97 // g_lowest/highest before you can look at the heap mapping table.
98 #define GROWABLE_SEG_MAPPING_TABLE
99
100 #ifdef BACKGROUND_GC
101 #define MARK_ARRAY      //Mark bit in an array
102 #endif //BACKGROUND_GC
103
104 #if defined(BACKGROUND_GC) || defined (CARD_BUNDLE)
105 #define WRITE_WATCH     //Write Watch feature
106 #endif //BACKGROUND_GC || CARD_BUNDLE
107
108 #ifdef WRITE_WATCH
109 #define array_size 100
110 #endif //WRITE_WATCH
111
112 //#define SHORT_PLUGS           //keep plug short
113
114 #define FFIND_OBJECT        //faster find_object, slower allocation
115 #define FFIND_DECAY  7      //Number of GC for which fast find will be active
116
117 //#define NO_WRITE_BARRIER  //no write barrier, use Write Watch feature
118
119 //#define DEBUG_WRITE_WATCH //Additional debug for write watch
120
121 //#define STRESS_PINNING    //Stress pinning by pinning randomly
122
123 //#define TRACE_GC          //debug trace gc operation
124 //#define SIMPLE_DPRINTF
125
126 //#define CATCH_GC          //catches exception during GC
127
128 //#define TIME_GC           //time allocation and garbage collection
129 //#define TIME_WRITE_WATCH  //time GetWriteWatch and ResetWriteWatch calls
130 //#define COUNT_CYCLES  //Use cycle counter for timing
131 //#define JOIN_STATS         //amount of time spent in the join
132 //also, see TIME_SUSPEND in switches.h.
133
134 //#define SYNCHRONIZATION_STATS
135 //#define SEG_REUSE_STATS
136
137 #if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
138 #define BEGIN_TIMING(x) \
139     LARGE_INTEGER x##_start; \
140     QueryPerformanceCounter (&x##_start)
141
142 #define END_TIMING(x) \
143     LARGE_INTEGER x##_end; \
144     QueryPerformanceCounter (&x##_end); \
145     x += x##_end.QuadPart - x##_start.QuadPart
146
147 #else
148 #define BEGIN_TIMING(x)
149 #define END_TIMING(x)
150 #define BEGIN_TIMING_CYCLES(x)
151 #define END_TIMING_CYCLES(x)
152 #endif //SYNCHRONIZATION_STATS || STAGE_STATS
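// Illustrative pairing of the timing macros above (a sketch; 'suspend_time' is a
// hypothetical LONGLONG accumulator, not a variable defined in this header):
//
//   LONGLONG suspend_time = 0;
//   BEGIN_TIMING (suspend_time);   // declares suspend_time_start and samples QPC
//   // ...work being measured...
//   END_TIMING (suspend_time);     // samples QPC again and adds the elapsed ticks
//
// When neither SYNCHRONIZATION_STATS nor STAGE_STATS is defined, both macros expand
// to nothing, so the instrumentation has zero cost.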
153
154 #define NO_CATCH_HANDLERS  //to debug gc1, remove the catch handlers
155
156 /* End of optional features */
157
158 #ifdef _DEBUG
159 #define TRACE_GC
160 #endif
161
162 #define NUMBERGENERATIONS   4               //Max number of generations
163
164 // For the bestfit algorithm when we relocate ephemeral generations into an 
165 // existing gen2 segment.
166 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
167 #define MIN_INDEX_POWER2 6
168
169 #ifdef SERVER_GC
170
171 #ifdef _WIN64
172 #define MAX_INDEX_POWER2 30
173 #else
174 #define MAX_INDEX_POWER2 26
175 #endif  // _WIN64
176
177 #else //SERVER_GC
178
179 #ifdef _WIN64
180 #define MAX_INDEX_POWER2 28
181 #else
182 #define MAX_INDEX_POWER2 24
183 #endif  // _WIN64
184
185 #endif //SERVER_GC
186
187 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
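// For example, server GC on _WIN64 gives (30 - 6 + 1) = 25 buckets (matching the
// "25 sizes total" note above), while workstation GC on 32-bit gives (24 - 6 + 1) = 19.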
188
189 #define MAX_NUM_FREE_SPACES 200 
190 #define MIN_NUM_FREE_SPACES 5 
191
192 //Please leave these definitions intact.
193
194 #define CLREvent CLREventStatic
195
196 #ifdef CreateFileMapping
197
198 #undef CreateFileMapping
199
200 #endif //CreateFileMapping
201
202 #define CreateFileMapping WszCreateFileMapping
203
204 // hosted api
205 #ifdef InitializeCriticalSection
206 #undef InitializeCriticalSection
207 #endif //ifdef InitializeCriticalSection
208 #define InitializeCriticalSection UnsafeInitializeCriticalSection
209
210 #ifdef DeleteCriticalSection
211 #undef DeleteCriticalSection
212 #endif //ifdef DeleteCriticalSection
213 #define DeleteCriticalSection UnsafeDeleteCriticalSection
214
215 #ifdef EnterCriticalSection
216 #undef EnterCriticalSection
217 #endif //ifdef EnterCriticalSection
218 #define EnterCriticalSection UnsafeEEEnterCriticalSection
219
220 #ifdef LeaveCriticalSection
221 #undef LeaveCriticalSection
222 #endif //ifdef LeaveCriticalSection
223 #define LeaveCriticalSection UnsafeEELeaveCriticalSection
224
225 #ifdef TryEnterCriticalSection
226 #undef TryEnterCriticalSection
227 #endif //ifdef TryEnterCriticalSection
228 #define TryEnterCriticalSection UnsafeEETryEnterCriticalSection
229
230 #ifdef CreateSemaphore
231 #undef CreateSemaphore
232 #endif //CreateSemaphore
233 #define CreateSemaphore UnsafeCreateSemaphore
234
235 #ifdef CreateEvent
236 #undef CreateEvent
237 #endif //ifdef CreateEvent
238 #define CreateEvent UnsafeCreateEvent
239
240 #ifdef VirtualAlloc
241 #undef VirtualAlloc
242 #endif //ifdef VirtualAlloc
243 #define VirtualAlloc ClrVirtualAlloc
244
245 #ifdef VirtualFree
246 #undef VirtualFree
247 #endif //ifdef VirtualFree
248 #define VirtualFree ClrVirtualFree
249
250 #ifdef VirtualQuery
251 #undef VirtualQuery
252 #endif //ifdef VirtualQuery
253 #define VirtualQuery ClrVirtualQuery
254
255 #ifdef VirtualProtect
256 #undef VirtualProtect
257 #endif //ifdef VirtualProtect
258 #define VirtualProtect ClrVirtualProtect
259
260 #ifdef memcpy
261 #undef memcpy
262 #endif //memcpy
263
264 #ifdef FEATURE_STRUCTALIGN
265 #define REQD_ALIGN_DCL ,int requiredAlignment
266 #define REQD_ALIGN_ARG ,requiredAlignment
267 #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
268 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
269 #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
270 #else // FEATURE_STRUCTALIGN
271 #define REQD_ALIGN_DCL
272 #define REQD_ALIGN_ARG
273 #define REQD_ALIGN_AND_OFFSET_DCL
274 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
275 #define REQD_ALIGN_AND_OFFSET_ARG
276 #endif // FEATURE_STRUCTALIGN
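// Illustrative expansion of the parameter-forwarding macros above (a sketch; the
// function name is a stand-in, not a declaration made here):
//
//   BYTE* allocate_in_gen (size_t size REQD_ALIGN_AND_OFFSET_DCL);
//
// declares, with FEATURE_STRUCTALIGN on,
//
//   BYTE* allocate_in_gen (size_t size, int requiredAlignment, size_t alignmentOffset);
//
// and with the feature off the extra parameters vanish; call sites forward them
// uniformly with REQD_ALIGN_AND_OFFSET_ARG.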
277
278 #ifdef MULTIPLE_HEAPS
279 #define THREAD_NUMBER_DCL ,int thread
280 #define THREAD_NUMBER_ARG ,thread
281 #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
282 #define THREAD_FROM_HEAP  int thread = heap_number;
283 #define HEAP_FROM_THREAD  gc_heap* hpt = gc_heap::g_heaps[thread];
284 #else
285 #define THREAD_NUMBER_DCL
286 #define THREAD_NUMBER_ARG
287 #define THREAD_NUMBER_FROM_CONTEXT
288 #define THREAD_FROM_HEAP
289 #define HEAP_FROM_THREAD  gc_heap* hpt = 0;
290 #endif //MULTIPLE_HEAPS
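// Illustrative use of the heap/thread plumbing macros above (a sketch; 'mark_object_x'
// is a hypothetical member function):
//
//   void gc_heap::mark_object_x (BYTE* o THREAD_NUMBER_DCL)
//   {
//       HEAP_FROM_THREAD;   // gc_heap* hpt = gc_heap::g_heaps[thread] for server GC,
//                           // gc_heap* hpt = 0 for workstation GC
//       ...
//   }
//
// so a single body can be written for both the one-heap and multi-heap builds.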
291
292 //These constants are ordered
293 const int policy_sweep = 0;
294 const int policy_compact = 1;
295 const int policy_expand  = 2;
296
297 #ifdef TRACE_GC
298
299
300 extern int     print_level;
301 extern BOOL    trace_gc;
302 extern int    gc_trace_fac;
303
304
305 class hlet
306 {
307     static hlet* bindings;
308     int prev_val;
309     int* pval;
310     hlet* prev_let;
311 public:
312     hlet (int& place, int value)
313     {
314         prev_val = place;
315         pval = &place;
316         place = value;
317         prev_let = bindings;
318         bindings = this;
319     }
320     ~hlet ()
321     {
322         *pval = prev_val;
323         bindings = prev_let;
324     }
325 };
326
327
328 #define let(p,v) hlet __x = hlet (p, v);
329
330 #else //TRACE_GC
331
332 #define gc_count    -1
333 #define let(s,v)
334
335 #endif //TRACE_GC
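// Illustrative use of the let() binding above (a sketch; the value 3 is arbitrary):
//
//   {
//       let (print_level, 3);    // temporarily raise the trace level in this scope
//       // ...dprintf calls here see print_level == 3...
//   }                            // ~hlet restores the saved value on scope exit
//
// Without TRACE_GC the let() macro expands to nothing.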
336
337 #ifdef TRACE_GC
338 #define SEG_REUSE_LOG_0 7
339 #define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
340 #define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
341 #define BGC_LOG (DT_LOG_0 + 1)
342 #define GTC_LOG (DT_LOG_0 + 2)
343 #define GC_TABLE_LOG (DT_LOG_0 + 3)
344 #define JOIN_LOG (DT_LOG_0 + 4)
345 #define SPINLOCK_LOG (DT_LOG_0 + 5)
346 #define SNOOP_LOG (DT_LOG_0 + 6)
347
348 #ifndef DACCESS_COMPILE
349
350 #ifdef SIMPLE_DPRINTF
351
352 //#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
353 void LogValist(const char *fmt, va_list args);
354 void GCLog (const char *fmt, ... );
355 //#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
356 //#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
357 //#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
358 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
359 //#define dprintf(l,x) {if (l==GTC_LOG) {GCLog x;}}
360 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == 1234)) ) {GCLog x;}}
361 //#define dprintf(l,x) {if ((l <= 1) || (l == 2222)) {GCLog x;}}
362 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
363 //#define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG) ||(l == DT_LOG_0)) {GCLog x;}}
364 //#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
365 //#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
366 //#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
367
368 #else //SIMPLE_DPRINTF
369
370 // The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
371 // reg key GCTraceFacility is set.  The stress log can only take a format string and 4 numbers or
372 // string literals.
373 #define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \
374       if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \
375       else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \
376       else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}}
377
378 #endif //SIMPLE_DPRINTF
379
380 #else //DACCESS_COMPILE
381 #define dprintf(l,x)
382 #endif //DACCESS_COMPILE
383 #else //TRACE_GC
384 #define dprintf(l,x)
385 #endif //TRACE_GC
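// Illustrative dprintf call (a sketch; the format arguments are made up): the first
// argument is a level or log facility (e.g. GTC_LOG) and the second is a fully
// parenthesized printf-style argument list, e.g.
//
//   dprintf (2, ("h%d: budget %Id", heap_number, new_budget));
//
// Under DACCESS_COMPILE, or when TRACE_GC is not defined, the whole call compiles away.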
386
387 #ifndef FEATURE_REDHAWK
388 #undef  assert
389 #define assert _ASSERTE
390 #undef  ASSERT
391 #define ASSERT _ASSERTE
392 #endif // FEATURE_REDHAWK
393
394 #ifdef _DEBUG
395
396 struct GCDebugSpinLock {
397     VOLATILE(LONG) lock;                   // -1 if free, 0 if held
398     VOLATILE(Thread *) holding_thread;     // -1 if no thread holds the lock.
399     VOLATILE(BOOL) released_by_gc_p;       // a GC thread released the lock.
400
401     GCDebugSpinLock()
402         : lock(-1), holding_thread((Thread*) -1)
403     {
404     }
405
406 };
407 typedef GCDebugSpinLock GCSpinLock;
408
409 #elif defined (SYNCHRONIZATION_STATS)
410
411 struct GCSpinLockInstru {
412     VOLATILE(LONG) lock;
413     // number of times we went into SwitchToThread in enter_spin_lock.
414     unsigned int num_switch_thread;
415     // number of times we went into WaitLonger.
416     unsigned int num_wait_longer;
417     // number of times we went to calling SwitchToThread in WaitLonger.
418     unsigned int num_switch_thread_w;
419     // number of times we went to calling DisablePreemptiveGC in WaitLonger.
420     unsigned int num_disable_preemptive_w;
421
422     GCSpinLockInstru()
423         : lock(-1), num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
424     {
425     }
426
427     void init()
428     {
429         num_switch_thread = 0;
430         num_wait_longer = 0;
431         num_switch_thread_w = 0;
432         num_disable_preemptive_w = 0;
433     }
434 };
435
436 typedef GCSpinLockInstru GCSpinLock;
437
438 #else
439
440 struct GCDebugSpinLock {
441     VOLATILE(LONG) lock;                   // -1 if free, 0 if held
442
443     GCDebugSpinLock()
444         : lock(-1)
445     {
446     }
447 };
448 typedef GCDebugSpinLock GCSpinLock;
449
450 #endif
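// In all three variants above the convention is the same: lock == -1 means free,
// lock == 0 means held. A (hypothetical) fast-path acquire is therefore a single
// interlocked compare-exchange of 0 over -1, sketched as:
//
//   if (InterlockedCompareExchange (&spin_lock.lock, 0, -1) == -1)
//   {
//       // lock acquired
//   }
//
// This only illustrates the encoding; it is not the actual enter_spin_lock implementation.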
451
452 class mark;
453 class heap_segment;
454 class CObjectHeader;
455 class l_heap;
456 class sorted_table;
457 class c_synchronize;
458 class seg_free_spaces;
459 class gc_heap;
460
461 #ifdef BACKGROUND_GC
462 class exclusive_sync;
463 class recursive_gc_sync;
464 #endif //BACKGROUND_GC
465
466 // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
467 // make sure you change that one if you change this one!
468 enum gc_pause_mode
469 {
470     pause_batch = 0, //We are not concerned about pause length
471     pause_interactive = 1,     //We are running an interactive app
472     pause_low_latency = 2,     //short pauses are essential
473     //avoid long pauses from blocking full GCs unless running out of memory
474     pause_sustained_low_latency = 3,
475     pause_no_gc = 4
476 };
477
478 enum gc_loh_compaction_mode
479 {
480     loh_compaction_default = 1, // the default mode, don't compact LOH.
481     loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
482     loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
483 };
484
485 enum set_pause_mode_status
486 {
487     set_pause_mode_success = 0,
488     set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
489 };
490
491 enum gc_tuning_point
492 {
493     tuning_deciding_condemned_gen,
494     tuning_deciding_full_gc,
495     tuning_deciding_compaction,
496     tuning_deciding_expansion,
497     tuning_deciding_promote_ephemeral
498 };
499
500 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
501 static const char * const str_bgc_state[] =
502 {
503     "not_in_process",
504     "mark_handles",
505     "mark_stack",
506     "revisit_soh",
507     "revisit_loh",
508     "overflow_soh",
509     "overflow_loh",
510     "final_marking",
511     "sweep_soh",
512     "sweep_loh",
513     "plan_phase"
514 };
515 #endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
516
517 enum allocation_state
518 {
519     a_state_start = 0,
520     a_state_can_allocate,
521     a_state_cant_allocate,
522     a_state_try_fit,
523     a_state_try_fit_new_seg,
524     a_state_try_fit_new_seg_after_cg,
525     a_state_try_fit_no_seg,
526     a_state_try_fit_after_cg,
527     a_state_try_fit_after_bgc,
528     a_state_try_free_full_seg_in_bgc, 
529     a_state_try_free_after_bgc,
530     a_state_try_seg_end,
531     a_state_acquire_seg,
532     a_state_acquire_seg_after_cg,
533     a_state_acquire_seg_after_bgc,
534     a_state_check_and_wait_for_bgc,
535     a_state_trigger_full_compact_gc,
536     a_state_trigger_ephemeral_gc,
537     a_state_trigger_2nd_ephemeral_gc,
538     a_state_check_retry_seg,
539     a_state_max
540 };
541
542 enum gc_type
543 {
544     gc_type_compacting = 0,
545     gc_type_blocking = 1,
546 #ifdef BACKGROUND_GC
547     gc_type_background = 2,
548 #endif //BACKGROUND_GC
549     gc_type_max = 3
550 };
551
552
553 //encapsulates the mechanism for the current gc
554 class gc_mechanisms
555 {
556 public:
557     VOLATILE(SIZE_T) gc_index; // starts from 1 for the first GC, like dd_collection_count 
558     int condemned_generation;
559     BOOL promotion;
560     BOOL compaction;
561     BOOL loh_compaction;
562     BOOL heap_expansion;
563     DWORD concurrent;
564     BOOL demotion;
565     BOOL card_bundles;
566     int  gen0_reduction_count;
567     BOOL should_lock_elevation;
568     int elevation_locked_count;
569     BOOL minimal_gc;
570     gc_reason reason;
571     gc_pause_mode pause_mode;
572     BOOL found_finalizers;
573
574 #ifdef BACKGROUND_GC
575     BOOL background_p;
576     bgc_state b_state;
577     BOOL allocations_allowed;
578 #endif //BACKGROUND_GC
579
580 #ifdef STRESS_HEAP
581     BOOL stress_induced;
582 #endif // STRESS_HEAP
583
584 #ifdef _WIN64
585     DWORD entry_memory_load;
586 #endif //_WIN64
587
588     void init_mechanisms(); //for each GC
589     void first_init(); // for the life of the EE
590
591     void record (gc_history_global* history);
592 };
593
594 // This is a compact version of gc_mechanism that we use to save in the history.
595 class gc_mechanisms_store
596 {
597 public:
598     size_t gc_index; 
599     bool promotion;
600     bool compaction;
601     bool loh_compaction;
602     bool heap_expansion;
603     bool concurrent;
604     bool demotion;
605     bool card_bundles;
606     bool should_lock_elevation;
607     int condemned_generation   : 8; 
608     int gen0_reduction_count   : 8;
609     int elevation_locked_count : 8;
610     gc_reason reason           : 8;
611     gc_pause_mode pause_mode   : 8;
612 #ifdef BACKGROUND_GC
613     bgc_state b_state          : 8;
614 #endif //BACKGROUND_GC
615     bool found_finalizers;
616
617 #ifdef BACKGROUND_GC
618     bool background_p;
619 #endif //BACKGROUND_GC
620
621 #ifdef STRESS_HEAP
622     bool stress_induced;
623 #endif // STRESS_HEAP
624
625 #ifdef _WIN64
626     DWORD entry_memory_load;
627 #endif //_WIN64
628
629     void store (gc_mechanisms* gm)
630     {
631         gc_index                = gm->gc_index; 
632         condemned_generation    = gm->condemned_generation;
633         promotion               = (gm->promotion != 0);
634         compaction              = (gm->compaction != 0);
635         loh_compaction          = (gm->loh_compaction != 0);
636         heap_expansion          = (gm->heap_expansion != 0);
637         concurrent              = (gm->concurrent != 0);
638         demotion                = (gm->demotion != 0);
639         card_bundles            = (gm->card_bundles != 0);
640         gen0_reduction_count    = gm->gen0_reduction_count;
641         should_lock_elevation   = (gm->should_lock_elevation != 0);
642         elevation_locked_count  = gm->elevation_locked_count;
643         reason                  = gm->reason;
644         pause_mode              = gm->pause_mode;
645         found_finalizers        = (gm->found_finalizers != 0);
646
647 #ifdef BACKGROUND_GC
648         background_p            = (gm->background_p != 0);
649         b_state                 = gm->b_state;
650 #endif //BACKGROUND_GC
651
652 #ifdef STRESS_HEAP
653         stress_induced          = (gm->stress_induced != 0);
654 #endif // STRESS_HEAP
655
656 #ifdef _WIN64
657         entry_memory_load       = gm->entry_memory_load;
658 #endif //_WIN64        
659     }
660 };
661
662 #ifdef GC_STATS
663
664 // GC specific statistics, tracking counts and timings for GCs occurring in the system.
665 // This writes the statistics to a file every 60 seconds, if a file is specified in
666 // COMPLUS_GcMixLog
667
668 struct GCStatistics
669     : public StatisticsBase
670 {
671     // initialized to the contents of COMPLUS_GcMixLog, or NULL, if not present
672     static WCHAR* logFileName;
673     static FILE*  logFile;
674
675     // number of times we executed a background GC, a foreground GC, or a
676     // non-concurrent GC
677     int cntBGC, cntFGC, cntNGC;
678
679     // min, max, and total time spent performing BGCs, FGCs, NGCs
680     // (BGC time includes everything between the moment the BGC starts until 
681     // it completes, i.e. the times of all FGCs occuring concurrently)
682     MinMaxTot bgc, fgc, ngc;
683
684     // number of times we executed a compacting GC (sweeping counts can be derived)
685     int cntCompactNGC, cntCompactFGC;
686
687     // count of reasons
688     int cntReasons[reason_max];
689
690     // count of condemned generation, by NGC and FGC:
691     int cntNGCGen[max_generation+1];
692     int cntFGCGen[max_generation];
693     
694     ///////////////////////////////////////////////////////////////////////////////////////////////
695     // Internal mechanism:
696
697     virtual void Initialize();
698     virtual void DisplayAndUpdate();
699
700     // Public API
701
702     static BOOL Enabled()
703     { return logFileName != NULL; }
704
705     void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
706 };
707
708 extern GCStatistics g_GCStatistics;
709 extern GCStatistics g_LastGCStatistics;
710
711 #endif // GC_STATS
712
713
714 typedef DPTR(class heap_segment)               PTR_heap_segment;
715 typedef DPTR(class gc_heap)                    PTR_gc_heap;
716 typedef DPTR(PTR_gc_heap)                      PTR_PTR_gc_heap;
717 #ifdef FEATURE_PREMORTEM_FINALIZATION
718 typedef DPTR(class CFinalize)                  PTR_CFinalize;
719 #endif // FEATURE_PREMORTEM_FINALIZATION
720
721 //-------------------------------------
722 //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size 
723 //and doubling each time. The last bucket (index == num_buckets) is for largest sizes with no limit
724
725 #define MAX_BUCKET_COUNT (13)//Max number of buckets for the small generations. 
726 class alloc_list 
727 {
728     BYTE* head;
729     BYTE* tail;
730 public:
731     BYTE*& alloc_list_head () { return head;}
732     BYTE*& alloc_list_tail () { return tail;}
733     alloc_list()
734     {
735         head = 0; 
736         tail = 0; 
737     }
738 };
739
740
741 class allocator 
742 {
743     size_t num_buckets;
744     size_t frst_bucket_size;
745     alloc_list first_bucket;
746     alloc_list* buckets;
747     alloc_list& alloc_list_of (unsigned int bn);
748
749 public:
750     allocator (unsigned int num_b, size_t fbs, alloc_list* b);
751     allocator()
752     {
753         num_buckets = 1;
754         frst_bucket_size = SIZE_T_MAX;
755     }
756     unsigned int number_of_buckets() {return (unsigned int)num_buckets;}
757
758     size_t first_bucket_size() {return frst_bucket_size;}
759     BYTE*& alloc_list_head_of (unsigned int bn)
760     {
761         return alloc_list_of (bn).alloc_list_head();
762     }
763     BYTE*& alloc_list_tail_of (unsigned int bn)
764     {
765         return alloc_list_of (bn).alloc_list_tail();
766     }
767     void clear();
768     BOOL discard_if_no_fit_p()
769     {
770         return (num_buckets == 1);
771     }
772
773     // This is when we know there's nothing to repair because this free
774     // list has never gone through plan phase. Right now it's only used
775     // by the background ephemeral sweep when we copy the local free list
776     // to gen0's free list.
777     //
778     // We copy head and tail manually (vs together like copy_to_alloc_list)
779     // since we need to copy tail first because when we get the free items off
780     // of each bucket we check head first. We also need to copy the
781     // smaller buckets first so when gen0 allocation needs to thread
782 // smaller items back that bucket is guaranteed to have been fully
783     // copied.
784     void copy_with_no_repair (allocator* allocator_to_copy)
785     {
786         assert (num_buckets == allocator_to_copy->number_of_buckets());
787         for (unsigned int i = 0; i < num_buckets; i++)
788         {
789             alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
790             alloc_list_tail_of(i) = al->alloc_list_tail();
791             alloc_list_head_of(i) = al->alloc_list_head();
792         }
793     }
794
795     void unlink_item (unsigned int bucket_number, BYTE* item, BYTE* previous_item, BOOL use_undo_p);
796     void thread_item (BYTE* item, size_t size);
797     void thread_item_front (BYTE* item, size_t size);
798     void thread_free_item (BYTE* free_item, BYTE*& head, BYTE*& tail);
799     void copy_to_alloc_list (alloc_list* toalist);
800     void copy_from_alloc_list (alloc_list* fromalist);
801     void commit_alloc_list_changes();
802 };
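// Worked example of the bucketing described above (a sketch; the numbers are
// illustrative, not the values used by any particular generation): an allocator
// created with num_buckets == 4 and first_bucket_size == 256 keeps free items of
// size < 256 in the first bucket, < 512 in the next, < 1024 in the one after that,
// and everything of 1024 bytes and above in the last bucket, with no upper limit.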
803
804 #define NUM_GEN_POWER2 (20)
805 #define BASE_GEN_SIZE (1*512)
806
807 // group the frequently used ones together (need instrumentation on accessors)
808 class generation
809 {
810 public:
811     // Don't move these first two fields without adjusting the references
812     // from the __asm in jitinterface.cpp.
813     alloc_context   allocation_context;
814     heap_segment*   allocation_segment;
815     PTR_heap_segment start_segment;
816     BYTE*           allocation_context_start_region;
817     BYTE*           allocation_start;
818     allocator       free_list_allocator;
819     size_t          free_list_allocated;
820     size_t          end_seg_allocated;
821     BOOL            allocate_end_seg_p;
822     size_t          condemned_allocated;
823     size_t          free_list_space;
824     size_t          free_obj_space;
825     size_t          allocation_size;
826     BYTE*           plan_allocation_start;
827     size_t          plan_allocation_start_size;
828
829     // this is the pinned plugs that got allocated into this gen.
830     size_t          pinned_allocated;
831     size_t          pinned_allocation_compact_size;
832     size_t          pinned_allocation_sweep_size;
833     int             gen_num;
834
835 #ifdef FREE_USAGE_STATS
836     size_t          gen_free_spaces[NUM_GEN_POWER2];
837     // these are non pinned plugs only
838     size_t          gen_plugs[NUM_GEN_POWER2];
839     size_t          gen_current_pinned_free_spaces[NUM_GEN_POWER2];
840     size_t          pinned_free_obj_space;
841     // this is what got allocated into the pinned free spaces.
842     size_t          allocated_in_pinned_free;
843     size_t          allocated_since_last_pin;
844 #endif //FREE_USAGE_STATS
845 };
846
847 // The dynamic data fields are grouped into 3 categories:
848 //
849 // calculated logical data (like desired_allocation)
850 // physical data (like fragmentation)
851 // const data (like min_gc_size), initialized at the beginning
852 class dynamic_data
853 {
854 public:
855     ptrdiff_t new_allocation;
856     ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
857     float     surv;
858     size_t    desired_allocation;
859
860     // # of bytes taken by objects (ie, not free space) at the beginning
861     // of the GC.
862     size_t    begin_data_size;
863     // # of bytes taken by survived objects after mark.
864     size_t    survived_size;
865     // # of bytes taken by survived pinned plugs after mark.
866     size_t    pinned_survived_size;
867     size_t    artificial_pinned_survived_size;
868     size_t    added_pinned_size;
869
870 #ifdef SHORT_PLUGS
871     size_t    padding_size;
872 #endif //SHORT_PLUGS
873 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
874     // # of plugs that are not pinned plugs.
875     size_t    num_npinned_plugs;
876 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
877     //total object size after a GC, ie, doesn't include fragmentation
878     size_t    current_size; 
879     size_t    collection_count;
880     size_t    promoted_size;
881     size_t    freach_previous_promotion;
882     size_t    fragmentation;    //fragmentation when we don't compact
883     size_t    gc_clock;         //gc# when last GC happened
884     size_t    time_clock;       //time when last gc started
885     size_t    gc_elapsed_time;  // Time it took for the gc to complete
886     float     gc_speed;         //  speed in bytes/msec for the gc to complete
887
888     // min_size is always the same as min_gc_size..
889     size_t    min_gc_size;
890     size_t    max_size;
891     size_t    min_size;
892     size_t    default_new_allocation;
893     size_t    fragmentation_limit;
894     float     fragmentation_burden_limit;
895     float     limit;
896     float     max_limit;
897 };
898
899 #define ro_in_entry 0x1
900
901 #ifdef SEG_MAPPING_TABLE
902 // Note that I am storing both h0 and seg0, even though in Server GC you can get to 
903 // the heap* from the segment info. This is because heap_of needs to be really fast
904 // and we would not want yet another indirection.
905 struct seg_mapping
906 {
907     // if an address is > boundary it belongs to h1; else h0.
908     // since we init h0 and h1 to 0, if we get 0 it means that
909     // address doesn't exist on managed segments. And heap_of 
910     // would just return heap0 which is what it does now.
911     BYTE* boundary;
912 #ifdef MULTIPLE_HEAPS
913     gc_heap* h0;
914     gc_heap* h1;
915 #endif //MULTIPLE_HEAPS
916 // You could have an address that's in between 2 segments and
917     // this would return a seg, the caller then will use 
918     // in_range_for_segment to determine if it's on that seg.
919     heap_segment* seg0; // this is what the seg for h0 is.
920     heap_segment* seg1; // this is what the seg for h1 is.
921     // Note that when frozen objects are used we mask seg1
922     // with 0x1 to indicate that there is a ro segment for
923     // this entry.
924 };
925 #endif //SEG_MAPPING_TABLE
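// Illustrative read of a seg_mapping entry (a sketch of the intent described above;
// 'entry' and 'addr' are hypothetical locals, and h0/h1 only exist under MULTIPLE_HEAPS):
//
//   gc_heap*      hp  = (addr > entry->boundary) ? entry->h1   : entry->h0;
//   heap_segment* seg = (addr > entry->boundary) ? entry->seg1 : entry->seg0;
//   // hp == 0 means addr is not on any managed segment; the caller still confirms
//   // the segment with in_range_for_segment, and seg1 may carry the ro_in_entry bit.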
926
927 // alignment helpers
928 //Alignment constant for allocation
929 #define ALIGNCONST (DATA_ALIGNMENT-1)
930
931 inline
932 size_t Align (size_t nbytes, int alignment=ALIGNCONST)
933 {
934     return (nbytes + alignment) & ~alignment;
935 }
936
937 //return alignment constant for small object heap vs large object heap
938 inline
939 int get_alignment_constant (BOOL small_object_p)
940 {
941 #ifdef FEATURE_STRUCTALIGN
942     // If any objects on the large object heap require 8-byte alignment,
943     // the compiler will tell us so.  Let's not guess an alignment here.
944     return ALIGNCONST;
945 #else // FEATURE_STRUCTALIGN
946     return small_object_p ? ALIGNCONST : 7;
947 #endif // FEATURE_STRUCTALIGN
948 }
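// Worked example (assuming DATA_ALIGNMENT == 8, so ALIGNCONST == 7):
//   Align (13) == 16, Align (16) == 16, Align (24) == 24.
// In the non-FEATURE_STRUCTALIGN build, get_alignment_constant() above returns
// ALIGNCONST for the small object heap and 7 for the large object heap, so on such a
// platform both heaps round object sizes up to 8-byte multiples.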
949
950 struct etw_opt_info
951 {
952     size_t desired_allocation;
953     size_t new_allocation;
954     int    gen_number;
955 };
956
957 enum alloc_wait_reason
958 {
959     // When we don't care about firing an event for
960     // this.
961     awr_ignored = -1,
962
963     // when we detect we are in low memory
964     awr_low_memory = 0,
965
966     // when we detect the ephemeral segment is too full
967     awr_low_ephemeral = 1,
968
969     // we've given out too much budget for gen0.
970     awr_gen0_alloc = 2,
971
972     // we've given out too much budget for loh.
973     awr_loh_alloc = 3,
974
975     // this event is really obsolete - it's for pre-XP
976     // OSs where low mem notification is not supported.
977     awr_alloc_loh_low_mem = 4,
978
979 // we ran out of VM space to reserve on loh.
980     awr_loh_oos = 5, 
981
982     // ran out of space when allocating a small object
983     awr_gen0_oos_bgc = 6,
984
985     // ran out of space when allocating a large object
986     awr_loh_oos_bgc = 7,
987
988     // waiting for BGC to let FGC happen
989     awr_fgc_wait_for_bgc = 8,
990
991     // wait for bgc to finish to get loh seg.
992     awr_get_loh_seg = 9,
993
994     // we don't allow loh allocation during bgc planning.
995     awr_loh_alloc_during_plan = 10,
996
997     // we don't allow too much loh allocation during bgc.
998     awr_loh_alloc_during_bgc = 11
999 };
1000
1001 struct alloc_thread_wait_data
1002 {
1003     int awr;
1004 };
1005
1006 enum msl_take_state
1007 {
1008     mt_get_large_seg,
1009     mt_wait_bgc_plan,
1010     mt_wait_bgc,
1011     mt_block_gc,
1012     mt_clr_mem,
1013     mt_clr_large_mem,
1014     mt_t_eph_gc,
1015     mt_t_full_gc,
1016     mt_alloc_small,
1017     mt_alloc_large,
1018     mt_alloc_small_cant,
1019     mt_alloc_large_cant,
1020     mt_try_alloc,
1021     mt_try_budget
1022 };
1023
1024 enum msl_enter_state
1025 {
1026     me_acquire,
1027     me_release
1028 };
1029
1030 struct spinlock_info
1031 {
1032     msl_enter_state enter_state;
1033     msl_take_state take_state;
1034     DWORD thread_id;
1035 };
1036
1037 const unsigned HS_CACHE_LINE_SIZE = 128;
1038
1039 #ifdef SNOOP_STATS
1040 struct snoop_stats_data
1041 {
1042     int heap_index;
1043
1044     // total number of objects that we called
1045     // gc_mark on.
1046     size_t objects_checked_count;
1047     // total number of time we called gc_mark
1048     // on a 0 reference.
1049     size_t zero_ref_count;
1050     // total objects actually marked.
1051     size_t objects_marked_count;
1052     // number of objects written to the mark stack because
1053     // of mark_stolen.
1054     size_t stolen_stack_count;
1055     // number of objects pushed onto the mark stack because
1056     // of the partial mark code path.
1057     size_t partial_stack_count;
1058     // number of objects pushed onto the mark stack because
1059     // of the non partial mark code path.
1060     size_t normal_stack_count;
1061     // number of references marked without mark stack.
1062     size_t non_stack_count;
1063
1064     // number of times we detect next heap's mark stack
1065     // is not busy.
1066     size_t stack_idle_count;
1067
1068     // number of times we do switch to thread.
1069     size_t switch_to_thread_count;
1070
1071     // number of times we are checking if the next heap's
1072     // mark stack is busy.
1073     size_t check_level_count;
1074     // number of times next stack is busy and level is 
1075     // at the bottom.
1076     size_t busy_count;
1077     // how many interlocked exchange operations we did
1078     size_t interlocked_count;
1079 // number of times parent objects stolen
1080     size_t partial_mark_parent_count;
1081 // number of times we look at a normal stolen entry,
1082     // or the beginning/ending PM pair.
1083     size_t stolen_or_pm_count; 
1084     // number of times we see 2 for the entry.
1085     size_t stolen_entry_count; 
1086     // number of times we see a PM entry that's not ready.
1087     size_t pm_not_ready_count; 
1088     // number of stolen normal marked objects and partial mark children.
1089     size_t normal_count;
1090     // number of times the bottom of mark stack was cleared.
1091     size_t stack_bottom_clear_count;
1092 };
1093 #endif //SNOOP_STATS
1094
1095 struct no_gc_region_info
1096 {
1097     size_t soh_allocation_size;
1098     size_t loh_allocation_size;
1099     size_t started;
1100     size_t num_gcs;
1101     size_t num_gcs_induced;
1102     start_no_gc_region_status start_status;
1103     gc_pause_mode saved_pause_mode;
1104     size_t saved_gen0_min_size;
1105     size_t saved_gen3_min_size;
1106     BOOL minimal_gc_p;
1107 };
1108
1109 //class definition of the internal class
1110 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1111 extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1112 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1113 class gc_heap
1114 {
1115     friend struct ::_DacGlobals;
1116 #ifdef DACCESS_COMPILE
1117     friend class ::ClrDataAccess;
1118     friend class ::DacHeapWalker;
1119 #endif //DACCESS_COMPILE
1120
1121     friend class GCHeap;
1122 #ifdef FEATURE_PREMORTEM_FINALIZATION
1123     friend class CFinalize;
1124 #endif // FEATURE_PREMORTEM_FINALIZATION
1125     friend struct ::alloc_context;
1126     friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, DWORD dwFlags);
1127     friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1128     friend class t_join;
1129     friend class gc_mechanisms;
1130     friend class seg_free_spaces;
1131
1132 #ifdef BACKGROUND_GC
1133     friend class exclusive_sync;
1134     friend class recursive_gc_sync;
1135 #endif //BACKGROUND_GC
1136
1137 #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1138     friend void checkGCWriteBarrier();
1139     friend void initGCShadow();
1140 #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1141
1142 #ifdef MULTIPLE_HEAPS
1143     typedef void (gc_heap::* card_fn) (BYTE**, int);
1144 #define call_fn(fn) (this->*fn)
1145 #define __this this
1146 #else
1147     typedef void (* card_fn) (BYTE**);
1148 #define call_fn(fn) (*fn)
1149 #define __this (gc_heap*)0
1150 #endif
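// Illustrative call through the card_fn/call_fn pair above (a sketch; 'fn' and
// 'ppslot' are hypothetical):
//
//   call_fn(fn) (ppslot THREAD_NUMBER_ARG);
//
// expands to (this->*fn)(ppslot, thread) in the server build (member-function pointer
// plus the calling heap's thread number) and to (*fn)(ppslot) in the workstation build.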
1151
1152 public:
1153
1154 #ifdef TRACE_GC
1155     PER_HEAP
1156     void print_free_list (int gen, heap_segment* seg);
1157 #endif // TRACE_GC
1158
1159 #ifdef SYNCHRONIZATION_STATS
1160
1161     PER_HEAP_ISOLATED
1162     void init_sync_stats()
1163     {
1164 #ifdef MULTIPLE_HEAPS
1165         for (int i = 0; i < gc_heap::n_heaps; i++)
1166         {
1167             gc_heap::g_heaps[i]->init_heap_sync_stats();
1168         }
1169 #else  //MULTIPLE_HEAPS
1170         init_heap_sync_stats();
1171 #endif  //MULTIPLE_HEAPS
1172     }
1173
1174     PER_HEAP_ISOLATED
1175     void print_sync_stats(unsigned int gc_count_during_log)
1176     {
1177         // bad/good gl acquire is cumulative during the log interval (because the numbers are too small)
1178         // min/max msl_acquire is the min/max during the log interval, not each GC.
1179         // Threads is the number of allocation threads for the last GC.
1180         // num of msl acquired, avg_msl, high and low are all for each GC.
1181         printf("%2s%2s%10s%10s%12s%6s%4s%8s(  st,  wl, stw, dpw)\n",
1182             "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
1183
1184 #ifdef MULTIPLE_HEAPS
1185         for (int i = 0; i < gc_heap::n_heaps; i++)
1186         {
1187             gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
1188         }
1189 #else  //MULTIPLE_HEAPS
1190         print_heap_sync_stats(0, gc_count_during_log);
1191 #endif  //MULTIPLE_HEAPS
1192     }
1193
1194 #endif //SYNCHRONIZATION_STATS
1195
1196     PER_HEAP
1197     void verify_soh_segment_list();
1198     PER_HEAP
1199     void verify_mark_array_cleared (heap_segment* seg);
1200     PER_HEAP
1201     void verify_mark_array_cleared();
1202     PER_HEAP
1203     void verify_seg_end_mark_array_cleared();
1204     PER_HEAP
1205     void verify_partial();
1206
1207 #ifdef VERIFY_HEAP
1208     PER_HEAP
1209     void verify_free_lists(); 
1210     PER_HEAP
1211     void verify_heap (BOOL begin_gc_p);
1212 #endif //VERIFY_HEAP
1213
1214     PER_HEAP_ISOLATED
1215     void fire_pevents();
1216
1217 #ifdef FEATURE_BASICFREEZE
1218     static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
1219 #endif
1220
1221     static
1222     heap_segment* make_heap_segment (BYTE* new_pages, 
1223                                      size_t size, 
1224                                      int h_number);
1225     static
1226     l_heap* make_large_heap (BYTE* new_pages, size_t size, BOOL managed);
1227
1228     static
1229     gc_heap* make_gc_heap(
1230 #if defined (MULTIPLE_HEAPS)
1231         GCHeap* vm_heap,
1232         int heap_number
1233 #endif //MULTIPLE_HEAPS
1234         );
1235
1236     static
1237     void destroy_gc_heap(gc_heap* heap);
1238
1239     static
1240     HRESULT initialize_gc  (size_t segment_size,
1241                             size_t heap_size
1242 #ifdef MULTIPLE_HEAPS
1243                             , unsigned number_of_heaps
1244 #endif //MULTIPLE_HEAPS
1245         );
1246
1247     static
1248     void shutdown_gc();
1249
1250     PER_HEAP
1251     CObjectHeader* allocate (size_t jsize,
1252                              alloc_context* acontext);
1253
1254 #ifdef MULTIPLE_HEAPS
1255     static void balance_heaps (alloc_context* acontext);
1256     static 
1257     gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
1258     static
1259     DWORD __stdcall gc_thread_stub (void* arg);
1260 #endif //MULTIPLE_HEAPS
1261
1262     CObjectHeader* try_fast_alloc (size_t jsize);
1263
1264     // For LOH allocations we only update the alloc_bytes_loh in allocation
1265     // context - we don't actually use the ptr/limit from it so I am
1266     // making this explicit by not passing in the alloc_context.
1267     PER_HEAP
1268     CObjectHeader* allocate_large_object (size_t size, __int64& alloc_bytes);
1269
1270 #ifdef FEATURE_STRUCTALIGN
1271     PER_HEAP
1272     BYTE* pad_for_alignment_large (BYTE* newAlloc, int requiredAlignment, size_t size);
1273 #endif // FEATURE_STRUCTALIGN
1274
1275     PER_HEAP
1276     void do_pre_gc();
1277
1278     PER_HEAP
1279     void do_post_gc();
1280
1281     PER_HEAP
1282     BOOL expand_soh_with_minimal_gc();
1283
1284     // EE is always suspended when this method is called.
1285     // returning FALSE means we actually didn't do a GC. This happens
1286     // when we figured that we needed to do a BGC.
1287     PER_HEAP
1288     int garbage_collect (int n);
1289
1290     static 
1291     DWORD* make_card_table (BYTE* start, BYTE* end);
1292
1293     static
1294     void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
1295
1296     static
1297     int grow_brick_card_tables (BYTE* start, 
1298                                 BYTE* end, 
1299                                 size_t size,
1300                                 heap_segment* new_seg, 
1301                                 gc_heap* hp,
1302                                 BOOL loh_p);
1303
1304     PER_HEAP
1305     BOOL is_mark_set (BYTE* o);
1306
1307 protected:
1308
1309     PER_HEAP
1310     void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1311
1312     struct walk_relocate_args
1313     {
1314         BYTE* last_plug;
1315         BOOL is_shortened;
1316         mark* pinned_plug_entry;
1317     };
1318
1319     PER_HEAP
1320     void walk_plug (BYTE* plug, size_t size, BOOL check_last_object_p, 
1321                     walk_relocate_args* args, size_t profiling_context);
1322
1323     PER_HEAP
1324     void walk_relocation (int condemned_gen_number,
1325                           BYTE* first_condemned_address, size_t profiling_context);
1326
1327     PER_HEAP
1328     void walk_relocation_in_brick (BYTE* tree, walk_relocate_args* args, size_t profiling_context);
1329
1330 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1331     PER_HEAP
1332     void walk_relocation_for_bgc(size_t profiling_context);
1333
1334     PER_HEAP
1335     void make_free_lists_for_profiler_for_bgc();
1336 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1337
1338     PER_HEAP
1339     int generation_to_condemn (int n, 
1340                                BOOL* blocking_collection_p,
1341                                BOOL* elevation_requested_p,
1342                                BOOL check_only_p);
1343
1344     PER_HEAP_ISOLATED
1345     int joined_generation_to_condemn (BOOL should_evaluate_elevation, int n_initial, BOOL* blocking_collection
1346                                         STRESS_HEAP_ARG(int n_original));
1347
1348     PER_HEAP_ISOLATED
1349     size_t min_reclaim_fragmentation_threshold(ULONGLONG total_mem, DWORD num_heaps);
1350
1351     PER_HEAP_ISOLATED
1352     ULONGLONG min_high_fragmentation_threshold(ULONGLONG available_mem, DWORD num_heaps);
1353
1354     PER_HEAP
1355     void concurrent_print_time_delta (const char* msg);
1356     PER_HEAP
1357     void free_list_info (int gen_num, const char* msg);
1358
1359     // in svr GC on entry and exit of this method, the GC threads are not 
1360     // synchronized
1361     PER_HEAP
1362     void gc1();
1363
1364     PER_HEAP_ISOLATED
1365     void save_data_for_no_gc();
1366
1367     PER_HEAP_ISOLATED
1368     void restore_data_for_no_gc();
1369
1370     PER_HEAP_ISOLATED
1371     void update_collection_counts_for_no_gc();
1372
1373     PER_HEAP_ISOLATED
1374     BOOL should_proceed_with_gc();
1375
1376     PER_HEAP_ISOLATED
1377     void record_gcs_during_no_gc();
1378
1379     PER_HEAP
1380     BOOL find_loh_free_for_no_gc();
1381
1382     PER_HEAP
1383     BOOL find_loh_space_for_no_gc();
1384
1385     PER_HEAP
1386     BOOL commit_loh_for_no_gc (heap_segment* seg);
1387
1388     PER_HEAP_ISOLATED
1389     start_no_gc_region_status prepare_for_no_gc_region (ULONGLONG total_size, 
1390                                                         BOOL loh_size_known, 
1391                                                         ULONGLONG loh_size, 
1392                                                         BOOL disallow_full_blocking);
1393
1394     PER_HEAP
1395     BOOL loh_allocated_for_no_gc();
1396
1397     PER_HEAP_ISOLATED
1398     void release_no_gc_loh_segments();    
1399
1400     PER_HEAP_ISOLATED
1401     void thread_no_gc_loh_segments();
1402
1403     PER_HEAP
1404     void allocate_for_no_gc_after_gc();
1405
1406     PER_HEAP
1407     void set_loh_allocations_for_no_gc();
1408
1409     PER_HEAP
1410     void set_soh_allocations_for_no_gc();
1411
1412     PER_HEAP
1413     void prepare_for_no_gc_after_gc();
1414
1415     PER_HEAP_ISOLATED
1416     void set_allocations_for_no_gc();
1417
1418     PER_HEAP_ISOLATED
1419     BOOL should_proceed_for_no_gc();
1420
1421     PER_HEAP_ISOLATED
1422     start_no_gc_region_status get_start_no_gc_region_status();
1423
1424     PER_HEAP_ISOLATED
1425     end_no_gc_region_status end_no_gc_region();
1426
1427     PER_HEAP_ISOLATED
1428     void handle_failure_for_no_gc();
1429
1430     PER_HEAP
1431     void fire_etw_allocation_event (size_t allocation_amount, int gen_number, BYTE* object_address);
1432
1433     PER_HEAP
1434     void fire_etw_pin_object_event (BYTE* object, BYTE** ppObject);
1435
1436     PER_HEAP
1437     size_t limit_from_size (size_t size, size_t room, int gen_number,
1438                             int align_const);
1439     PER_HEAP
1440     int try_allocate_more_space (alloc_context* acontext, size_t jsize,
1441                                  int alloc_generation_number);
1442     PER_HEAP
1443     BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
1444                               int alloc_generation_number);
1445
1446     PER_HEAP
1447     size_t get_full_compact_gc_count();
1448
1449     PER_HEAP
1450     BOOL short_on_end_of_seg (int gen_number,
1451                               heap_segment* seg,
1452                               int align_const);
1453
1454     PER_HEAP
1455     BOOL a_fit_free_list_p (int gen_number, 
1456                             size_t size, 
1457                             alloc_context* acontext,
1458                             int align_const);
1459
1460 #ifdef BACKGROUND_GC
1461     PER_HEAP
1462     void wait_for_background (alloc_wait_reason awr);
1463
1464     PER_HEAP
1465     void wait_for_bgc_high_memory (alloc_wait_reason awr);
1466
1467     PER_HEAP
1468     void bgc_loh_alloc_clr (BYTE* alloc_start, 
1469                             size_t size, 
1470                             alloc_context* acontext,
1471                             int align_const, 
1472                             int lock_index,
1473                             BOOL check_used_p,
1474                             heap_segment* seg);
1475 #endif //BACKGROUND_GC
1476     
1477 #ifdef BACKGROUND_GC
1478     PER_HEAP
1479     void wait_for_background_planning (alloc_wait_reason awr);
1480
1481     PER_HEAP
1482     BOOL bgc_loh_should_allocate();
1483 #endif //BACKGROUND_GC
1484
1485 #define max_saved_spinlock_info 48
1486
1487 #ifdef SPINLOCK_HISTORY
1488     PER_HEAP
1489     int spinlock_info_index;
1490
1491     PER_HEAP
1492     spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
1493 #endif //SPINLOCK_HISTORY
1494
1495     PER_HEAP
1496     void add_saved_spinlock_info (
1497             msl_enter_state enter_state, 
1498             msl_take_state take_state);
1499
1500     PER_HEAP
1501     BOOL a_fit_free_list_large_p (size_t size, 
1502                                   alloc_context* acontext,
1503                                   int align_const);
1504
1505     PER_HEAP
1506     BOOL a_fit_segment_end_p (int gen_number,
1507                               heap_segment* seg,
1508                               size_t size, 
1509                               alloc_context* acontext,
1510                               int align_const,
1511                               BOOL* commit_failed_p);
1512     PER_HEAP
1513     BOOL loh_a_fit_segment_end_p (int gen_number,
1514                                   size_t size, 
1515                                   alloc_context* acontext,
1516                                   int align_const,
1517                                   BOOL* commit_failed_p,
1518                                   oom_reason* oom_r);
1519     PER_HEAP
1520     BOOL loh_get_new_seg (generation* gen,
1521                           size_t size,
1522                           int align_const,
1523                           BOOL* commit_failed_p,
1524                           oom_reason* oom_r);
1525
1526     PER_HEAP_ISOLATED
1527     size_t get_large_seg_size (size_t size);
1528
1529     PER_HEAP
1530     BOOL retry_full_compact_gc (size_t size);
1531
1532     PER_HEAP
1533     BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
1534                                  BOOL* did_full_compact_gc);
1535
1536     PER_HEAP
1537     BOOL trigger_full_compact_gc (gc_reason gr, 
1538                                   oom_reason* oom_r);
1539
1540     PER_HEAP
1541     BOOL trigger_ephemeral_gc (gc_reason gr);
1542
1543     PER_HEAP
1544     BOOL soh_try_fit (int gen_number,
1545                       size_t size, 
1546                       alloc_context* acontext,
1547                       int align_const,
1548                       BOOL* commit_failed_p,
1549                       BOOL* short_seg_end_p);
1550     PER_HEAP
1551     BOOL loh_try_fit (int gen_number,
1552                       size_t size, 
1553                       alloc_context* acontext,
1554                       int align_const,
1555                       BOOL* commit_failed_p,
1556                       oom_reason* oom_r);
1557
1558     PER_HEAP
1559     BOOL allocate_small (int gen_number,
1560                          size_t size, 
1561                          alloc_context* acontext,
1562                          int align_const);
1563
1564     enum c_gc_state
1565     {
1566         c_gc_state_marking,
1567         c_gc_state_planning,
1568         c_gc_state_free
1569     };
1570
1571 #ifdef RECORD_LOH_STATE
1572     #define max_saved_loh_states 12
1573     PER_HEAP
1574     int loh_state_index;
1575
1576     struct loh_state_info
1577     {
1578         allocation_state alloc_state;
1579         DWORD thread_id;
1580     };
1581
1582     PER_HEAP
1583     loh_state_info last_loh_states[max_saved_loh_states];
1584     PER_HEAP
1585     void add_saved_loh_state (allocation_state loh_state_to_save, DWORD thread_id);
1586 #endif //RECORD_LOH_STATE
1587     PER_HEAP
1588     BOOL allocate_large (int gen_number,
1589                          size_t size, 
1590                          alloc_context* acontext,
1591                          int align_const);
1592
1593     PER_HEAP_ISOLATED
1594     int init_semi_shared();
1595     PER_HEAP
1596     int init_gc_heap (int heap_number);
1597     PER_HEAP
1598     void self_destroy();
1599     PER_HEAP_ISOLATED
1600     void destroy_semi_shared();
1601     PER_HEAP
1602     void repair_allocation_contexts (BOOL repair_p);
1603     PER_HEAP
1604     void fix_allocation_contexts (BOOL for_gc_p);
1605     PER_HEAP
1606     void fix_youngest_allocation_area (BOOL for_gc_p);
1607     PER_HEAP
1608     void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
1609                                  int align_const);
1610     PER_HEAP
1611     void fix_large_allocation_area (BOOL for_gc_p);
1612     PER_HEAP
1613     void fix_older_allocation_area (generation* older_gen);
1614     PER_HEAP
1615     void set_allocation_heap_segment (generation* gen);
1616     PER_HEAP
1617     void reset_allocation_pointers (generation* gen, BYTE* start);
1618     PER_HEAP
1619     int object_gennum (BYTE* o);
1620     PER_HEAP
1621     int object_gennum_plan (BYTE* o);
1622     PER_HEAP_ISOLATED
1623     void init_heap_segment (heap_segment* seg);
1624     PER_HEAP
1625     void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
1626 #ifdef FEATURE_BASICFREEZE
1627     PER_HEAP
1628     BOOL insert_ro_segment (heap_segment* seg);
1629     PER_HEAP
1630     void remove_ro_segment (heap_segment* seg);
1631 #endif //FEATURE_BASICFREEZE
1632     PER_HEAP
1633     BOOL set_ro_segment_in_range (heap_segment* seg);
1634     PER_HEAP
1635     BOOL unprotect_segment (heap_segment* seg);
1636     PER_HEAP
1637     heap_segment* soh_get_segment_to_expand();
1638     PER_HEAP
1639     heap_segment* get_segment (size_t size, BOOL loh_p);
1640     PER_HEAP_ISOLATED
1641     void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
1642     PER_HEAP_ISOLATED
1643     void seg_mapping_table_remove_segment (heap_segment* seg);
1644     PER_HEAP
1645     heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc);
1646     PER_HEAP
1647     void thread_loh_segment (heap_segment* new_seg);
1648     PER_HEAP_ISOLATED
1649     heap_segment* get_segment_for_loh (size_t size
1650 #ifdef MULTIPLE_HEAPS
1651                                       , gc_heap* hp
1652 #endif //MULTIPLE_HEAPS
1653                                       );
1654     PER_HEAP
1655     void reset_heap_segment_pages (heap_segment* seg);
1656     PER_HEAP
1657     void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1658     PER_HEAP
1659     void decommit_heap_segment (heap_segment* seg);
1660     PER_HEAP
1661     void clear_gen0_bricks();
1662 #ifdef BACKGROUND_GC
1663     PER_HEAP
1664     void rearrange_small_heap_segments();
1665 #endif //BACKGROUND_GC
1666     PER_HEAP
1667     void rearrange_large_heap_segments();
1668     PER_HEAP
1669     void rearrange_heap_segments(BOOL compacting);
1670     PER_HEAP
1671     void switch_one_quantum();
1672     PER_HEAP
1673     void reset_ww_by_chunk (BYTE* start_address, size_t total_reset_size);
1674     PER_HEAP
1675     void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1676     PER_HEAP
1677     void reset_write_watch (BOOL concurrent_p);
1678     PER_HEAP
1679     void adjust_ephemeral_limits ();
1680     PER_HEAP
1681     void make_generation (generation& gen, heap_segment* seg,
1682                           BYTE* start, BYTE* pointer);
1683
1684
1685 #define USE_PADDING_FRONT 1
1686 #define USE_PADDING_TAIL  2
1687
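    // Illustrative usage (an assumption added for clarity, not from the original
    // source): the padding flags are bit flags and can be combined when both
    // front and tail padding are needed, e.g.
    //   size_fit_p (size /* + alignment args */, alloc_pointer, alloc_limit,
    //               old_loc, USE_PADDING_FRONT | USE_PADDING_TAIL);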
1688     PER_HEAP
1689     BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, BYTE* alloc_pointer, BYTE* alloc_limit,
1690                      BYTE* old_loc=0, int use_padding=USE_PADDING_TAIL);
1691     PER_HEAP
1692     BOOL a_size_fit_p (size_t size, BYTE* alloc_pointer, BYTE* alloc_limit,
1693                        int align_const);
1694
1695     PER_HEAP
1696     void handle_oom (int heap_num, oom_reason reason, size_t alloc_size, 
1697                      BYTE* allocated, BYTE* reserved);
1698
1699     PER_HEAP
1700     size_t card_of ( BYTE* object);
1701     PER_HEAP
1702     BYTE* brick_address (size_t brick);
1703     PER_HEAP
1704     size_t brick_of (BYTE* add);
1705     PER_HEAP
1706     BYTE* card_address (size_t card);
1707     PER_HEAP
1708     size_t card_to_brick (size_t card);
1709     PER_HEAP
1710     void clear_card (size_t card);
1711     PER_HEAP
1712     void set_card (size_t card);
1713     PER_HEAP
1714     BOOL  card_set_p (size_t card);
1715     PER_HEAP
1716     void card_table_set_bit (BYTE* location);
1717
1718 #ifdef CARD_BUNDLE
1719     PER_HEAP
1720     void update_card_table_bundle();
1721     PER_HEAP
1722     void reset_card_table_write_watch();
1723     PER_HEAP
1724     void card_bundle_clear(size_t cardb);
1725     PER_HEAP
1726     void card_bundles_set (size_t start_cardb, size_t end_cardb);
1727     PER_HEAP
1728     BOOL card_bundle_set_p (size_t cardb);
1729     PER_HEAP
1730     BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1731     PER_HEAP
1732     void enable_card_bundles();
1733     PER_HEAP_ISOLATED
1734     BOOL card_bundles_enabled();
1735
1736 #endif //CARD_BUNDLE
1737
1738     PER_HEAP
1739     BOOL find_card (DWORD* card_table, size_t& card,
1740                     size_t card_word_end, size_t& end_card);
1741     PER_HEAP
1742     BOOL grow_heap_segment (heap_segment* seg, BYTE* high_address);
1743     PER_HEAP
1744     int grow_heap_segment (heap_segment* seg, BYTE* high_address, BYTE* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1745     PER_HEAP
1746     void copy_brick_card_range (BYTE* la, DWORD* old_card_table,
1747                                 short* old_brick_table,
1748                                 heap_segment* seg,
1749                                 BYTE* start, BYTE* end, BOOL heap_expand);
1750     PER_HEAP
1751     void init_brick_card_range (heap_segment* seg);
1752     PER_HEAP
1753     void copy_brick_card_table_l_heap ();
1754     PER_HEAP
1755     void copy_brick_card_table(BOOL heap_expand);
1756     PER_HEAP
1757     void clear_brick_table (BYTE* from, BYTE* end);
1758     PER_HEAP
1759     void set_brick (size_t index, ptrdiff_t val);
1760     PER_HEAP
1761     int brick_entry (size_t index);
1762 #ifdef MARK_ARRAY
1763     PER_HEAP
1764     unsigned int mark_array_marked (BYTE* add);
1765     PER_HEAP
1766     void mark_array_set_marked (BYTE* add);
1767     PER_HEAP
1768     BOOL is_mark_bit_set (BYTE* add);
1769     PER_HEAP
1770     void gmark_array_set_marked (BYTE* add);
1771     PER_HEAP
1772     void set_mark_array_bit (size_t mark_bit);
1773     PER_HEAP
1774     BOOL mark_array_bit_set (size_t mark_bit);
1775     PER_HEAP
1776     void mark_array_clear_marked (BYTE* add);
1777     PER_HEAP
1778     void clear_mark_array (BYTE* from, BYTE* end, BOOL check_only=TRUE);
1779 #ifdef BACKGROUND_GC
1780     PER_HEAP
1781     void seg_clear_mark_array_bits_soh (heap_segment* seg);
1782     PER_HEAP
1783     void clear_batch_mark_array_bits (BYTE* start, BYTE* end);
1784     PER_HEAP
1785     void bgc_clear_batch_mark_array_bits (BYTE* start, BYTE* end);
1786     PER_HEAP
1787     void clear_mark_array_by_objects (BYTE* from, BYTE* end, BOOL loh_p);
1788 #ifdef VERIFY_HEAP
1789     PER_HEAP
1790     void set_batch_mark_array_bits (BYTE* start, BYTE* end);
1791     PER_HEAP
1792     void check_batch_mark_array_bits (BYTE* start, BYTE* end);
1793 #endif //VERIFY_HEAP
1794 #endif //BACKGROUND_GC
1795 #endif //MARK_ARRAY
1796
1797     PER_HEAP
1798     BOOL large_object_marked (BYTE* o, BOOL clearp);
1799
1800 #ifdef BACKGROUND_GC
1801     PER_HEAP
1802     BOOL background_allowed_p();
1803 #endif //BACKGROUND_GC
1804
1805     PER_HEAP_ISOLATED
1806     void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1807
1808     PER_HEAP
1809     void check_for_full_gc (int gen_num, size_t size);
1810
1811     PER_HEAP
1812     void adjust_limit (BYTE* start, size_t limit_size, generation* gen,
1813                        int gen_number);
1814     PER_HEAP
1815     void adjust_limit_clr (BYTE* start, size_t limit_size,
1816                            alloc_context* acontext, heap_segment* seg,
1817                            int align_const);
1818     PER_HEAP
1819     void  leave_allocation_segment (generation* gen);
1820
1821     PER_HEAP
1822     void init_free_and_plug();
1823
1824     PER_HEAP
1825     void print_free_and_plug (const char* msg);
1826
1827     PER_HEAP
1828     void add_gen_plug (int gen_number, size_t plug_size);
1829
1830     PER_HEAP
1831     void add_gen_free (int gen_number, size_t free_size);
1832
1833     PER_HEAP
1834     void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1835     
1836     PER_HEAP
1837     void remove_gen_free (int gen_number, size_t free_size);
1838
1839     PER_HEAP
1840     BYTE* allocate_in_older_generation (generation* gen, size_t size,
1841                                         int from_gen_number,
1842                                         BYTE* old_loc=0
1843                                         REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1844     PER_HEAP
1845     generation*  ensure_ephemeral_heap_segment (generation* consing_gen);
1846     PER_HEAP
1847     BYTE* allocate_in_condemned_generations (generation* gen,
1848                                              size_t size,
1849                                              int from_gen_number,
1850 #ifdef SHORT_PLUGS
1851                                              BYTE* next_pinned_plug=0,
1852                                              heap_segment* current_seg=0,
1853 #endif //SHORT_PLUGS
1854                                              BYTE* old_loc=0
1855                                              REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1856 #ifdef INTERIOR_POINTERS
1857     // Verifies that interior is actually in the range of seg; otherwise 
1858     // returns 0.
1859     PER_HEAP_ISOLATED
1860     heap_segment* find_segment (BYTE* interior, BOOL small_segment_only_p);
1861
1862     PER_HEAP
1863     heap_segment* find_segment_per_heap (BYTE* interior, BOOL small_segment_only_p);
1864
1865     PER_HEAP
1866     BYTE* find_object_for_relocation (BYTE* o, BYTE* low, BYTE* high);
1867 #endif //INTERIOR_POINTERS
1868
1869     PER_HEAP_ISOLATED
1870     gc_heap* heap_of (BYTE* object);
1871
1872     PER_HEAP_ISOLATED
1873     gc_heap* heap_of_gc (BYTE* object);
1874
1875     PER_HEAP_ISOLATED
1876     size_t&  promoted_bytes (int);
1877
1878     PER_HEAP
1879     BYTE* find_object (BYTE* o, BYTE* low);
1880
1881     PER_HEAP
1882     dynamic_data* dynamic_data_of (int gen_number);
1883     PER_HEAP
1884     ptrdiff_t  get_desired_allocation (int gen_number);
1885     PER_HEAP
1886     ptrdiff_t  get_new_allocation (int gen_number);
1887     PER_HEAP
1888     ptrdiff_t  get_allocation (int gen_number);
1889     PER_HEAP
1890     bool new_allocation_allowed (int gen_number);
1891 #ifdef BACKGROUND_GC
1892     PER_HEAP_ISOLATED
1893     void allow_new_allocation (int gen_number);
1894     PER_HEAP_ISOLATED
1895     void disallow_new_allocation (int gen_number);
1896 #endif //BACKGROUND_GC
1897     PER_HEAP
1898     void reset_pinned_queue();
1899     PER_HEAP
1900     void reset_pinned_queue_bos();
1901     PER_HEAP
1902     void set_allocator_next_pin (generation* gen);
1903     PER_HEAP
1904     void set_allocator_next_pin (BYTE* alloc_pointer, BYTE*& alloc_limit);
1905     PER_HEAP
1906     void enque_pinned_plug (generation* gen, BYTE* plug, size_t len);
1907     PER_HEAP
1908     void enque_pinned_plug (BYTE* plug, 
1909                             BOOL save_pre_plug_info_p, 
1910                             BYTE* last_object_in_last_plug);
1911     PER_HEAP
1912     void merge_with_last_pinned_plug (BYTE* last_pinned_plug, size_t plug_size);
1913     PER_HEAP
1914     void set_pinned_info (BYTE* last_pinned_plug, 
1915                           size_t plug_len, 
1916                           BYTE* alloc_pointer, 
1917                           BYTE*& alloc_limit);
1918     PER_HEAP
1919     void set_pinned_info (BYTE* last_pinned_plug, size_t plug_len, generation* gen);
1920     PER_HEAP
1921     void save_post_plug_info (BYTE* last_pinned_plug, BYTE* last_object_in_last_plug, BYTE* post_plug);
1922     PER_HEAP
1923     size_t deque_pinned_plug ();
1924     PER_HEAP
1925     mark* pinned_plug_of (size_t bos);
1926     PER_HEAP
1927     mark* oldest_pin ();
1928     PER_HEAP
1929     mark* before_oldest_pin();
1930     PER_HEAP
1931     BOOL pinned_plug_que_empty_p ();
1932     PER_HEAP
1933     void make_mark_stack (mark* arr);
1934 #ifdef MH_SC_MARK
1935     PER_HEAP
1936     int& mark_stack_busy();
1937     PER_HEAP
1938     VOLATILE(BYTE*)& ref_mark_stack (gc_heap* hp, int index);
1939 #endif //MH_SC_MARK
1940 #ifdef BACKGROUND_GC
1941     PER_HEAP_ISOLATED
1942     size_t&  bpromoted_bytes (int);
1943     PER_HEAP
1944     void make_background_mark_stack (BYTE** arr);
1945     PER_HEAP
1946     void make_c_mark_list (BYTE** arr);
1947 #endif //BACKGROUND_GC
1948     PER_HEAP
1949     generation* generation_of (int  n);
1950     PER_HEAP
1951     BOOL gc_mark1 (BYTE* o);
1952     PER_HEAP
1953     BOOL gc_mark (BYTE* o, BYTE* low, BYTE* high);
1954     PER_HEAP
1955     BYTE* mark_object(BYTE* o THREAD_NUMBER_DCL);
1956 #ifdef HEAP_ANALYZE
1957     PER_HEAP
1958     void ha_mark_object_simple (BYTE** o THREAD_NUMBER_DCL);
1959 #endif //HEAP_ANALYZE
1960     PER_HEAP
1961     void mark_object_simple (BYTE** o THREAD_NUMBER_DCL);
1962     PER_HEAP
1963     void mark_object_simple1 (BYTE* o, BYTE* start THREAD_NUMBER_DCL);
1964
1965 #ifdef MH_SC_MARK
1966     PER_HEAP
1967     void mark_steal ();
1968 #endif //MH_SC_MARK
1969
1970 #ifdef BACKGROUND_GC
1971
1972     PER_HEAP
1973     BOOL background_marked (BYTE* o);
1974     PER_HEAP
1975     BOOL background_mark1 (BYTE* o);
1976     PER_HEAP
1977     BOOL background_mark (BYTE* o, BYTE* low, BYTE* high);
1978     PER_HEAP
1979     BYTE* background_mark_object (BYTE* o THREAD_NUMBER_DCL);
1980     PER_HEAP
1981     void background_mark_simple (BYTE* o THREAD_NUMBER_DCL);
1982     PER_HEAP
1983     void background_mark_simple1 (BYTE* o THREAD_NUMBER_DCL);
1984     PER_HEAP_ISOLATED
1985     void background_promote (Object**, ScanContext* , DWORD);
1986     PER_HEAP
1987     BOOL background_object_marked (BYTE* o, BOOL clearp);
1988     PER_HEAP
1989     void init_background_gc();
1990     PER_HEAP
1991     BYTE* background_next_end (heap_segment*, BOOL);
1992     PER_HEAP
1993     void generation_delete_heap_segment (generation*, 
1994                                          heap_segment*, heap_segment*, heap_segment*);
1995     PER_HEAP
1996     void set_mem_verify (BYTE*, BYTE*, BYTE);
1997     PER_HEAP
1998     void process_background_segment_end (heap_segment*, generation*, BYTE*,
1999                                      heap_segment*, BOOL*);
2000     PER_HEAP
2001     void process_n_background_segments (heap_segment*, heap_segment*, generation* gen);
2002     PER_HEAP
2003     BOOL fgc_should_consider_object (BYTE* o, 
2004                                      heap_segment* seg,
2005                                      BOOL consider_bgc_mark_p, 
2006                                      BOOL check_current_sweep_p,
2007                                      BOOL check_saved_sweep_p);
2008     PER_HEAP
2009     void should_check_bgc_mark (heap_segment* seg, 
2010                                 BOOL* consider_bgc_mark_p, 
2011                                 BOOL* check_current_sweep_p,
2012                                 BOOL* check_saved_sweep_p);
2013     PER_HEAP
2014     void background_ephemeral_sweep();
2015     PER_HEAP
2016     void background_sweep ();
2017     PER_HEAP
2018     void background_mark_through_object (BYTE* oo THREAD_NUMBER_DCL);
2019     PER_HEAP
2020     BYTE* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2021     PER_HEAP
2022     BYTE* background_first_overflow (BYTE* min_add,
2023                                      heap_segment* seg,
2024                                      BOOL concurrent_p, 
2025                                      BOOL small_object_p);
2026     PER_HEAP
2027     void background_process_mark_overflow_internal (int condemned_gen_number,
2028                                                     BYTE* min_add, BYTE* max_add,
2029                                                     BOOL concurrent_p);
2030     PER_HEAP
2031     BOOL background_process_mark_overflow (BOOL concurrent_p);
2032
2033     // for foreground GC to get hold of background structures containing refs
2034     PER_HEAP
2035     void
2036     scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2037
2038     PER_HEAP
2039     BOOL bgc_mark_array_range (heap_segment* seg, 
2040                                BOOL whole_seg_p,
2041                                BYTE** range_beg,
2042                                BYTE** range_end);
2043     PER_HEAP
2044     void bgc_verify_mark_array_cleared (heap_segment* seg);
2045     PER_HEAP
2046     void verify_mark_bits_cleared (BYTE* obj, size_t s);
2047     PER_HEAP
2048     void clear_all_mark_array();
2049 #endif //BACKGROUND_GC
2050
2051     PER_HEAP
2052     BYTE* next_end (heap_segment* seg, BYTE* f);
2053     PER_HEAP
2054     void fix_card_table ();
2055     PER_HEAP
2056     void mark_through_object (BYTE* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2057     PER_HEAP
2058     BOOL process_mark_overflow (int condemned_gen_number);
2059     PER_HEAP
2060     void process_mark_overflow_internal (int condemned_gen_number,
2061                                          BYTE* min_address, BYTE* max_address);
2062
2063 #ifdef SNOOP_STATS
2064     PER_HEAP
2065     void print_snoop_stat();
2066 #endif //SNOOP_STATS
2067
2068 #ifdef MH_SC_MARK
2069
2070     PER_HEAP
2071     BOOL check_next_mark_stack (gc_heap* next_heap);
2072
2073 #endif //MH_SC_MARK
2074
2075     PER_HEAP
2076     void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2077
2078     PER_HEAP
2079     void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2080
2081     PER_HEAP
2082     void pin_object (BYTE* o, BYTE** ppObject, BYTE* low, BYTE* high);
2083     PER_HEAP
2084     void reset_mark_stack ();
2085     PER_HEAP
2086     BYTE* insert_node (BYTE* new_node, size_t sequence_number,
2087                        BYTE* tree, BYTE* last_node);
2088     PER_HEAP
2089     size_t update_brick_table (BYTE* tree, size_t current_brick,
2090                                BYTE* x, BYTE* plug_end);
2091
2092     PER_HEAP
2093     void plan_generation_start (generation* gen, generation* consing_gen, BYTE* next_plug_to_allocate);
2094
2095     PER_HEAP
2096     void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2097
2098     PER_HEAP
2099     void plan_generation_starts (generation*& consing_gen);
2100
2101     PER_HEAP
2102     void advance_pins_for_demotion (generation* gen);
2103
2104     PER_HEAP
2105     void process_ephemeral_boundaries(BYTE* x, int& active_new_gen_number,
2106                                       int& active_old_gen_number,
2107                                       generation*& consing_gen,
2108                                       BOOL& allocate_in_condemned);
2109     PER_HEAP
2110     void seg_clear_mark_bits (heap_segment* seg);
2111     PER_HEAP
2112     void sweep_ro_segments (heap_segment* start_seg);
2113     PER_HEAP
2114     void store_plug_gap_info (BYTE* plug_start,
2115                               BYTE* plug_end,
2116                               BOOL& last_npinned_plug_p, 
2117                               BOOL& last_pinned_plug_p, 
2118                               BYTE*& last_pinned_plug,
2119                               BOOL& pinned_plug_p,
2120                               BYTE* last_object_in_last_plug,
2121                               BOOL& merge_with_last_pin_p,
2122                               // this is only for verification purposes
2123                               size_t last_plug_len);
2124     PER_HEAP
2125     void plan_phase (int condemned_gen_number);
2126
2127 #ifdef FEATURE_LOH_COMPACTION
2128     // plan_loh can allocate memory so it can fail. If it fails, we will
2129     // fall back to sweeping.  
2130     PER_HEAP
2131     BOOL plan_loh();
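    // Illustrative control flow for the fallback described above (a sketch,
    // assuming the caller checks the return value; the real logic is in gc.cpp):
    //   if (!plan_loh())
    //   {
    //       // couldn't allocate the bookkeeping needed to compact - sweep LOH instead
    //   }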
2132
2133     PER_HEAP
2134     void compact_loh();
2135
2136     PER_HEAP
2137     void relocate_in_loh_compact();
2138
2139 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2140     PER_HEAP
2141     void walk_relocation_loh (size_t profiling_context);
2142 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2143
2144     PER_HEAP
2145     BOOL loh_enque_pinned_plug (BYTE* plug, size_t len);
2146
2147     PER_HEAP
2148     void loh_set_allocator_next_pin();
2149
2150     PER_HEAP
2151     BOOL loh_pinned_plug_que_empty_p();
2152
2153     PER_HEAP
2154     size_t loh_deque_pinned_plug();
2155
2156     PER_HEAP
2157     mark* loh_pinned_plug_of (size_t bos);
2158
2159     PER_HEAP
2160     mark* loh_oldest_pin();
2161
2162     PER_HEAP
2163     BOOL loh_size_fit_p (size_t size, BYTE* alloc_pointer, BYTE* alloc_limit);
2164
2165     PER_HEAP
2166     BYTE* loh_allocate_in_condemned (BYTE* old_loc, size_t size);
2167
2168     PER_HEAP_ISOLATED
2169     BOOL loh_object_p (BYTE* o);
2170
2171     PER_HEAP_ISOLATED
2172     BOOL should_compact_loh();
2173
2174     // If the LOH compaction mode is just to compact once,
2175     // we need to see if we should reset it back to not compact.
2176     // We would only reset if every heap's LOH was compacted.
2177     PER_HEAP_ISOLATED
2178     void check_loh_compact_mode  (BOOL all_heaps_compacted_p);
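    // Minimal sketch of the reset described above (the enum value names are
    // assumptions; only the shape of the check is implied by the comment):
    //   if ((loh_compaction_mode == loh_compaction_once) && all_heaps_compacted_p)
    //       loh_compaction_mode = loh_compaction_default;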
2179 #endif //FEATURE_LOH_COMPACTION
2180
2181     PER_HEAP
2182     void decommit_ephemeral_segment_pages (int condemned_gen_number);
2183     PER_HEAP
2184     void fix_generation_bounds (int condemned_gen_number,
2185                                 generation* consing_gen);
2186     PER_HEAP
2187     BYTE* generation_limit (int gen_number);
2188
2189     struct make_free_args
2190     {
2191         int free_list_gen_number;
2192         BYTE* current_gen_limit;
2193         generation* free_list_gen;
2194         BYTE* highest_plug;
2195     };
2196     PER_HEAP
2197     BYTE* allocate_at_end (size_t size);
2198     PER_HEAP
2199     BOOL ensure_gap_allocation (int condemned_gen_number);
2200     // make_free_lists is only called by blocking GCs.
2201     PER_HEAP
2202     void make_free_lists (int condemned_gen_number);
2203     PER_HEAP
2204     void make_free_list_in_brick (BYTE* tree, make_free_args* args);
2205     PER_HEAP
2206     void thread_gap (BYTE* gap_start, size_t size, generation*  gen);
2207     PER_HEAP
2208     void loh_thread_gap_front (BYTE* gap_start, size_t size, generation*  gen);
2209     PER_HEAP
2210     void make_unused_array (BYTE* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2211     PER_HEAP
2212     void clear_unused_array (BYTE* x, size_t size);
2213     PER_HEAP
2214     void relocate_address (BYTE** old_address THREAD_NUMBER_DCL);
2215     struct relocate_args
2216     {
2217         BYTE* last_plug;
2218         BYTE* low;
2219         BYTE* high;
2220         BOOL is_shortened;
2221         mark* pinned_plug_entry;
2222     };
2223
2224     PER_HEAP
2225     void reloc_survivor_helper (BYTE** pval);
2226     PER_HEAP
2227     void check_class_object_demotion (BYTE* obj);
2228     PER_HEAP
2229     void check_class_object_demotion_internal (BYTE* obj);
2230
2231     PER_HEAP 
2232     void check_demotion_helper (BYTE** pval, BYTE* parent_obj);
2233
2234     PER_HEAP
2235     void relocate_survivor_helper (BYTE* plug, BYTE* plug_end);
2236
2237     PER_HEAP
2238     void verify_pins_with_post_plug_info (const char* msg);
2239
2240 #ifdef COLLECTIBLE_CLASS
2241     PER_HEAP
2242     void unconditional_set_card_collectible (BYTE* obj);
2243 #endif //COLLECTIBLE_CLASS
2244
2245     PER_HEAP
2246     void relocate_shortened_survivor_helper (BYTE* plug, BYTE* plug_end, mark* pinned_plug_entry);
2247     
2248     PER_HEAP
2249     void relocate_obj_helper (BYTE* x, size_t s);
2250
2251     PER_HEAP
2252     void reloc_ref_in_shortened_obj (BYTE** address_to_set_card, BYTE** address_to_reloc);
2253
2254     PER_HEAP
2255     void relocate_pre_plug_info (mark* pinned_plug_entry);
2256
2257     PER_HEAP
2258     void relocate_shortened_obj_helper (BYTE* x, size_t s, BYTE* end, mark* pinned_plug_entry, BOOL is_pinned);
2259
2260     PER_HEAP
2261     void relocate_survivors_in_plug (BYTE* plug, BYTE* plug_end,
2262                                      BOOL check_last_object_p, 
2263                                      mark* pinned_plug_entry);
2264     PER_HEAP
2265     void relocate_survivors_in_brick (BYTE* tree, relocate_args* args);
2266
2267     PER_HEAP
2268     void update_oldest_pinned_plug();
2269
2270     PER_HEAP
2271     void relocate_survivors (int condemned_gen_number,
2272                              BYTE* first_condemned_address );
2273     PER_HEAP
2274     void relocate_phase (int condemned_gen_number,
2275                          BYTE* first_condemned_address);
2276
2277     struct compact_args
2278     {
2279         BOOL copy_cards_p;
2280         BYTE* last_plug;
2281         ptrdiff_t last_plug_relocation;
2282         BYTE* before_last_plug;
2283         size_t current_compacted_brick;
2284         BOOL is_shortened;
2285         mark* pinned_plug_entry;
2286         BOOL check_gennum_p;
2287         int src_gennum;
2288
2289         void print()
2290         {
2291             dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2292                 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2293         }
2294     };
2295
2296     PER_HEAP
2297     void copy_cards_range (BYTE* dest, BYTE* src, size_t len, BOOL copy_cards_p);
2298     PER_HEAP
2299     void  gcmemcopy (BYTE* dest, BYTE* src, size_t len, BOOL copy_cards_p);
2300     PER_HEAP
2301     void compact_plug (BYTE* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2302     PER_HEAP
2303     void compact_in_brick (BYTE* tree, compact_args* args);
2304
2305     PER_HEAP
2306     mark* get_next_pinned_entry (BYTE* tree, 
2307                                  BOOL* has_pre_plug_info_p, 
2308                                  BOOL* has_post_plug_info_p,
2309                                  BOOL deque_p=TRUE);
2310
2311     PER_HEAP
2312     mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2313
2314     PER_HEAP
2315     void recover_saved_pinned_info();
2316
2317     PER_HEAP
2318     void compact_phase (int condemned_gen_number, BYTE*
2319                         first_condemned_address, BOOL clear_cards);
2320     PER_HEAP
2321     void clear_cards (size_t start_card, size_t end_card);
2322     PER_HEAP
2323     void clear_card_for_addresses (BYTE* start_address, BYTE* end_address);
2324     PER_HEAP
2325     void copy_cards (size_t dst_card, size_t src_card,
2326                      size_t end_card, BOOL nextp);
2327     PER_HEAP
2328     void copy_cards_for_addresses (BYTE* dest, BYTE* src, size_t len);
2329
2330 #ifdef BACKGROUND_GC
2331     PER_HEAP
2332     void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2333     PER_HEAP
2334     void copy_mark_bits_for_addresses (BYTE* dest, BYTE* src, size_t len);
2335 #endif //BACKGROUND_GC
2336
2337
2338     PER_HEAP
2339     BOOL ephemeral_pointer_p (BYTE* o);
2340     PER_HEAP
2341     void fix_brick_to_highest (BYTE* o, BYTE* next_o);
2342     PER_HEAP
2343     BYTE* find_first_object (BYTE* start_address, BYTE* first_object);
2344     PER_HEAP
2345     BYTE* compute_next_boundary (BYTE* low, int gen_number, BOOL relocating);
2346     PER_HEAP
2347     void keep_card_live (BYTE* o, size_t& n_gen,
2348                          size_t& cg_pointers_found);
2349     PER_HEAP
2350     void mark_through_cards_helper (BYTE** poo, size_t& ngen,
2351                                     size_t& cg_pointers_found,
2352                                     card_fn fn, BYTE* nhigh,
2353                                     BYTE* next_boundary);
2354
2355     PER_HEAP
2356     BOOL card_transition (BYTE* po, BYTE* end, size_t card_word_end,
2357                                size_t& cg_pointers_found, 
2358                                size_t& n_eph, size_t& n_card_set,
2359                                size_t& card, size_t& end_card,
2360                                BOOL& foundp, BYTE*& start_address,
2361                                BYTE*& limit, size_t& n_cards_cleared);
2362     PER_HEAP
2363     void mark_through_cards_for_segments (card_fn fn, BOOL relocating);
2364
2365     PER_HEAP
2366     void repair_allocation_in_expanded_heap (generation* gen);
2367     PER_HEAP
2368     BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2369     PER_HEAP
2370     BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2371     PER_HEAP
2372     BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2373 #ifdef SEG_REUSE_STATS
2374     PER_HEAP
2375     size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2376 #endif //SEG_REUSE_STATS
2377     PER_HEAP
2378     void build_ordered_free_spaces (heap_segment* seg);
2379     PER_HEAP
2380     void count_plug (size_t last_plug_size, BYTE*& last_plug);
2381     PER_HEAP
2382     void count_plugs_in_brick (BYTE* tree, BYTE*& last_plug);
2383     PER_HEAP
2384     void build_ordered_plug_indices ();
2385     PER_HEAP
2386     void init_ordered_free_space_indices ();
2387     PER_HEAP
2388     void trim_free_spaces_indices ();
2389     PER_HEAP
2390     BOOL try_best_fit (BOOL end_of_segment_p);
2391     PER_HEAP
2392     BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2393     PER_HEAP
2394     BOOL process_free_space (heap_segment* seg, 
2395                              size_t free_space,
2396                              size_t min_free_size, 
2397                              size_t min_cont_size,
2398                              size_t* total_free_space,
2399                              size_t* largest_free_space);
2400     PER_HEAP
2401     size_t compute_eph_gen_starts_size();
2402     PER_HEAP
2403     void compute_new_ephemeral_size();
2404     PER_HEAP
2405     BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2406                             size_t min_cont_size, allocator* al);
2407     PER_HEAP
2408     BYTE* allocate_in_expanded_heap (generation* gen, size_t size,
2409                                      BOOL& adjacentp, BYTE* old_loc,
2410 #ifdef SHORT_PLUGS
2411                                      BOOL set_padding_on_saved_p,
2412                                      mark* pinned_plug_entry,
2413 #endif //SHORT_PLUGS
2414                                      BOOL consider_bestfit, int active_new_gen_number
2415                                      REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2416     PER_HEAP
2417     void realloc_plug (size_t last_plug_size, BYTE*& last_plug,
2418                        generation* gen, BYTE* start_address,
2419                        unsigned int& active_new_gen_number,
2420                        BYTE*& last_pinned_gap, BOOL& leftp, 
2421                        BOOL shortened_p
2422 #ifdef SHORT_PLUGS
2423                        , mark* pinned_plug_entry
2424 #endif //SHORT_PLUGS
2425                        );
2426     PER_HEAP
2427     void realloc_in_brick (BYTE* tree, BYTE*& last_plug, BYTE* start_address,
2428                            generation* gen,
2429                            unsigned int& active_new_gen_number,
2430                            BYTE*& last_pinned_gap, BOOL& leftp);
2431     PER_HEAP
2432     void realloc_plugs (generation* consing_gen, heap_segment* seg,
2433                         BYTE* start_address, BYTE* end_address,
2434                         unsigned active_new_gen_number);
2435
2436     PER_HEAP
2437     void set_expand_in_full_gc (int condemned_gen_number);
2438
2439     PER_HEAP
2440     void verify_no_pins (BYTE* start, BYTE* end);
2441
2442     PER_HEAP
2443     generation* expand_heap (int condemned_generation,
2444                              generation* consing_gen,
2445                              heap_segment* new_heap_segment);
2446
2447     PER_HEAP
2448     void save_ephemeral_generation_starts();
2449
2450     static size_t get_time_now();
2451
2452     PER_HEAP
2453     bool init_dynamic_data ();
2454     PER_HEAP
2455     float surv_to_growth (float cst, float limit, float max_limit);
2456     PER_HEAP
2457     size_t desired_new_allocation (dynamic_data* dd, size_t out,
2458                                    int gen_number, int pass);
2459
2460     PER_HEAP
2461     void trim_youngest_desired_low_memory();
2462
2463     PER_HEAP
2464     void decommit_ephemeral_segment_pages();
2465
2466 #ifdef _WIN64
2467     PER_HEAP_ISOLATED
2468     size_t trim_youngest_desired (DWORD memory_load, 
2469                                   size_t total_new_allocation,
2470                                   size_t total_min_allocation);
2471     PER_HEAP_ISOLATED
2472     size_t joined_youngest_desired (size_t new_allocation);
2473 #endif //_WIN64
2474     PER_HEAP_ISOLATED
2475     size_t get_total_heap_size ();
2476     PER_HEAP
2477     size_t generation_size (int gen_number);
2478     PER_HEAP_ISOLATED
2479     size_t get_total_survived_size();
2480     PER_HEAP
2481     size_t get_current_allocated();
2482     PER_HEAP_ISOLATED
2483     size_t get_total_allocated();
2484     PER_HEAP
2485     size_t current_generation_size (int gen_number);
2486     PER_HEAP
2487     size_t generation_plan_size (int gen_number);
2488     PER_HEAP
2489     void  compute_promoted_allocation (int gen_number);
2490     PER_HEAP
2491     size_t  compute_in (int gen_number);
2492     PER_HEAP
2493     void compute_new_dynamic_data (int gen_number);
2494     PER_HEAP
2495     gc_history_per_heap* get_gc_data_per_heap();
2496     PER_HEAP
2497     size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2498     PER_HEAP
2499     size_t generation_fragmentation (generation* gen,
2500                                      generation* consing_gen,
2501                                      BYTE* end);
2502     PER_HEAP
2503     size_t generation_sizes (generation* gen);
2504     PER_HEAP
2505     size_t approximate_new_allocation();
2506     PER_HEAP
2507     size_t end_space_after_gc();
2508     PER_HEAP
2509     BOOL decide_on_compacting (int condemned_gen_number,
2510                                size_t fragmentation,
2511                                BOOL& should_expand);
2512     PER_HEAP
2513     BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2514     PER_HEAP
2515     void reset_large_object (BYTE* o);
2516     PER_HEAP
2517     void sweep_large_objects ();
2518     PER_HEAP
2519     void relocate_in_large_objects ();
2520     PER_HEAP
2521     void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating);
2522     PER_HEAP
2523     void descr_segment (heap_segment* seg);
2524     PER_HEAP
2525     void descr_card_table ();
2526     PER_HEAP
2527     void descr_generations (BOOL begin_gc_p);
2528
2529 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2530     PER_HEAP_ISOLATED
2531     void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2532     PER_HEAP
2533     void record_survived_for_profiler(int condemned_gen_number, BYTE * first_condemned_address);
2534     PER_HEAP
2535     void notify_profiler_of_surviving_large_objects ();
2536 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2537
2538     /*------------ Multiple non isolated heaps ----------------*/
2539 #ifdef MULTIPLE_HEAPS
2540     PER_HEAP_ISOLATED
2541     BOOL   create_thread_support (unsigned number_of_heaps);
2542     PER_HEAP_ISOLATED
2543     void destroy_thread_support ();
2544     PER_HEAP
2545     HANDLE create_gc_thread();
2546     PER_HEAP
2547     DWORD gc_thread_function();
2548 #ifdef MARK_LIST
2549 #ifdef PARALLEL_MARK_LIST_SORT
2550     PER_HEAP
2551     void sort_mark_list();
2552     PER_HEAP
2553     void merge_mark_lists();
2554     PER_HEAP
2555     void append_to_mark_list(BYTE **start, BYTE **end);
2556 #else //PARALLEL_MARK_LIST_SORT
2557     PER_HEAP_ISOLATED
2558     void combine_mark_lists();
2559 #endif //PARALLEL_MARK_LIST_SORT
2560 #endif //MARK_LIST
2561 #endif //MULTIPLE_HEAPS
2562
2563     /*------------ End of Multiple non isolated heaps ---------*/
2564
2565 #ifndef SEG_MAPPING_TABLE
2566     PER_HEAP_ISOLATED
2567     heap_segment* segment_of (BYTE* add,  ptrdiff_t & delta,
2568                               BOOL verify_p = FALSE);
2569 #endif //SEG_MAPPING_TABLE
2570
2571 #ifdef BACKGROUND_GC
2572
2573     //this is called by revisit....
2574     PER_HEAP
2575     BYTE* high_page (heap_segment* seg, BOOL concurrent_p);
2576
2577     PER_HEAP
2578     void revisit_written_page (BYTE* page, BYTE* end, BOOL concurrent_p,
2579                                heap_segment* seg,  BYTE*& last_page,
2580                                BYTE*& last_object, BOOL large_objects_p,
2581                                size_t& num_marked_objects);
2582     PER_HEAP
2583     void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2584
2585     PER_HEAP
2586     void concurrent_scan_dependent_handles (ScanContext *sc);
2587
2588     PER_HEAP_ISOLATED
2589     void suspend_EE ();
2590
2591     PER_HEAP_ISOLATED
2592     void bgc_suspend_EE ();
2593
2594     PER_HEAP_ISOLATED
2595     void restart_EE ();
2596
2597     PER_HEAP
2598     void background_verify_mark (Object*& object, ScanContext* sc, DWORD flags);
2599
2600     PER_HEAP
2601     void background_scan_dependent_handles (ScanContext *sc);
2602
2603     PER_HEAP
2604     void allow_fgc();
2605
2606     // Restores BGC settings if necessary.
2607     PER_HEAP_ISOLATED
2608     void recover_bgc_settings();
2609
2610     PER_HEAP
2611     void save_bgc_data_per_heap();
2612
2613     PER_HEAP
2614     BOOL should_commit_mark_array();
2615
2616     PER_HEAP
2617     void clear_commit_flag();
2618
2619     PER_HEAP_ISOLATED
2620     void clear_commit_flag_global();
2621
2622     PER_HEAP_ISOLATED
2623     void verify_mark_array_cleared (heap_segment* seg, DWORD* mark_array_addr);
2624
2625     PER_HEAP_ISOLATED
2626     void verify_mark_array_cleared (BYTE* begin, BYTE* end, DWORD* mark_array_addr);
2627
2628     PER_HEAP_ISOLATED
2629     BOOL commit_mark_array_by_range (BYTE* begin, 
2630                                      BYTE* end, 
2631                                      DWORD* mark_array_addr);
2632
2633     PER_HEAP_ISOLATED
2634     BOOL commit_mark_array_new_seg (gc_heap* hp, 
2635                                     heap_segment* seg,
2636                                     BYTE* new_lowest_address = 0);
2637
2638     PER_HEAP_ISOLATED
2639     BOOL commit_mark_array_with_check (heap_segment* seg, DWORD* mark_array_addr);
2640
2641     // commit the portion of the mark array that corresponds to 
2642     // this segment (from beginning to reserved).
2643     // seg and heap_segment_reserved (seg) are guaranteed to be 
2644     // page aligned.
2645     PER_HEAP_ISOLATED
2646     BOOL commit_mark_array_by_seg (heap_segment* seg, DWORD* mark_array_addr);
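    // Illustrative shape of the commit (a sketch; the exact expression for the
    // segment start is an assumption, and the helper used is the by-range one
    // declared above):
    //   commit_mark_array_by_range ((BYTE*)seg /* segment start */,
    //                               heap_segment_reserved (seg), mark_array_addr);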
2647
2648     // During BGC init, we commit the mark array for all in range
2649     // segments whose mark array hasn't been committed or fully
2650     // committed. All rw segments are in range, only ro segments
2651     // can be partial in range.
2652     PER_HEAP
2653     BOOL commit_mark_array_bgc_init (DWORD* mark_array_addr);
2654
2655     PER_HEAP
2656     BOOL commit_new_mark_array (DWORD* new_mark_array);
2657
2658     // We need to commit all segments that intersect with the bgc
2659     // range. If a segment is only partially in range, we still
2660     // should commit the mark array for the whole segment as 
2661     // we will set the mark array commit flag for this segment.
2662     PER_HEAP_ISOLATED
2663     BOOL commit_new_mark_array_global (DWORD* new_mark_array);
2664
2665     // We can't decommit the first and the last page in the mark array
2666     // if the beginning and ending don't happen to be page aligned.
2667     PER_HEAP
2668     void decommit_mark_array_by_seg (heap_segment* seg);
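    // Illustrative consequence of the comment above (align_on_page /
    // align_lower_page are assumed page-rounding helpers): only the fully
    // page-aligned interior of the range is decommitted, e.g.
    //   decommit (align_on_page (range_beg), align_lower_page (range_end));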
2669
2670     PER_HEAP
2671     void background_mark_phase();
2672
2673     PER_HEAP
2674     void background_drain_mark_list (int thread);
2675
2676     PER_HEAP
2677     void background_grow_c_mark_list();
2678
2679     PER_HEAP_ISOLATED
2680     void background_promote_callback(Object** object, ScanContext* sc, DWORD flags);
2681
2682     PER_HEAP
2683     void mark_absorb_new_alloc();
2684
2685     PER_HEAP
2686     void restart_vm();
2687
2688     PER_HEAP
2689     BOOL prepare_bgc_thread(gc_heap* gh);
2690     PER_HEAP
2691     BOOL create_bgc_thread(gc_heap* gh);
2692     PER_HEAP_ISOLATED
2693     BOOL create_bgc_threads_support (int number_of_heaps);
2694     PER_HEAP
2695     BOOL create_bgc_thread_support();
2696     PER_HEAP_ISOLATED
2697     int check_for_ephemeral_alloc();
2698     PER_HEAP_ISOLATED
2699     void wait_to_proceed();
2700     PER_HEAP_ISOLATED
2701     void fire_alloc_wait_event_begin (alloc_wait_reason awr);
2702     PER_HEAP_ISOLATED
2703     void fire_alloc_wait_event_end (alloc_wait_reason awr);
2704     PER_HEAP
2705     void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored);
2706     PER_HEAP
2707     DWORD background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
2708     PER_HEAP_ISOLATED
2709     void start_c_gc();
2710     PER_HEAP
2711     void kill_gc_thread();
2712     PER_HEAP
2713     DWORD bgc_thread_function();
2714     PER_HEAP_ISOLATED
2715     void do_background_gc();
2716     static
2717     DWORD __stdcall bgc_thread_stub (void* arg);
2718
2719 #ifdef FEATURE_REDHAWK
2720     // Helper used to wrap the start routine of background GC threads so we can do things like initialize the
2721     // Redhawk thread state which requires running in the new thread's context.
2722     static DWORD WINAPI rh_bgc_thread_stub(void * pContext);
2723
2724     // Context passed to the above.
2725     struct rh_bgc_thread_ctx
2726     {
2727         PTHREAD_START_ROUTINE   m_pRealStartRoutine;
2728         gc_heap *               m_pRealContext;
2729     };
2730 #endif //FEATURE_REDHAWK
2731
2732 #endif //BACKGROUND_GC
2733  
2734 public:
2735
2736     PER_HEAP_ISOLATED
2737     VOLATILE(bool) internal_gc_done;
2738
2739 #ifdef BACKGROUND_GC
2740     PER_HEAP_ISOLATED
2741     DWORD cm_in_progress;
2742
2743     PER_HEAP
2744     BOOL expanded_in_fgc;
2745
2746     // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
2747     // we do right before the bgc starts.
2748     PER_HEAP_ISOLATED
2749     BOOL     dont_restart_ee_p;
2750
2751     PER_HEAP_ISOLATED
2752     CLREvent bgc_start_event;
2753 #endif //BACKGROUND_GC
2754
2755     PER_HEAP_ISOLATED
2756     DWORD wait_for_gc_done(INT32 timeOut = INFINITE);
2757
2758     // Returns TRUE if the thread used to be in cooperative mode 
2759     // before calling this function.
2760     PER_HEAP_ISOLATED
2761     BOOL enable_preemptive (Thread* current_thread);
2762     PER_HEAP_ISOLATED
2763     void disable_preemptive (Thread* current_thread, BOOL restore_cooperative);
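    // Typical usage sketch (illustrative, based on the contract stated above):
    //   BOOL cooperative_mode = enable_preemptive (current_thread);
    //   // ... wait for the GC / an event ...
    //   disable_preemptive (current_thread, cooperative_mode);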
2764
2765     /* ------------------- per heap members --------------------------*/
2766
2767     PER_HEAP
2768 #ifndef MULTIPLE_HEAPS
2769     CLREvent gc_done_event;
2770 #else // MULTIPLE_HEAPS
2771     CLREvent gc_done_event;
2772 #endif // MULTIPLE_HEAPS
2773
2774     PER_HEAP
2775     VOLATILE(LONG) gc_done_event_lock;
2776
2777     PER_HEAP
2778     VOLATILE(bool) gc_done_event_set;
2779
2780     PER_HEAP 
2781     void set_gc_done();
2782
2783     PER_HEAP 
2784     void reset_gc_done();
2785
2786     PER_HEAP
2787     void enter_gc_done_event_lock();
2788
2789     PER_HEAP
2790     void exit_gc_done_event_lock();
2791
2792 #ifdef MULTIPLE_HEAPS
2793     PER_HEAP
2794     BYTE*  ephemeral_low;      //lowest ephemeral address
2795
2796     PER_HEAP
2797     BYTE*  ephemeral_high;     //highest ephemeral address
2798 #endif //MULTIPLE_HEAPS
2799
2800     PER_HEAP
2801     DWORD* card_table;
2802
2803     PER_HEAP
2804     short* brick_table;
2805
2806 #ifdef MARK_ARRAY
2807 #ifdef MULTIPLE_HEAPS
2808     PER_HEAP
2809     DWORD* mark_array;
2810 #else
2811     SPTR_DECL(DWORD, mark_array);
2812 #endif //MULTIPLE_HEAPS
2813 #endif //MARK_ARRAY
2814
2815 #ifdef CARD_BUNDLE
2816     PER_HEAP
2817     DWORD* card_bundle_table;
2818 #endif //CARD_BUNDLE
2819
2820 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2821     PER_HEAP_ISOLATED
2822     sorted_table* seg_table;
2823 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2824
2825     PER_HEAP_ISOLATED
2826     VOLATILE(BOOL) gc_started;
2827
2828     // The following 2 events are there to support the gen2 
2829     // notification feature which is only enabled if concurrent
2830     // GC is disabled.
2831     PER_HEAP_ISOLATED
2832     CLREvent full_gc_approach_event;
2833
2834     PER_HEAP_ISOLATED
2835     CLREvent full_gc_end_event;
2836
2837     // Full GC Notification percentages.
2838     PER_HEAP_ISOLATED
2839     DWORD fgn_maxgen_percent;
2840
2841     PER_HEAP_ISOLATED
2842     DWORD fgn_loh_percent;
2843
2844     PER_HEAP_ISOLATED
2845     VOLATILE(bool) full_gc_approach_event_set;
2846
2847 #ifdef BACKGROUND_GC
2848     PER_HEAP_ISOLATED
2849     BOOL fgn_last_gc_was_concurrent;
2850 #endif //BACKGROUND_GC
2851
2852     PER_HEAP
2853     size_t fgn_last_alloc;
2854
2855     static DWORD user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
2856
2857     static wait_full_gc_status full_gc_wait (CLREvent *event, int time_out_ms);
2858
2859     PER_HEAP
2860     BYTE* demotion_low;
2861
2862     PER_HEAP
2863     BYTE* demotion_high;
2864
2865     PER_HEAP
2866     BOOL demote_gen1_p;
2867
2868     PER_HEAP
2869     BYTE* last_gen1_pin_end;
2870
2871     PER_HEAP
2872     gen_to_condemn_tuning gen_to_condemn_reasons;
2873
2874     PER_HEAP
2875     size_t etw_allocation_running_amount[2];
2876
2877     PER_HEAP
2878     int gc_policy;  //sweep, compact, expand
2879
2880 #ifdef MULTIPLE_HEAPS
2881     PER_HEAP_ISOLATED
2882     CLREvent gc_start_event;
2883
2884     PER_HEAP_ISOLATED
2885     CLREvent ee_suspend_event;
2886
2887     PER_HEAP
2888     heap_segment* new_heap_segment;
2889
2890 #define alloc_quantum_balance_units (16)
2891
2892     PER_HEAP_ISOLATED
2893     size_t min_balance_threshold;
2894 #else //MULTIPLE_HEAPS
2895
2896     PER_HEAP
2897     size_t allocation_running_time;
2898
2899     PER_HEAP
2900     size_t allocation_running_amount;
2901
2902 #endif //MULTIPLE_HEAPS
2903
2904     PER_HEAP_ISOLATED
2905     gc_mechanisms settings;
2906
2907     PER_HEAP_ISOLATED
2908     gc_history_global gc_data_global;
2909
2910     PER_HEAP_ISOLATED
2911     size_t gc_last_ephemeral_decommit_time;
2912
2913     PER_HEAP_ISOLATED
2914     size_t gc_gen0_desired_high;
2915
2916     PER_HEAP
2917     size_t gen0_big_free_spaces;
2918
2919 #ifdef _WIN64
2920     PER_HEAP_ISOLATED
2921     size_t youngest_gen_desired_th;
2922
2923     PER_HEAP_ISOLATED
2924     size_t mem_one_percent;
2925
2926     PER_HEAP_ISOLATED
2927     ULONGLONG total_physical_mem;
2928
2929     PER_HEAP_ISOLATED
2930     ULONGLONG available_physical_mem;
2931 #endif //_WIN64
2932
2933     PER_HEAP_ISOLATED
2934     size_t last_gc_index;
2935
2936     PER_HEAP_ISOLATED
2937     size_t min_segment_size;
2938
2939     PER_HEAP
2940     BYTE* lowest_address;
2941
2942     PER_HEAP
2943     BYTE* highest_address;
2944
2945     PER_HEAP
2946     BOOL ephemeral_promotion;
2947     PER_HEAP
2948     BYTE* saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
2949     PER_HEAP
2950     size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
2951
2952 protected:
2953 #ifdef MULTIPLE_HEAPS
2954     PER_HEAP
2955     GCHeap* vm_heap;
2956     PER_HEAP
2957     int heap_number;
2958     PER_HEAP
2959     VOLATILE(int) alloc_context_count;
2960 #else //MULTIPLE_HEAPS
2961 #define vm_heap ((GCHeap*) g_pGCHeap)
2962 #define heap_number (0)
2963 #endif //MULTIPLE_HEAPS
2964
2965 #ifndef MULTIPLE_HEAPS
2966     SPTR_DECL(heap_segment,ephemeral_heap_segment);
2967 #else
2968     PER_HEAP
2969     heap_segment* ephemeral_heap_segment;
2970 #endif // !MULTIPLE_HEAPS
2971
2972     PER_HEAP
2973     size_t time_bgc_last;
2974
2975     PER_HEAP
2976     BYTE*       gc_low; // lowest address being condemned
2977
2978     PER_HEAP
2979     BYTE*       gc_high; //highest address being condemned
2980
2981     PER_HEAP
2982     size_t      mark_stack_tos;
2983
2984     PER_HEAP
2985     size_t      mark_stack_bos;
2986
2987     PER_HEAP
2988     size_t      mark_stack_array_length;
2989
2990     PER_HEAP
2991     mark*       mark_stack_array;
2992
2993     PER_HEAP
2994     BOOL       verify_pinned_queue_p;
2995
2996     PER_HEAP
2997     BYTE*       oldest_pinned_plug;
2998
2999 #ifdef FEATURE_LOH_COMPACTION
3000     PER_HEAP
3001     size_t      loh_pinned_queue_tos;
3002
3003     PER_HEAP
3004     size_t      loh_pinned_queue_bos;
3005
3006     PER_HEAP
3007     size_t      loh_pinned_queue_length;
3008
3009     PER_HEAP_ISOLATED
3010     int         loh_pinned_queue_decay;
3011
3012     PER_HEAP
3013     mark*       loh_pinned_queue;
3014
3015     // This is for forced LOH compaction via the complus env var
3016     PER_HEAP_ISOLATED
3017     BOOL        loh_compaction_always_p;
3018
3019     // This is set by the user.
3020     PER_HEAP_ISOLATED
3021     gc_loh_compaction_mode loh_compaction_mode;
3022
3023     // We may not compact LOH on every heap if we can't
3024     // grow the pinned queue. This is to indicate whether
3025     // this heap's LOH is compacted or not. So even if
3026     // settings.loh_compaction is TRUE this may not be TRUE.
3027     PER_HEAP
3028     BOOL        loh_compacted_p;
3029 #endif //FEATURE_LOH_COMPACTION
3030
3031 #ifdef BACKGROUND_GC
3032
3033     PER_HEAP
3034     DWORD bgc_thread_id;
3035
3036 #ifdef WRITE_WATCH
3037     PER_HEAP
3038     BYTE* background_written_addresses [array_size+2];
3039 #endif //WRITE_WATCH
3040
3041 #if defined (DACCESS_COMPILE) && !defined (MULTIPLE_HEAPS)
3042     // doesn't need to be volatile for DAC.
3043     SVAL_DECL(c_gc_state, current_c_gc_state);
3044 #else
3045     PER_HEAP_ISOLATED
3046     VOLATILE(c_gc_state) current_c_gc_state;     //tells the large object allocator to
3047     //mark the object as new since the start of gc.
3048 #endif //DACCESS_COMPILE && !MULTIPLE_HEAPS
3049
3050     PER_HEAP_ISOLATED
3051     gc_mechanisms saved_bgc_settings;
3052
3053     PER_HEAP
3054     gc_history_per_heap saved_bgc_data_per_heap;
3055
3056     PER_HEAP
3057     BOOL bgc_data_saved_p;
3058
3059     PER_HEAP
3060     BOOL bgc_thread_running; // gc thread is in its main loop
3061
3062     PER_HEAP_ISOLATED
3063     BOOL keep_bgc_threads_p;
3064
3065     // This event is used by BGC threads to do something on 
3066     // one specific thread while other BGC threads have to 
3067     // wait. This is different from a join because with a join
3068     // you can't specify which thread should be doing some task
3069     // while the other threads wait.
3070     // For example, to make the BGC threads managed threads
3071     // we need to create them on the thread that called
3072     // SuspendEE, which is heap 0's thread.
3073     PER_HEAP_ISOLATED
3074     CLREvent bgc_threads_sync_event;
3075
3076     PER_HEAP
3077     Thread* bgc_thread;
3078
3079     PER_HEAP
3080     CRITICAL_SECTION bgc_threads_timeout_cs;
3081
3082     PER_HEAP_ISOLATED
3083     CLREvent background_gc_done_event;
3084
3085     PER_HEAP
3086     CLREvent background_gc_create_event;
3087
3088     PER_HEAP_ISOLATED
3089     CLREvent ee_proceed_event;
3090
3091     PER_HEAP
3092     CLREvent gc_lh_block_event;
3093
3094     PER_HEAP_ISOLATED
3095     BOOL gc_can_use_concurrent;
3096
3097     PER_HEAP_ISOLATED
3098     BOOL temp_disable_concurrent_p;
3099
3100     PER_HEAP_ISOLATED
3101     BOOL do_ephemeral_gc_p;
3102
3103     PER_HEAP_ISOLATED
3104     BOOL do_concurrent_p;
3105
3106     PER_HEAP
3107     VOLATILE(bgc_state) current_bgc_state;
3108
3109     struct gc_history
3110     {
3111         size_t gc_index;
3112         bgc_state current_bgc_state;
3113         DWORD gc_time_ms;
3114         // This is in bytes per ms; consider breaking it 
3115         // into the efficiency per phase.
3116         size_t gc_efficiency; 
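        // Worked example of the unit (illustrative): 100 MB in a 50 ms GC is a
        // gc_efficiency of roughly 2 MB per ms, i.e. (100 * 1024 * 1024) / 50
        // bytes per ms.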
3117         BYTE* eph_low;
3118         BYTE* gen0_start;
3119         BYTE* eph_high;
3120         BYTE* bgc_highest;
3121         BYTE* bgc_lowest;
3122         BYTE* fgc_highest;
3123         BYTE* fgc_lowest;
3124         BYTE* g_highest;
3125         BYTE* g_lowest;
3126     };
3127
3128 #define max_history_count 64
3129
3130     PER_HEAP
3131     int gchist_index_per_heap;
3132
3133     PER_HEAP
3134     gc_history gchist_per_heap[max_history_count];
3135
3136     PER_HEAP_ISOLATED
3137     int gchist_index;
3138
3139     PER_HEAP_ISOLATED
3140     gc_mechanisms_store gchist[max_history_count];
3141
3142     PER_HEAP
3143     void add_to_history_per_heap();
3144
3145     PER_HEAP_ISOLATED
3146     void add_to_history();
3147
3148     PER_HEAP
3149     size_t total_promoted_bytes;
3150
3151     PER_HEAP
3152     size_t     bgc_overflow_count;
3153
3154     PER_HEAP
3155     size_t     bgc_begin_loh_size;
3156     PER_HEAP
3157     size_t     end_loh_size;
3158
3159     // We need to throttle the LOH allocations during BGC since we can't
3160     // collect LOH when BGC is in progress. 
3161     // We allow the LOH heap size to double during a BGC. So for every
3162     // 10% increase we will have the LOH allocating thread sleep for one more
3163     // ms. So if we are already 30% over the original heap size, the thread will
3164     // sleep for 3ms.
3165     PER_HEAP
3166     DWORD      bgc_alloc_spin_loh;
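    // Minimal sketch of the policy described above (current_loh is a
    // hypothetical local; the real computation lives in the allocator):
    //   size_t growth_percent = (current_loh - bgc_begin_loh_size) * 100 / bgc_begin_loh_size;
    //   bgc_alloc_spin_loh = (DWORD)(growth_percent / 10); // 1ms of sleep per 10% growth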
3167
3168     // This includes what we allocate at the end of the segment - allocating
3169     // in the free list doesn't increase the heap size.
3170     PER_HEAP
3171     size_t     bgc_loh_size_increased;
3172
3173     PER_HEAP
3174     size_t     bgc_loh_allocated_in_free;
3175
3176     PER_HEAP
3177     size_t     background_soh_alloc_count;
3178
3179     PER_HEAP
3180     size_t     background_loh_alloc_count;
3181
3182     PER_HEAP
3183     BYTE**     background_mark_stack_tos;
3184
3185     PER_HEAP
3186     BYTE**     background_mark_stack_array;
3187
3188     PER_HEAP
3189     size_t    background_mark_stack_array_length;
3190
3191     PER_HEAP
3192     BYTE*     background_min_overflow_address;
3193
3194     PER_HEAP
3195     BYTE*     background_max_overflow_address;
3196
3197     // We can't process the SOH range concurrently so we
3198     // wait until final mark to process it.
3199     PER_HEAP
3200     BOOL      processed_soh_overflow_p;
3201
3202     PER_HEAP
3203     BYTE*     background_min_soh_overflow_address;
3204
3205     PER_HEAP
3206     BYTE*     background_max_soh_overflow_address;
3207
3208     PER_HEAP
3209     heap_segment* saved_overflow_ephemeral_seg;
3210
3211 #ifndef MULTIPLE_HEAPS
3212     SPTR_DECL(heap_segment, saved_sweep_ephemeral_seg);
3213
3214     SPTR_DECL(BYTE, saved_sweep_ephemeral_start);
3215
3216     SPTR_DECL(BYTE, background_saved_lowest_address);
3217
3218     SPTR_DECL(BYTE, background_saved_highest_address);
3219 #else
3220
3221     PER_HEAP
3222     heap_segment* saved_sweep_ephemeral_seg;
3223
3224     PER_HEAP
3225     BYTE* saved_sweep_ephemeral_start;
3226
3227     PER_HEAP
3228     BYTE* background_saved_lowest_address;
3229
3230     PER_HEAP
3231     BYTE* background_saved_highest_address;
3232 #endif //!MULTIPLE_HEAPS
3233
3234     // This is used for synchronization between the bgc thread
3235     // for this heap and the user threads allocating on this
3236     // heap.
3237     PER_HEAP
3238     exclusive_sync* bgc_alloc_lock;
3239
3240 #ifdef SNOOP_STATS
3241     PER_HEAP
3242     snoop_stats_data snoop_stat;
3243 #endif //SNOOP_STATS
3244
3245
3246     PER_HEAP
3247     BYTE**          c_mark_list;
3248
3249     PER_HEAP
3250     size_t          c_mark_list_length;
3251
3252     PER_HEAP
3253     size_t          c_mark_list_index;
3254 #endif //BACKGROUND_GC
3255
3256 #ifdef MARK_LIST
3257     PER_HEAP
3258     BYTE** mark_list;
3259
3260     PER_HEAP_ISOLATED
3261     size_t mark_list_size;
3262
3263     PER_HEAP
3264     BYTE** mark_list_end;
3265
3266     PER_HEAP
3267     BYTE** mark_list_index;
3268
3269     PER_HEAP_ISOLATED
3270     BYTE** g_mark_list;
3271 #ifdef PARALLEL_MARK_LIST_SORT
3272     PER_HEAP_ISOLATED
3273     BYTE** g_mark_list_copy;
3274     PER_HEAP
3275     BYTE*** mark_list_piece_start;
3276     BYTE*** mark_list_piece_end;
3277 #endif //PARALLEL_MARK_LIST_SORT
3278 #endif //MARK_LIST
3279
3280     PER_HEAP
3281     BYTE*  min_overflow_address;
3282
3283     PER_HEAP
3284     BYTE*  max_overflow_address;
3285
3286     PER_HEAP
3287     BYTE*  shigh; //keeps track of the highest marked object
3288
3289     PER_HEAP
3290     BYTE*  slow; //keeps track of the lowest marked object
3291
3292     PER_HEAP
3293     size_t allocation_quantum;
3294
3295     PER_HEAP
3296     size_t alloc_contexts_used;
3297
3298     PER_HEAP_ISOLATED
3299     no_gc_region_info current_no_gc_region_info;
3300
3301     PER_HEAP
3302     size_t soh_allocation_no_gc;
3303
3304     PER_HEAP
3305     size_t loh_allocation_no_gc;
3306
3307     PER_HEAP
3308     heap_segment* saved_loh_segment_no_gc;
3309
3310     PER_HEAP_ISOLATED
3311     BOOL proceed_with_gc_p;
3312
3313 #define youngest_generation (generation_of (0))
3314 #define large_object_generation (generation_of (max_generation+1))
3315
3316 #ifndef MULTIPLE_HEAPS
3317     SPTR_DECL(BYTE,alloc_allocated);
3318 #else
3319     PER_HEAP
3320     BYTE* alloc_allocated; //keeps track of the highest
3321     //address allocated by alloc
3322 #endif // !MULTIPLE_HEAPS
3323
3324     // The more_space_lock and gc_lock are used for 3 purposes:
3325     //
3326     // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock)
3327     // 2) to synchronize allocations of large objects (more_space_lock)
3328     // 3) to synchronize the GC itself (gc_lock)
3329     //
3330     PER_HEAP_ISOLATED
3331     GCSpinLock gc_lock; //lock while doing GC
3332
3333     PER_HEAP
3334     GCSpinLock more_space_lock; //lock while allocating more space
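    // Illustrative usage pattern (enter_spin_lock / leave_spin_lock are the
    // assumed acquire/release helpers; this is a sketch, not the actual code):
    //   enter_spin_lock (&more_space_lock);
    //   // ... carve out more allocation space or take the large-object path ...
    //   leave_spin_lock (&more_space_lock);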
3335
3336 #ifdef SYNCHRONIZATION_STATS
3337
3338     PER_HEAP
3339     unsigned int good_suspension;
3340
3341     PER_HEAP
3342     unsigned int bad_suspension;
3343
3344     // Number of times when msl_acquire is > 200 cycles.
3345     PER_HEAP
3346     unsigned int num_high_msl_acquire;
3347
3348     // Number of times when msl_acquire is < 200 cycles.
3349     PER_HEAP
3350     unsigned int num_low_msl_acquire;
3351
3352     // Number of times the more_space_lock is acquired.
3353     PER_HEAP
3354     unsigned int num_msl_acquired;
3355
3356     // Total cycles it takes to acquire the more_space_lock.
3357     PER_HEAP
3358     ULONGLONG total_msl_acquire;
3359
3360     PER_HEAP
3361     void init_heap_sync_stats()
3362     {
3363         good_suspension = 0;
3364         bad_suspension = 0;
3365         num_msl_acquired = 0;
3366         total_msl_acquire = 0;
3367         num_high_msl_acquire = 0;
3368         num_low_msl_acquire = 0;
3369         more_space_lock.init();
3370         gc_lock.init();
3371     }
3372
3373     PER_HEAP
3374     void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3375     {
3376         printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3377             heap_num,
3378             (unsigned int)alloc_contexts_used, // size_t: cast to match the %2d format specifier
3379             good_suspension,
3380             bad_suspension,
3381             (unsigned int)(total_msl_acquire / gc_count_during_log),
3382             num_high_msl_acquire / gc_count_during_log,
3383             num_low_msl_acquire / gc_count_during_log,
3384             num_msl_acquired / gc_count_during_log,
3385             more_space_lock.num_switch_thread / gc_count_during_log,
3386             more_space_lock.num_wait_longer / gc_count_during_log,
3387             more_space_lock.num_switch_thread_w / gc_count_during_log,
3388             more_space_lock.num_disable_preemptive_w / gc_count_during_log);
3389     }
3390
3391 #endif //SYNCHRONIZATION_STATS
3392
3393 #ifdef MULTIPLE_HEAPS
3394     PER_HEAP
3395     generation generation_table [NUMBERGENERATIONS+1];
3396 #endif //MULTIPLE_HEAPS
3397
3398
3399 #define NUM_LOH_ALIST (7)
3400 #define BASE_LOH_ALIST (64*1024)
3401     PER_HEAP 
3402     alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
3403
3404 #define NUM_GEN2_ALIST (12)
3405 #define BASE_GEN2_ALIST (1*64)
3406     PER_HEAP
3407     alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
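    // Illustrative note (assuming the bucket sizes double from the base size, as
    // the allocator code in gc.cpp appears to do): gen2 free items are bucketed by
    // powers of two starting at BASE_GEN2_ALIST (64 bytes) and LOH free items by
    // powers of two starting at BASE_LOH_ALIST (64k), with the last bucket catching
    // everything larger. The first bucket presumably lives inline in the allocator
    // object, which would explain why only NUM-1 alloc_list entries are declared here.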
3408
3409 //------------------------------------------    
3410
3411     PER_HEAP
3412     dynamic_data dynamic_data_table [NUMBERGENERATIONS+1];
3413
3414     PER_HEAP
3415     gc_history_per_heap gc_data_per_heap;
3416
3417     // dynamic tuning.
3418     PER_HEAP
3419     BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
3420     // if elevate_p is FALSE, it means we are determining fragmentation for a generation
3421     // to see if we should condemn this gen; otherwise it means we are determining if
3422     // we should elevate to doing max_gen from an ephemeral gen.
3423     PER_HEAP
3424     BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
3425     PER_HEAP
3426     BOOL 
3427     dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number, ULONGLONG total_mem);
3428     PER_HEAP
3429     BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, ULONGLONG available_mem);
3430     PER_HEAP
3431     BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
3432
3433     PER_HEAP
3434     int generation_skip_ratio;//in %
3435
3436     PER_HEAP
3437     BOOL gen0_bricks_cleared;
3438 #ifdef FFIND_OBJECT
3439     PER_HEAP
3440     int gen0_must_clear_bricks;
3441 #endif //FFIND_OBJECT
3442     
3443     PER_HEAP_ISOLATED
3444     size_t full_gc_counts[gc_type_max];
3445
3446     // the # of LOH bytes allocated since the last full compacting GC.
3447     PER_HEAP
3448     unsigned __int64 loh_alloc_since_cg;
3449
3450     PER_HEAP
3451     BOOL elevation_requested;
3452
3453     // if this is TRUE, we should always guarantee that we do a 
3454     // full compacting GC before we OOM.
3455     PER_HEAP
3456     BOOL last_gc_before_oom;
3457
3458     PER_HEAP_ISOLATED
3459     BOOL should_expand_in_full_gc;
3460
3461 #ifdef BACKGROUND_GC
3462     PER_HEAP_ISOLATED
3463     size_t ephemeral_fgc_counts[max_generation];
3464
3465     PER_HEAP_ISOLATED
3466     BOOL alloc_wait_event_p;
3467
3468 #ifndef MULTIPLE_HEAPS
3469     SPTR_DECL(BYTE, next_sweep_obj);
3470 #else
3471     PER_HEAP
3472     BYTE* next_sweep_obj;
3473 #endif //MULTIPLE_HEAPS
3474
3475     PER_HEAP
3476     BYTE* current_sweep_pos;
3477
3478 #endif //BACKGROUND_GC
3479
3480 #ifndef MULTIPLE_HEAPS
3481     SVAL_DECL(oom_history, oom_info);
3482 #ifdef FEATURE_PREMORTEM_FINALIZATION
3483     SPTR_DECL(CFinalize,finalize_queue);
3484 #endif //FEATURE_PREMORTEM_FINALIZATION
3485 #else
3486
3487     PER_HEAP
3488     oom_history oom_info;
3489
3490 #ifdef FEATURE_PREMORTEM_FINALIZATION
3491     PER_HEAP
3492     PTR_CFinalize finalize_queue;
3493 #endif //FEATURE_PREMORTEM_FINALIZATION
3494 #endif // !MULTIPLE_HEAPS
3495
3496     PER_HEAP
3497     fgm_history fgm_result;
3498
3499     PER_HEAP_ISOLATED
3500     size_t eph_gen_starts_size;
3501
3502     PER_HEAP
3503     BOOL        ro_segments_in_range;
3504
3505 #ifdef BACKGROUND_GC
3506     PER_HEAP
3507     heap_segment* freeable_small_heap_segment;
3508 #endif //BACKGROUND_GC
3509
3510     PER_HEAP
3511     heap_segment* freeable_large_heap_segment;
3512
3513     PER_HEAP_ISOLATED
3514     heap_segment* segment_standby_list;
3515
3516     PER_HEAP
3517     size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
3518
3519     PER_HEAP
3520     size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
3521
3522     PER_HEAP
3523     size_t ordered_plug_indices[MAX_NUM_BUCKETS];
3524
3525     PER_HEAP
3526     size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
3527
3528     PER_HEAP
3529     BOOL ordered_plug_indices_init;
3530
3531     PER_HEAP
3532     BOOL use_bestfit;
3533
3534     PER_HEAP
3535     BYTE* bestfit_first_pin;
3536
3537     PER_HEAP
3538     BOOL commit_end_of_seg;
3539
3540     PER_HEAP
3541     size_t max_free_space_items; // dynamically adjusted.
3542
3543     PER_HEAP
3544     size_t free_space_buckets;
3545
3546     PER_HEAP
3547     size_t free_space_items;
3548
3549     // -1 means we are using all the free
3550     // spaces we have (not including
3551     // end of seg space).
3552     PER_HEAP
3553     int trimmed_free_space_index;
3554
3555     PER_HEAP
3556     size_t total_ephemeral_plugs;
3557
3558     PER_HEAP
3559     seg_free_spaces* bestfit_seg;
3560
3561     // Note: we know this from the plan phase.
3562     // total_ephemeral_plugs actually has the same value,
3563     // but while we are calculating it we also record
3564     // how big the plugs are for best fit, which we
3565     // don't do in the plan phase.
3566     // TODO: get rid of total_ephemeral_plugs.
3567     PER_HEAP
3568     size_t total_ephemeral_size;
3569
3570 public:
3571
3572 #ifdef HEAP_ANALYZE
3573
3574     PER_HEAP_ISOLATED
3575     BOOL heap_analyze_enabled;
3576
3577     PER_HEAP
3578     size_t internal_root_array_length;
3579
3580 #ifndef MULTIPLE_HEAPS
3581     SPTR_DECL(PTR_BYTE, internal_root_array);
3582     SVAL_DECL(size_t, internal_root_array_index);
3583     SVAL_DECL(BOOL,   heap_analyze_success);
3584 #else
3585     PER_HEAP
3586     BYTE** internal_root_array;
3587
3588     PER_HEAP
3589     size_t internal_root_array_index;
3590
3591     PER_HEAP
3592     BOOL   heap_analyze_success;
3593 #endif // !MULTIPLE_HEAPS
3594
3595     // next two fields are used to optimize the search for the object 
3596     // enclosing the current reference handled by ha_mark_object_simple.
3597     PER_HEAP
3598     BYTE*  current_obj;
3599
3600     PER_HEAP
3601     size_t current_obj_size;
3602
3603 #endif //HEAP_ANALYZE
3604
3605     /* ----------------------- global members ----------------------- */
3606 public:
3607
3608     PER_HEAP
3609     int         condemned_generation_num;
3610
3611     PER_HEAP
3612     BOOL        blocking_collection;
3613
3614 #ifdef MULTIPLE_HEAPS
3615     SVAL_DECL(int, n_heaps);
3616     SPTR_DECL(PTR_gc_heap, g_heaps);
3617
3618     static
3619     HANDLE*   g_gc_threads; // keep all of the gc threads.
3620     static
3621     size_t*   g_promoted;
3622 #ifdef BACKGROUND_GC
3623     static
3624     size_t*   g_bpromoted;
3625 #endif //BACKGROUND_GC
3626 #ifdef MH_SC_MARK
3627     PER_HEAP_ISOLATED
3628     int*  g_mark_stack_busy;
3629 #endif //MH_SC_MARK
3630 #else
3631     static
3632     size_t    g_promoted;
3633 #ifdef BACKGROUND_GC
3634     static
3635     size_t    g_bpromoted;
3636 #endif //BACKGROUND_GC
3637 #endif //MULTIPLE_HEAPS
3638     
3639     static
3640     size_t reserved_memory;
3641     static
3642     size_t reserved_memory_limit;
3643     static
3644     BOOL      g_low_memory_status;
3645
3646 protected:
3647     PER_HEAP
3648     void update_collection_counts ();
3649
3650 }; // class gc_heap
3651
3652
3653 #ifdef FEATURE_PREMORTEM_FINALIZATION
3654 class CFinalize
3655 {
3656 #ifdef DACCESS_COMPILE
3657     friend class ::ClrDataAccess;
3658 #endif // DACCESS_COMPILE
3659 private:
3660
3661     // To add a segment, adjust the count (ExtraSegCount) and add a constant for its index below.
3662     static const int ExtraSegCount = 2;
3663     static const int FinalizerListSeg = NUMBERGENERATIONS+1;
3664     static const int CriticalFinalizerListSeg = NUMBERGENERATIONS;
3665     //Does not correspond to a segment
3666     static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
3667
3668     PTR_PTR_Object m_Array;
3669     PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
3670     PTR_PTR_Object m_EndArray;
3671     size_t   m_PromotedCount;
3672     
3673     VOLATILE(LONG) lock;
3674 #ifdef _DEBUG
3675     DWORD lockowner_threadid;
3676 #endif // _DEBUG
3677
3678     BOOL GrowArray();
3679     void MoveItem (Object** fromIndex,
3680                    unsigned int fromSeg,
3681                    unsigned int toSeg);
3682
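    // The finalization queue is one array (m_Array) partitioned into consecutive
    // segments: one per generation, then the critical finalizer list, then the
    // finalizer list, with the remaining free slots at the end. m_FillPointers[i]
    // marks the end of segment i, so segment i spans [SegQueue(i), SegQueueLimit(i)).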
3683     inline PTR_PTR_Object& SegQueue (unsigned int Seg)
3684     {
3685         return (Seg ? m_FillPointers [Seg-1] : m_Array);
3686     }
3687     inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
3688     {
3689         return m_FillPointers [Seg];
3690     }
3691
3692     BOOL IsSegEmpty ( unsigned int i)
3693     {
3694         ASSERT ( (int)i < FreeList);
3695         return (SegQueueLimit(i) == SegQueue (i));
3696
3697     }
3698
3699     BOOL FinalizeSegForAppDomain (AppDomain *pDomain, 
3700                                   BOOL fRunFinalizers, 
3701                                   unsigned int Seg);
3702
3703 public:
3704     ~CFinalize();
3705     bool Initialize();
3706     void EnterFinalizeLock();
3707     void LeaveFinalizeLock();
3708     bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
3709     Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
3710     BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
3711     void RelocateFinalizationData (int gen, gc_heap* hp);
3712 #ifdef GC_PROFILING
3713     void WalkFReachableObjects (gc_heap* hp);
3714 #endif //GC_PROFILING
3715     void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
3716     void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
3717     size_t GetPromotedCount();
3718
3719     //Methods used by the shutdown code to call every finalizer
3720     void SetSegForShutDown(BOOL fHasLock);
3721     size_t GetNumberFinalizableObjects();
3722     void DiscardNonCriticalObjects();
3723
3724     //Methods used by the app domain unloading call to finalize objects in an app domain
3725     BOOL FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers);
3726
3727     void CheckFinalizerObjects();
3728 };
3729 #endif // FEATURE_PREMORTEM_FINALIZATION
3730
3731 inline
3732  size_t& dd_begin_data_size (dynamic_data* inst)
3733 {
3734   return inst->begin_data_size;
3735 }
3736 inline
3737  size_t& dd_survived_size (dynamic_data* inst)
3738 {
3739   return inst->survived_size;
3740 }
3741 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
3742 inline
3743  size_t& dd_num_npinned_plugs(dynamic_data* inst)
3744 {
3745   return inst->num_npinned_plugs;
3746 }
3747 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
3748 inline
3749 size_t& dd_pinned_survived_size (dynamic_data* inst)
3750 {
3751   return inst->pinned_survived_size;
3752 }
3753 inline
3754 size_t& dd_added_pinned_size (dynamic_data* inst)
3755 {
3756   return inst->added_pinned_size;
3757 }
3758 inline
3759 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
3760 {
3761   return inst->artificial_pinned_survived_size;
3762 }
3763 #ifdef SHORT_PLUGS
3764 inline
3765 size_t& dd_padding_size (dynamic_data* inst)
3766 {
3767   return inst->padding_size;
3768 }
3769 #endif //SHORT_PLUGS
3770 inline
3771  size_t& dd_current_size (dynamic_data* inst)
3772 {
3773   return inst->current_size;
3774 }
3775 inline
3776 float& dd_surv (dynamic_data* inst)
3777 {
3778   return inst->surv;
3779 }
3780 inline
3781 size_t& dd_freach_previous_promotion (dynamic_data* inst)
3782 {
3783   return inst->freach_previous_promotion;
3784 }
3785 inline
3786 size_t& dd_desired_allocation (dynamic_data* inst)
3787 {
3788   return inst->desired_allocation;
3789 }
3790 inline
3791 size_t& dd_collection_count (dynamic_data* inst)
3792 {
3793     return inst->collection_count;
3794 }
3795 inline
3796 size_t& dd_promoted_size (dynamic_data* inst)
3797 {
3798     return inst->promoted_size;
3799 }
3800 inline
3801 float& dd_limit (dynamic_data* inst)
3802 {
3803   return inst->limit;
3804 }
3805 inline
3806 float& dd_max_limit (dynamic_data* inst)
3807 {
3808   return inst->max_limit;
3809 }
3810 inline
3811 size_t& dd_min_gc_size (dynamic_data* inst)
3812 {
3813   return inst->min_gc_size;
3814 }
3815 inline
3816 size_t& dd_max_size (dynamic_data* inst)
3817 {
3818   return inst->max_size;
3819 }
3820 inline
3821 size_t& dd_min_size (dynamic_data* inst)
3822 {
3823   return inst->min_size;
3824 }
3825 inline
3826 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
3827 {
3828   return inst->new_allocation;
3829 }
3830 inline
3831 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
3832 {
3833   return inst->gc_new_allocation;
3834 }
3835 inline
3836 size_t& dd_default_new_allocation (dynamic_data* inst)
3837 {
3838   return inst->default_new_allocation;
3839 }
3840 inline
3841 size_t& dd_fragmentation_limit (dynamic_data* inst)
3842 {
3843   return inst->fragmentation_limit;
3844 }
3845 inline
3846 float& dd_fragmentation_burden_limit (dynamic_data* inst)
3847 {
3848   return inst->fragmentation_burden_limit;
3849 }
3850 inline
3851 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
3852 {
3853   return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
3854 }
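// For example (hypothetical numbers): with a fragmentation_burden_limit of 0.25 the
// "volatile" limit above is min (0.5, 0.75) = 0.5; any configured limit of 0.375 or
// more is capped at 0.75.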
3855 inline
3856 size_t& dd_fragmentation (dynamic_data* inst)
3857 {
3858   return inst->fragmentation;
3859 }
3860
3861 inline
3862 size_t& dd_gc_clock (dynamic_data* inst)
3863 {
3864   return inst->gc_clock;
3865 }
3866 inline
3867 size_t& dd_time_clock (dynamic_data* inst)
3868 {
3869   return inst->time_clock;
3870 }
3871
3872 inline
3873 size_t& dd_gc_elapsed_time (dynamic_data* inst)
3874 {
3875     return inst->gc_elapsed_time;
3876 }
3877
3878 inline
3879 float& dd_gc_speed (dynamic_data* inst)
3880 {
3881     return inst->gc_speed;
3882 }
3883
3884 inline
3885 alloc_context* generation_alloc_context (generation* inst)
3886 {
3887     return &(inst->allocation_context);
3888 }
3889
3890 inline
3891 BYTE*& generation_allocation_start (generation* inst)
3892 {
3893   return inst->allocation_start;
3894 }
3895 inline
3896 BYTE*& generation_allocation_pointer (generation* inst)
3897 {
3898   return inst->allocation_context.alloc_ptr;
3899 }
3900 inline
3901 BYTE*& generation_allocation_limit (generation* inst)
3902 {
3903   return inst->allocation_context.alloc_limit;
3904 }
3905 inline 
3906 allocator* generation_allocator (generation* inst)
3907 {
3908     return &inst->free_list_allocator;
3909 }
3910
3911 inline
3912 PTR_heap_segment& generation_start_segment (generation* inst)
3913 {
3914   return inst->start_segment;
3915 }
3916 inline
3917 heap_segment*& generation_allocation_segment (generation* inst)
3918 {
3919   return inst->allocation_segment;
3920 }
3921 inline
3922 BYTE*& generation_plan_allocation_start (generation* inst)
3923 {
3924   return inst->plan_allocation_start;
3925 }
3926 inline
3927 size_t& generation_plan_allocation_start_size (generation* inst)
3928 {
3929   return inst->plan_allocation_start_size;
3930 }
3931 inline
3932 BYTE*& generation_allocation_context_start_region (generation* inst)
3933 {
3934   return inst->allocation_context_start_region;
3935 }
3936 inline
3937 size_t& generation_free_list_space (generation* inst)
3938 {
3939   return inst->free_list_space;
3940 }
3941 inline
3942 size_t& generation_free_obj_space (generation* inst)
3943 {
3944   return inst->free_obj_space;
3945 }
3946 inline
3947 size_t& generation_allocation_size (generation* inst)
3948 {
3949   return inst->allocation_size;
3950 }
3951
3952 inline
3953 size_t& generation_pinned_allocated (generation* inst)
3954 {
3955     return inst->pinned_allocated;
3956 }
3957 inline
3958 size_t& generation_pinned_allocation_sweep_size (generation* inst)
3959 {
3960     return inst->pinned_allocation_sweep_size;
3961 }
3962 inline
3963 size_t& generation_pinned_allocation_compact_size (generation* inst)
3964 {
3965     return inst->pinned_allocation_compact_size;
3966 }
3967 inline
3968 size_t&  generation_free_list_allocated (generation* inst)
3969 {
3970     return inst->free_list_allocated;
3971 }
3972 inline
3973 size_t&  generation_end_seg_allocated (generation* inst)
3974 {
3975     return inst->end_seg_allocated;
3976 }
3977 inline
3978 BOOL&  generation_allocate_end_seg_p (generation* inst)
3979 {
3980     return inst->allocate_end_seg_p;
3981 }
3982 inline
3983 size_t& generation_condemned_allocated (generation* inst)
3984 {
3985     return inst->condemned_allocated;
3986 }
3987 #ifdef FREE_USAGE_STATS
3988 inline
3989 size_t& generation_pinned_free_obj_space (generation* inst)
3990 {
3991     return inst->pinned_free_obj_space;
3992 }
3993 inline
3994 size_t& generation_allocated_in_pinned_free (generation* inst)
3995 {
3996     return inst->allocated_in_pinned_free;
3997 }
3998 inline
3999 size_t& generation_allocated_since_last_pin (generation* inst)
4000 {
4001     return inst->allocated_since_last_pin;
4002 }
4003 #endif //FREE_USAGE_STATS
4004 inline 
4005 float generation_allocator_efficiency (generation* inst)
4006 {
4007     if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4008     {
4009         return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4010     }
4011     else
4012         return 0;
4013 }
4014 inline
4015 size_t generation_unusable_fragmentation (generation* inst)
4016 {
4017     return (size_t)(generation_free_obj_space (inst) + 
4018                     (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
4019 }
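// Worked example (hypothetical numbers): with free_list_allocated = 80k,
// free_obj_space = 20k and free_list_space = 100k, the allocator efficiency above is
// 80k / (80k + 20k) = 0.8, so the unusable fragmentation is estimated as
// 20k + (1 - 0.8) * 100k = 40k.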
4020
4021 #define plug_skew           sizeof(ObjHeader)
4022 #define min_obj_size        (sizeof(BYTE*)+plug_skew+sizeof(size_t)) //syncblock + vtable + first field
4023 #define min_free_list       (sizeof(BYTE*)+min_obj_size) //Need one slot more
4024 //Note that this encodes the fact that plug_skew is a multiple of BYTE*.
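// Worked example (assuming sizeof(ObjHeader), and hence plug_skew, is 8 bytes on a
// 64-bit build): min_obj_size = 8 (method table pointer) + 8 (sync block skew) +
// 8 (first field) = 24 bytes, and min_free_list = 8 + 24 = 32 bytes.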
4025 struct plug
4026 {
4027     BYTE *  skew[plug_skew / sizeof(BYTE *)];
4028 };
4029
4030 class pair
4031 {
4032 public:
4033     short left;
4034     short right;
4035 };
4036
4037 //Note that these encode the fact that plug_skew is a multiple of BYTE*.
4038 // Each new field is prepended to the prior struct.
4039
4040 struct plug_and_pair
4041 {
4042     pair        m_pair;
4043     plug        m_plug;
4044 };
4045
4046 struct plug_and_reloc
4047 {
4048     ptrdiff_t   reloc;
4049     pair        m_pair;
4050     plug        m_plug;
4051 };
4052
4053 struct plug_and_gap
4054 {
4055     ptrdiff_t   gap;
4056     ptrdiff_t   reloc;
4057     union
4058     {
4059         pair    m_pair;
4060         int     lr;  //for clearing the entire pair in one instruction
4061     };
4062     plug        m_plug;
4063 };
4064
4065 struct gap_reloc_pair
4066 {
4067     size_t gap;
4068     size_t   reloc;
4069     pair        m_pair;
4070 };
4071
4072 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4073
4074 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4075 {
4076     plug_and_gap plugandgap;
4077 };
4078
4079 struct loh_obj_and_pad
4080 {
4081     ptrdiff_t   reloc;    
4082     plug        m_plug;
4083 };
4084
4085 struct loh_padding_obj
4086 {
4087     BYTE*       mt;
4088     size_t      len;
4089     ptrdiff_t   reloc;
4090     plug        m_plug;
4091 };
4092 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4093
4094 //flags description
4095 #define heap_segment_flags_readonly     1
4096 #define heap_segment_flags_inrange      2
4097 #define heap_segment_flags_unmappable   4
4098 #define heap_segment_flags_loh          8
4099 #ifdef BACKGROUND_GC
4100 #define heap_segment_flags_swept        16
4101 #define heap_segment_flags_decommitted  32
4102 #define heap_segment_flags_ma_committed 64
4103 // for segments whose mark array is only partially committed.
4104 #define heap_segment_flags_ma_pcommitted 128
4105 #endif //BACKGROUND_GC
4106
4107 // Need to be careful to keep enough pad items to fit a relocation node,
4108 // padded to QuadWord, before the plug_skew.
4109
4110 class heap_segment
4111 {
4112 public:
4113     BYTE*           allocated;
4114     BYTE*           committed;
4115     BYTE*           reserved;
4116     BYTE*           used;
4117     BYTE*           mem;
4118     size_t          flags;
4119     PTR_heap_segment next;
4120     BYTE*           plan_allocated;
4121 #ifdef BACKGROUND_GC
4122     BYTE*           background_allocated;
4123     BYTE*           saved_bg_allocated;
4124 #endif //BACKGROUND_GC
4125
4126 #ifdef MULTIPLE_HEAPS
4127     gc_heap*        heap;
4128 #endif //MULTIPLE_HEAPS
4129
4130 #ifdef _MSC_VER
4131 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4132 #pragma warning(disable:4324)  // structure was padded due to __declspec(align())
4133 #endif
4134     aligned_plug_and_gap padandplug;
4135 #ifdef _MSC_VER
4136 #pragma warning(default:4324)  // structure was padded due to __declspec(align())
4137 #endif
4138 };
4139
4140 inline
4141 BYTE*& heap_segment_reserved (heap_segment* inst)
4142 {
4143   return inst->reserved;
4144 }
4145 inline
4146 BYTE*& heap_segment_committed (heap_segment* inst)
4147 {
4148   return inst->committed;
4149 }
4150 inline
4151 BYTE*& heap_segment_used (heap_segment* inst)
4152 {
4153   return inst->used;
4154 }
4155 inline
4156 BYTE*& heap_segment_allocated (heap_segment* inst)
4157 {
4158   return inst->allocated;
4159 }
4160
4161 inline
4162 BOOL heap_segment_read_only_p (heap_segment* inst)
4163 {
4164     return ((inst->flags & heap_segment_flags_readonly) != 0);
4165 }
4166
4167 inline
4168 BOOL heap_segment_in_range_p (heap_segment* inst)
4169 {
4170     return (!(inst->flags & heap_segment_flags_readonly) ||
4171             ((inst->flags & heap_segment_flags_inrange) != 0));
4172 }
4173
4174 inline
4175 BOOL heap_segment_unmappable_p (heap_segment* inst)
4176 {
4177     return (!(inst->flags & heap_segment_flags_readonly) ||
4178             ((inst->flags & heap_segment_flags_unmappable) != 0));
4179 }
4180
4181 inline
4182 BOOL heap_segment_loh_p (heap_segment * inst)
4183 {
4184     return !!(inst->flags & heap_segment_flags_loh);
4185 }
4186
4187 #ifdef BACKGROUND_GC
4188 inline
4189 BOOL heap_segment_decommitted_p (heap_segment * inst)
4190 {
4191     return !!(inst->flags & heap_segment_flags_decommitted);
4192 }
4193 #endif //BACKGROUND_GC
4194
4195 inline
4196 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4197 {
4198   return inst->next;
4199 }
4200 inline
4201 BYTE*& heap_segment_mem (heap_segment* inst)
4202 {
4203   return inst->mem;
4204 }
4205 inline
4206 BYTE*& heap_segment_plan_allocated (heap_segment* inst)
4207 {
4208   return inst->plan_allocated;
4209 }
4210
4211 #ifdef BACKGROUND_GC
4212 inline
4213 BYTE*& heap_segment_background_allocated (heap_segment* inst)
4214 {
4215   return inst->background_allocated;
4216 }
4217 inline
4218 BYTE*& heap_segment_saved_bg_allocated (heap_segment* inst)
4219 {
4220   return inst->saved_bg_allocated;
4221 }
4222 #endif //BACKGROUND_GC
4223
4224 #ifdef MULTIPLE_HEAPS
4225 inline
4226 gc_heap*& heap_segment_heap (heap_segment* inst)
4227 {
4228     return inst->heap;
4229 }
4230 #endif //MULTIPLE_HEAPS
4231
4232 #ifndef MULTIPLE_HEAPS
4233
4234 #ifndef DACCESS_COMPILE
4235 extern "C" {
4236 #endif //!DACCESS_COMPILE
4237
4238 GARY_DECL(generation,generation_table,NUMBERGENERATIONS+1);
4239
4240 #ifndef DACCESS_COMPILE
4241 }
4242 #endif //!DACCESS_COMPILE
4243
4244 #endif //MULTIPLE_HEAPS
4245
4246 inline
4247 generation* gc_heap::generation_of (int  n)
4248 {
4249     assert (((n <= max_generation+1) && (n >= 0)));
4250     return &generation_table [ n ];
4251 }
4252
4253 inline
4254 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4255 {
4256     return &dynamic_data_table [ gen_number ];
4257 }
4258
4259 extern "C" BYTE* g_ephemeral_low;
4260 extern "C" BYTE* g_ephemeral_high;
4261
4262 #define card_word_width ((size_t)32)
4263
4264 //
4265 // The value of card_size is determined empirically according to the average size of an object.
4266 // In the code we also rely on the assumption that one card_table entry (DWORD) covers an entire OS page.
4267 //
4268 #if defined (_WIN64)
4269 #define card_size ((size_t)(2*OS_PAGE_SIZE/card_word_width))
4270 #else
4271 #define card_size ((size_t)(OS_PAGE_SIZE/card_word_width))
4272 #endif //_WIN64
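// Worked example (assuming OS_PAGE_SIZE is 4096): on 64-bit, card_size is
// 2*4096/32 = 256 bytes, so one 32-bit card word covers 32*256 = 8k of heap; on
// 32-bit, card_size is 4096/32 = 128 bytes and a card word covers exactly one 4k page.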
4273
4274 inline
4275 size_t card_word (size_t card)
4276 {
4277     return card / card_word_width;
4278 }
4279
4280 inline
4281 unsigned card_bit (size_t card)
4282 {
4283     return (unsigned)(card % card_word_width);
4284 }
4285
4286 inline
4287 size_t gcard_of (BYTE* object)
4288 {
4289     return (size_t)(object) / card_size;
4290 }
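// Worked example (with card_size = 256 as above): an object at address 0x20100 lands
// in card 0x20100 / 256 = 0x201, which is bit 0x201 % 32 = 1 of card word
// 0x201 / 32 = 0x10.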
4291