[platform/upstream/coreclr.git] / src / gc / gcpriv.h
1 //
2 // Copyright (c) Microsoft. All rights reserved.
3 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
4 //
5 // optimize for speed
6
7
8 #ifndef _DEBUG
9 #ifdef _MSC_VER
10 #pragma optimize( "t", on )
11 #endif
12 #endif
13 #define inline __forceinline
14
15 #include "gc.h"
16
17 //#define DT_LOG
18
19 #include "gcrecord.h"
20
21 inline void FATAL_GC_ERROR()
22 {
23     DebugBreak();
24     _ASSERTE(!"Fatal Error in GC.");
25     EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
26 }
27
28 #ifdef _MSC_VER
29 #pragma inline_depth(20)
30 #endif
31
32 /* the following section defines the optional features */
33
34 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
35 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
36 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much 
37 // work to make FEATURE_STRUCTALIGN not apply to LOH so they can both be
38 // turned on.
39 #define FEATURE_LOH_COMPACTION
40
41 #ifdef FEATURE_64BIT_ALIGNMENT
42 // We need the following feature as part of keeping 64-bit types aligned in the GC heap.
43 #define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during
44                                 //relocation
45 #endif //FEATURE_64BIT_ALIGNMENT
46
47 #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
48 #ifdef SHORT_PLUGS
49 #define DESIRED_PLUG_LENGTH (1000)
50 #endif //SHORT_PLUGS
51
52 #define FEATURE_PREMORTEM_FINALIZATION
53 #define GC_HISTORY
54
55 #ifndef FEATURE_REDHAWK
56 #define HEAP_ANALYZE
57 #define COLLECTIBLE_CLASS
58 #endif // !FEATURE_REDHAWK
59
60 #ifdef HEAP_ANALYZE
61 #define initial_internal_roots        (1024*16)
62 #endif // HEAP_ANALYZE
63
64 #define MARK_LIST         //use a sorted list to speed up the plan phase
65
66 #define BACKGROUND_GC   //concurrent background GC (requires WRITE_WATCH)
67
68 #ifdef SERVER_GC
69 #define MH_SC_MARK //scalable marking
70 //#define SNOOP_STATS //diagnostic
71 #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
72 #endif //SERVER_GC
73
74 //This is used to mark some types volatile only when scalable marking is used.
75 #if defined (SERVER_GC) && defined (MH_SC_MARK)
76 #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
77 #else //SERVER_GC&&MH_SC_MARK
78 #define SERVER_SC_MARK_VOLATILE(x) x
79 #endif //SERVER_GC&&MH_SC_MARK
80
81 //#define MULTIPLE_HEAPS         //Allow multiple heaps for servers
82
83 #define INTERIOR_POINTERS   //Allow interior pointers in the code manager
84
85 #define CARD_BUNDLE         //enable card bundle feature.(requires WRITE_WATCH)
86
87 // If this is defined we use a map for segments in order to find the heap for 
88 // a segment fast. But it does use more memory as we have to cover the whole
89 // heap range and for each entry we allocate a struct of 5 ptr-size words
90 // (3 for WKS as there's only one heap). 
91 #define SEG_MAPPING_TABLE
92
93 // If allocating the heap mapping table for the available VA consumes too
94 // much memory, you can enable this to allocate only the portion that
95 // corresponds to rw segments and grow it when needed in grow_brick_card_table.
96 // However in heap_of you will need to always compare the address with
97 // g_lowest/highest before you can look at the heap mapping table.
98 #define GROWABLE_SEG_MAPPING_TABLE
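// Illustrative guard only (a hypothetical sketch, not the actual heap_of code): with the
// growable table the address must be range-checked before indexing, e.g.
//   if ((o < g_lowest_address) || (o >= g_highest_address))
//       { /* fall back: o is not covered by the mapping table */ }
//   // only after this check is it safe to read the seg mapping entry for o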
99
100 #ifdef BACKGROUND_GC
101 #define MARK_ARRAY      //Mark bit in an array
102 #endif //BACKGROUND_GC
103
104 #if defined(BACKGROUND_GC) || defined (CARD_BUNDLE)
105 #define WRITE_WATCH     //Write Watch feature
106 #endif //BACKGROUND_GC || CARD_BUNDLE
107
108 #ifdef WRITE_WATCH
109 #define array_size 100
110 #endif //WRITE_WATCH
111
112 //#define SHORT_PLUGS           //keep plug short
113
114 #define FFIND_OBJECT        //faster find_object, slower allocation
115 #define FFIND_DECAY  7      //Number of GCs for which fast find will be active
116
117 //#define NO_WRITE_BARRIER  //no write barrier, use Write Watch feature
118
119 //#define DEBUG_WRITE_WATCH //Additional debug for write watch
120
121 //#define STRESS_PINNING    //Stress pinning by pinning randomly
122
123 //#define TRACE_GC          //debug trace gc operation
124 //#define SIMPLE_DPRINTF
125
126 //#define CATCH_GC          //catches exception during GC
127
128 //#define TIME_GC           //time allocation and garbage collection
129 //#define TIME_WRITE_WATCH  //time GetWriteWatch and ResetWriteWatch calls
130 //#define COUNT_CYCLES  //Use cycle counter for timing
131 //#define JOIN_STATS         //amount of time spent in the join
132 //also, see TIME_SUSPEND in switches.h.
133
134 //#define SYNCHRONIZATION_STATS
135 //#define SEG_REUSE_STATS
136
137 #if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
138 #define BEGIN_TIMING(x) \
139     LARGE_INTEGER x##_start; \
140     QueryPerformanceCounter (&x##_start)
141
142 #define END_TIMING(x) \
143     LARGE_INTEGER x##_end; \
144     QueryPerformanceCounter (&x##_end); \
145     x += x##_end.QuadPart - x##_start.QuadPart
146
147 #else
148 #define BEGIN_TIMING(x)
149 #define END_TIMING(x)
150 #define BEGIN_TIMING_CYCLES(x)
151 #define END_TIMING_CYCLES(x)
152 #endif //SYNCHRONIZATION_STATS || STAGE_STATS
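// Usage sketch (illustrative only): the argument is a pre-existing numeric accumulator of
// QueryPerformanceCounter ticks; the names below are made up for the example.
//   size_t total_wait_time = 0;
//   BEGIN_TIMING (total_wait_time);
//   /* ... work being measured ... */
//   END_TIMING (total_wait_time);   // adds the elapsed QPC ticks to total_wait_time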
153
154 #define NO_CATCH_HANDLERS  //to debug gc1, remove the catch handlers
155
156 /* End of optional features */
157
158 #ifdef _DEBUG
159 #define TRACE_GC
160 #endif
161
162 #define NUMBERGENERATIONS   4               //Max number of generations
163
164 // For the bestfit algorithm when we relocate ephemeral generations into an 
165 // existing gen2 segment.
166 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
167 #define MIN_INDEX_POWER2 6
168
169 #ifdef SERVER_GC
170
171 #ifdef _WIN64
172 #define MAX_INDEX_POWER2 30
173 #else
174 #define MAX_INDEX_POWER2 26
175 #endif  // _WIN64
176
177 #else //SERVER_GC
178
179 #ifdef _WIN64
180 #define MAX_INDEX_POWER2 28
181 #else
182 #define MAX_INDEX_POWER2 24
183 #endif  // _WIN64
184
185 #endif //SERVER_GC
186
187 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
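// For example: workstation 64-bit gives (28 - 6 + 1) == 23 buckets, while server 64-bit gives
// (30 - 6 + 1) == 25 buckets, matching the "25 sizes total" noted above.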
188
189 #define MAX_NUM_FREE_SPACES 200 
190 #define MIN_NUM_FREE_SPACES 5 
191
192 //Please leave these definitions intact.
193
194 #define CLREvent CLREventStatic
195
196 #ifdef CreateFileMapping
197
198 #undef CreateFileMapping
199
200 #endif //CreateFileMapping
201
202 #define CreateFileMapping WszCreateFileMapping
203
204 // hosted api
205 #ifdef InitializeCriticalSection
206 #undef InitializeCriticalSection
207 #endif //ifdef InitializeCriticalSection
208 #define InitializeCriticalSection UnsafeInitializeCriticalSection
209
210 #ifdef DeleteCriticalSection
211 #undef DeleteCriticalSection
212 #endif //ifdef DeleteCriticalSection
213 #define DeleteCriticalSection UnsafeDeleteCriticalSection
214
215 #ifdef EnterCriticalSection
216 #undef EnterCriticalSection
217 #endif //ifdef EnterCriticalSection
218 #define EnterCriticalSection UnsafeEEEnterCriticalSection
219
220 #ifdef LeaveCriticalSection
221 #undef LeaveCriticalSection
222 #endif //ifdef LeaveCriticalSection
223 #define LeaveCriticalSection UnsafeEELeaveCriticalSection
224
225 #ifdef TryEnterCriticalSection
226 #undef TryEnterCriticalSection
227 #endif //ifdef TryEnterCriticalSection
228 #define TryEnterCriticalSection UnsafeEETryEnterCriticalSection
229
230 #ifdef CreateSemaphore
231 #undef CreateSemaphore
232 #endif //CreateSemaphore
233 #define CreateSemaphore UnsafeCreateSemaphore
234
235 #ifdef CreateEvent
236 #undef CreateEvent
237 #endif //ifdef CreateEvent
238 #define CreateEvent UnsafeCreateEvent
239
240 #ifdef VirtualAlloc
241 #undef VirtualAlloc
242 #endif //ifdef VirtualAlloc
243 #define VirtualAlloc ClrVirtualAlloc
244
245 #ifdef VirtualFree
246 #undef VirtualFree
247 #endif //ifdef VirtualFree
248 #define VirtualFree ClrVirtualFree
249
250 #ifdef VirtualQuery
251 #undef VirtualQuery
252 #endif //ifdef VirtualQuery
253 #define VirtualQuery ClrVirtualQuery
254
255 #ifdef VirtualProtect
256 #undef VirtualProtect
257 #endif //ifdef VirtualProtect
258 #define VirtualProtect ClrVirtualProtect
259
260 #ifdef memcpy
261 #undef memcpy
262 #endif //memcpy
263
264 #ifdef FEATURE_STRUCTALIGN
265 #define REQD_ALIGN_DCL ,int requiredAlignment
266 #define REQD_ALIGN_ARG ,requiredAlignment
267 #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
268 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
269 #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
270 #else // FEATURE_STRUCTALIGN
271 #define REQD_ALIGN_DCL
272 #define REQD_ALIGN_ARG
273 #define REQD_ALIGN_AND_OFFSET_DCL
274 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
275 #define REQD_ALIGN_AND_OFFSET_ARG
276 #endif // FEATURE_STRUCTALIGN
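// Expansion sketch (illustrative only): a declaration written as
//   BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, ...);
// becomes, with FEATURE_STRUCTALIGN defined,
//   BOOL size_fit_p (size_t size, int requiredAlignment, size_t alignmentOffset, ...);
// and with the feature off, the extra parameters simply disappear.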
277
278 #ifdef MULTIPLE_HEAPS
279 #define THREAD_NUMBER_DCL ,int thread
280 #define THREAD_NUMBER_ARG ,thread
281 #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
282 #define THREAD_FROM_HEAP  int thread = heap_number;
283 #define HEAP_FROM_THREAD  gc_heap* hpt = gc_heap::g_heaps[thread];
284 #else
285 #define THREAD_NUMBER_DCL
286 #define THREAD_NUMBER_ARG
287 #define THREAD_NUMBER_FROM_CONTEXT
288 #define THREAD_FROM_HEAP
289 #define HEAP_FROM_THREAD  gc_heap* hpt = 0;
290 #endif //MULTIPLE_HEAPS
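// Expansion sketch (illustrative only): inside a gc_heap member function the pair
//   THREAD_FROM_HEAP;
//   HEAP_FROM_THREAD;
// expands, with MULTIPLE_HEAPS defined, to
//   int thread = heap_number;
//   gc_heap* hpt = gc_heap::g_heaps[thread];
// and collapses to just "gc_heap* hpt = 0;" in the single-heap build.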
291
292 //These constants are ordered
293 const int policy_sweep = 0;
294 const int policy_compact = 1;
295 const int policy_expand  = 2;
296
297 #ifdef TRACE_GC
298
299
300 extern int     print_level;
301 extern BOOL    trace_gc;
302 extern int    gc_trace_fac;
303
304
305 class hlet
306 {
307     static hlet* bindings;
308     int prev_val;
309     int* pval;
310     hlet* prev_let;
311 public:
312     hlet (int& place, int value)
313     {
314         prev_val = place;
315         pval = &place;
316         place = value;
317         prev_let = bindings;
318         bindings = this;
319     }
320     ~hlet ()
321     {
322         *pval = prev_val;
323         bindings = prev_let;
324     }
325 };
326
327
328 #define let(p,v) hlet __x = hlet (p, v);
329
330 #else //TRACE_GC
331
332 #define gc_count    -1
333 #define let(s,v)
334
335 #endif //TRACE_GC
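// Usage sketch (illustrative only): let temporarily rebinds an int for the enclosing scope,
// restoring the previous value when the hlet destructor runs, e.g.
//   {
//       let (print_level, 5);   // raise trace verbosity for this block only
//       /* ... traced work ... */
//   }                           // print_level restored here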
336
337 #ifdef TRACE_GC
338 #define SEG_REUSE_LOG_0 7
339 #define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
340 #define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
341 #define BGC_LOG (DT_LOG_0 + 1)
342 #define GTC_LOG (DT_LOG_0 + 2)
343 #define GC_TABLE_LOG (DT_LOG_0 + 3)
344 #define JOIN_LOG (DT_LOG_0 + 4)
345 #define SPINLOCK_LOG (DT_LOG_0 + 5)
346 #define SNOOP_LOG (DT_LOG_0 + 6)
347
348 #ifndef DACCESS_COMPILE
349
350 #ifdef SIMPLE_DPRINTF
351
352 //#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
353 void LogValist(const char *fmt, va_list args);
354 void GCLog (const char *fmt, ... );
355 //#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
356 //#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
357 //#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
358 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
359 //#define dprintf(l,x) {if (l==GTC_LOG) {GCLog x;}}
360 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == 1234)) ) {GCLog x;}}
361 //#define dprintf(l,x) {if ((l <= 1) || (l == 2222)) {GCLog x;}}
362 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
363 //#define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG) ||(l == DT_LOG_0)) {GCLog x;}}
364 //#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
365 //#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
366 //#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
367
368 #else //SIMPLE_DPRINTF
369
370 // The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
371 // reg key GCTraceFacility is set.  The stress log can only take a format string and 4 numbers or
372 // string literals.
373 #define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \
374       if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \
375       else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \
376       else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}}
377
378 #endif //SIMPLE_DPRINTF
379
380 #else //DACCESS_COMPILE
381 #define dprintf(l,x)
382 #endif //DACCESS_COMPILE
383 #else //TRACE_GC
384 #define dprintf(l,x)
385 #endif //TRACE_GC
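// Usage sketch (illustrative only): dprintf takes a level and a parenthesized printf-style
// argument list, e.g.
//   dprintf (1, ("h%d: new allocation is %Id bytes", heap_number, (size_t)size));
// and the whole statement compiles away to nothing when TRACE_GC is not defined.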
386
387 #ifndef FEATURE_REDHAWK
388 #undef  assert
389 #define assert _ASSERTE
390 #undef  ASSERT
391 #define ASSERT _ASSERTE
392 #endif // FEATURE_REDHAWK
393
394 #ifdef _DEBUG
395
396 struct GCDebugSpinLock {
397     VOLATILE(LONG) lock;                   // -1 if free, 0 if held
398     VOLATILE(Thread *) holding_thread;     // -1 if no thread holds the lock.
399     VOLATILE(BOOL) released_by_gc_p;       // a GC thread released the lock.
400
401     GCDebugSpinLock()
402         : lock(-1), holding_thread((Thread*) -1)
403     {
404     }
405
406 };
407 typedef GCDebugSpinLock GCSpinLock;
408
409 #elif defined (SYNCHRONIZATION_STATS)
410
411 struct GCSpinLockInstru {
412     VOLATILE(LONG) lock;
413     // number of times we went into SwitchToThread in enter_spin_lock.
414     unsigned int num_switch_thread;
415     // number of times we went into WaitLonger.
416     unsigned int num_wait_longer;
417     // number of times we called SwitchToThread in WaitLonger.
418     unsigned int num_switch_thread_w;
419     // number of times we called DisablePreemptiveGC in WaitLonger.
420     unsigned int num_disable_preemptive_w;
421
422     GCSpinLockInstru()
423         : lock(-1), num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
424     {
425     }
426
427     void init()
428     {
429         num_switch_thread = 0;
430         num_wait_longer = 0;
431         num_switch_thread_w = 0;
432         num_disable_preemptive_w = 0;
433     }
434 };
435
436 typedef GCSpinLockInstru GCSpinLock;
437
438 #else
439
440 struct GCDebugSpinLock {
441     VOLATILE(LONG) lock;                   // -1 if free, 0 if held
442
443     GCDebugSpinLock()
444         : lock(-1)
445     {
446     }
447 };
448 typedef GCDebugSpinLock GCSpinLock;
449
450 #endif
451
452 class mark;
453 class heap_segment;
454 class CObjectHeader;
455 class l_heap;
456 class sorted_table;
457 class c_synchronize;
458 class seg_free_spaces;
459 class gc_heap;
460
461 #ifdef BACKGROUND_GC
462 class exclusive_sync;
463 class recursive_gc_sync;
464 #endif //BACKGROUND_GC
465
466 // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
467 // make sure you change that one if you change this one!
468 enum gc_pause_mode
469 {
470     pause_batch = 0, //We are not concerned about pause length
471     pause_interactive = 1,     //We are running an interactive app
472     pause_low_latency = 2,     //short pauses are essential
473     //avoid long pauses from blocking full GCs unless running out of memory
474     pause_sustained_low_latency = 3,
475     pause_no_gc = 4
476 };
477
478 enum gc_loh_compaction_mode
479 {
480     loh_compaction_default = 1, // the default mode, don't compact LOH.
481     loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
482     loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
483 };
484
485 enum set_pause_mode_status
486 {
487     set_pause_mode_success = 0,
488     set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
489 };
490
491 enum gc_tuning_point
492 {
493     tuning_deciding_condemned_gen,
494     tuning_deciding_full_gc,
495     tuning_deciding_compaction,
496     tuning_deciding_expansion,
497     tuning_deciding_promote_ephemeral
498 };
499
500 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
501 static const char * const str_bgc_state[] =
502 {
503     "not_in_process",
504     "mark_handles",
505     "mark_stack",
506     "revisit_soh",
507     "revisit_loh",
508     "overflow_soh",
509     "overflow_loh",
510     "final_marking",
511     "sweep_soh",
512     "sweep_loh",
513     "plan_phase"
514 };
515 #endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
516
517 enum allocation_state
518 {
519     a_state_start = 0,
520     a_state_can_allocate,
521     a_state_cant_allocate,
522     a_state_try_fit,
523     a_state_try_fit_new_seg,
524     a_state_try_fit_new_seg_after_cg,
525     a_state_try_fit_no_seg,
526     a_state_try_fit_after_cg,
527     a_state_try_fit_after_bgc,
528     a_state_try_free_full_seg_in_bgc, 
529     a_state_try_free_after_bgc,
530     a_state_try_seg_end,
531     a_state_acquire_seg,
532     a_state_acquire_seg_after_cg,
533     a_state_acquire_seg_after_bgc,
534     a_state_check_and_wait_for_bgc,
535     a_state_trigger_full_compact_gc,
536     a_state_trigger_ephemeral_gc,
537     a_state_trigger_2nd_ephemeral_gc,
538     a_state_check_retry_seg,
539     a_state_max
540 };
541
542 enum gc_type
543 {
544     gc_type_compacting = 0,
545     gc_type_blocking = 1,
546 #ifdef BACKGROUND_GC
547     gc_type_background = 2,
548 #endif //BACKGROUND_GC
549     gc_type_max = 3
550 };
551
552
553 //encapsulates the mechanism for the current gc
554 class gc_mechanisms
555 {
556 public:
557     VOLATILE(SIZE_T) gc_index; // starts from 1 for the first GC, like dd_collection_count 
558     int condemned_generation;
559     BOOL promotion;
560     BOOL compaction;
561     BOOL loh_compaction;
562     BOOL heap_expansion;
563     DWORD concurrent;
564     BOOL demotion;
565     BOOL card_bundles;
566     int  gen0_reduction_count;
567     BOOL should_lock_elevation;
568     int elevation_locked_count;
569     BOOL minimal_gc;
570     gc_reason reason;
571     gc_pause_mode pause_mode;
572     BOOL found_finalizers;
573
574 #ifdef BACKGROUND_GC
575     BOOL background_p;
576     bgc_state b_state;
577     BOOL allocations_allowed;
578 #endif //BACKGROUND_GC
579
580 #ifdef STRESS_HEAP
581     BOOL stress_induced;
582 #endif // STRESS_HEAP
583
584 #ifdef _WIN64
585     DWORD entry_memory_load;
586 #endif //_WIN64
587
588     void init_mechanisms(); //for each GC
589     void first_init(); // for the life of the EE
590
591     void record (gc_history_global* history);
592 };
593
594 // This is a compact version of gc_mechanisms that we use to save in the history.
595 class gc_mechanisms_store
596 {
597 public:
598     size_t gc_index; 
599     bool promotion;
600     bool compaction;
601     bool loh_compaction;
602     bool heap_expansion;
603     bool concurrent;
604     bool demotion;
605     bool card_bundles;
606     bool should_lock_elevation;
607     int condemned_generation   : 8; 
608     int gen0_reduction_count   : 8;
609     int elevation_locked_count : 8;
610     gc_reason reason           : 8;
611     gc_pause_mode pause_mode   : 8;
612 #ifdef BACKGROUND_GC
613     bgc_state b_state          : 8;
614 #endif //BACKGROUND_GC
615     bool found_finalizers;
616
617 #ifdef BACKGROUND_GC
618     bool background_p;
619 #endif //BACKGROUND_GC
620
621 #ifdef STRESS_HEAP
622     bool stress_induced;
623 #endif // STRESS_HEAP
624
625 #ifdef _WIN64
626     DWORD entry_memory_load;
627 #endif //_WIN64
628
629     void store (gc_mechanisms* gm)
630     {
631         gc_index                = gm->gc_index; 
632         condemned_generation    = gm->condemned_generation;
633         promotion               = (gm->promotion != 0);
634         compaction              = (gm->compaction != 0);
635         loh_compaction          = (gm->loh_compaction != 0);
636         heap_expansion          = (gm->heap_expansion != 0);
637         concurrent              = (gm->concurrent != 0);
638         demotion                = (gm->demotion != 0);
639         card_bundles            = (gm->card_bundles != 0);
640         gen0_reduction_count    = gm->gen0_reduction_count;
641         should_lock_elevation   = (gm->should_lock_elevation != 0);
642         elevation_locked_count  = gm->elevation_locked_count;
643         reason                  = gm->reason;
644         pause_mode              = gm->pause_mode;
645         found_finalizers        = (gm->found_finalizers != 0);
646
647 #ifdef BACKGROUND_GC
648         background_p            = (gm->background_p != 0);
649         b_state                 = gm->b_state;
650 #endif //BACKGROUND_GC
651
652 #ifdef STRESS_HEAP
653         stress_induced          = (gm->stress_induced != 0);
654 #endif // STRESS_HEAP
655
656 #ifdef _WIN64
657         entry_memory_load       = gm->entry_memory_load;
658 #endif //_WIN64        
659     }
660 };
661
662 #ifdef GC_STATS
663
664 // GC specific statistics, tracking counts and timings for GCs occurring in the system.
665 // This writes the statistics to a file every 60 seconds, if a file is specified in
666 // COMPLUS_GcMixLog
667
668 struct GCStatistics
669     : public StatisticsBase
670 {
671     // initialized to the contents of COMPLUS_GcMixLog, or NULL, if not present
672     static WCHAR* logFileName;
673     static FILE*  logFile;
674
675     // number of times we executed a background GC, a foreground GC, or a
676     // non-concurrent GC
677     int cntBGC, cntFGC, cntNGC;
678
679     // min, max, and total time spent performing BGCs, FGCs, NGCs
680     // (BGC time includes everything between the moment the BGC starts until 
681 // it completes, i.e. the times of all FGCs occurring concurrently)
682     MinMaxTot bgc, fgc, ngc;
683
684     // number of times we executed a compacting GC (sweeping counts can be derived)
685     int cntCompactNGC, cntCompactFGC;
686
687     // count of reasons
688     int cntReasons[reason_max];
689
690     // count of condemned generation, by NGC and FGC:
691     int cntNGCGen[max_generation+1];
692     int cntFGCGen[max_generation];
693     
694     ///////////////////////////////////////////////////////////////////////////////////////////////
695     // Internal mechanism:
696
697     virtual void Initialize();
698     virtual void DisplayAndUpdate();
699
700     // Public API
701
702     static BOOL Enabled()
703     { return logFileName != NULL; }
704
705     void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
706 };
707
708 extern GCStatistics g_GCStatistics;
709 extern GCStatistics g_LastGCStatistics;
710
711 #endif // GC_STATS
712
713
714 typedef DPTR(class heap_segment)               PTR_heap_segment;
715 typedef DPTR(class gc_heap)                    PTR_gc_heap;
716 typedef DPTR(PTR_gc_heap)                      PTR_PTR_gc_heap;
717 #ifdef FEATURE_PREMORTEM_FINALIZATION
718 typedef DPTR(class CFinalize)                  PTR_CFinalize;
719 #endif // FEATURE_PREMORTEM_FINALIZATION
720
721 //-------------------------------------
722 //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size
723 //and doubling each time. The last bucket (index == num_buckets) is for the largest sizes, with no limit.
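// For example (illustrative only, assuming first_bucket_size == 256): an item of size s goes
// to bucket 0 when s < 256, bucket 1 when 256 <= s < 512, bucket 2 when 512 <= s < 1024, and
// so on, with the final bucket taking everything larger with no upper limit.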
724
725 #define MAX_BUCKET_COUNT (13)//Max number of buckets for the small generations. 
726 class alloc_list 
727 {
728     BYTE* head;
729     BYTE* tail;
730 public:
731     BYTE*& alloc_list_head () { return head;}
732     BYTE*& alloc_list_tail () { return tail;}
733     alloc_list()
734     {
735         head = 0; 
736         tail = 0; 
737     }
738 };
739
740
741 class allocator 
742 {
743     size_t num_buckets;
744     size_t frst_bucket_size;
745     alloc_list first_bucket;
746     alloc_list* buckets;
747     alloc_list& alloc_list_of (unsigned int bn);
748
749 public:
750     allocator (unsigned int num_b, size_t fbs, alloc_list* b);
751     allocator()
752     {
753         num_buckets = 1;
754         frst_bucket_size = SIZE_T_MAX;
755     }
756     unsigned int number_of_buckets() {return (unsigned int)num_buckets;}
757
758     size_t first_bucket_size() {return frst_bucket_size;}
759     BYTE*& alloc_list_head_of (unsigned int bn)
760     {
761         return alloc_list_of (bn).alloc_list_head();
762     }
763     BYTE*& alloc_list_tail_of (unsigned int bn)
764     {
765         return alloc_list_of (bn).alloc_list_tail();
766     }
767     void clear();
768     BOOL discard_if_no_fit_p()
769     {
770         return (num_buckets == 1);
771     }
772
773     // This is when we know there's nothing to repair because this free
774     // list has never gone through plan phase. Right now it's only used
775     // by the background ephemeral sweep when we copy the local free list
776     // to gen0's free list.
777     //
778     // We copy head and tail manually (vs together like copy_to_alloc_list)
779     // since we need to copy tail first because when we get the free items off
780     // of each bucket we check head first. We also need to copy the
781     // smaller buckets first so when gen0 allocation needs to thread
782     // smaller items back, that bucket is guaranteed to have been fully
783     // copied.
784     void copy_with_no_repair (allocator* allocator_to_copy)
785     {
786         assert (num_buckets == allocator_to_copy->number_of_buckets());
787         for (unsigned int i = 0; i < num_buckets; i++)
788         {
789             alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
790             alloc_list_tail_of(i) = al->alloc_list_tail();
791             alloc_list_head_of(i) = al->alloc_list_head();
792         }
793     }
794
795     void unlink_item (unsigned int bucket_number, BYTE* item, BYTE* previous_item, BOOL use_undo_p);
796     void thread_item (BYTE* item, size_t size);
797     void thread_item_front (BYTE* item, size_t size);
798     void thread_free_item (BYTE* free_item, BYTE*& head, BYTE*& tail);
799     void copy_to_alloc_list (alloc_list* toalist);
800     void copy_from_alloc_list (alloc_list* fromalist);
801     void commit_alloc_list_changes();
802 };
803
804 #define NUM_GEN_POWER2 (20)
805 #define BASE_GEN_SIZE (1*512)
806
807 // group the frequently used ones together (need instrumentation on accessors)
808 class generation
809 {
810 public:
811     // Don't move these first two fields without adjusting the references
812     // from the __asm in jitinterface.cpp.
813     alloc_context   allocation_context;
814     heap_segment*   allocation_segment;
815     PTR_heap_segment start_segment;
816     BYTE*           allocation_context_start_region;
817     BYTE*           allocation_start;
818     allocator       free_list_allocator;
819     size_t          free_list_allocated;
820     size_t          end_seg_allocated;
821     BOOL            allocate_end_seg_p;
822     size_t          condemned_allocated;
823     size_t          free_list_space;
824     size_t          free_obj_space;
825     size_t          allocation_size;
826     BYTE*           plan_allocation_start;
827     size_t          plan_allocation_start_size;
828
829     // this is the size of the pinned plugs that got allocated into this gen.
830     size_t          pinned_allocated;
831     size_t          pinned_allocation_compact_size;
832     size_t          pinned_allocation_sweep_size;
833     int             gen_num;
834
835 #ifdef FREE_USAGE_STATS
836     size_t          gen_free_spaces[NUM_GEN_POWER2];
837     // these are non pinned plugs only
838     size_t          gen_plugs[NUM_GEN_POWER2];
839     size_t          gen_current_pinned_free_spaces[NUM_GEN_POWER2];
840     size_t          pinned_free_obj_space;
841     // this is what got allocated into the pinned free spaces.
842     size_t          allocated_in_pinned_free;
843     size_t          allocated_since_last_pin;
844 #endif //FREE_USAGE_STATS
845 };
846
847 // The dynamic data fields are grouped into 3 categories:
848 //
849 // calculated logical data (like desired_allocation)
850 // physical data (like fragmentation)
851 // const data (like min_gc_size), initialized at the beginning
852 class dynamic_data
853 {
854 public:
855     ptrdiff_t new_allocation;
856     ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
857     float     surv;
858     size_t    desired_allocation;
859
860     // # of bytes taken by objects (ie, not free space) at the beginning
861     // of the GC.
862     size_t    begin_data_size;
863     // # of bytes taken by survived objects after mark.
864     size_t    survived_size;
865     // # of bytes taken by survived pinned plugs after mark.
866     size_t    pinned_survived_size;
867     size_t    artificial_pinned_survived_size;
868     size_t    added_pinned_size;
869
870 #ifdef SHORT_PLUGS
871     size_t    padding_size;
872 #endif //SHORT_PLUGS
873 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
874     // # of plugs that are not pinned plugs.
875     size_t    num_npinned_plugs;
876 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
877     //total object size after a GC, ie, doesn't include fragmentation
878     size_t    current_size; 
879     size_t    collection_count;
880     size_t    promoted_size;
881     size_t    freach_previous_promotion;
882     size_t    fragmentation;    //fragmentation when we don't compact
883     size_t    gc_clock;         //gc# when last GC happened
884     size_t    time_clock;       //time when last gc started
885     size_t    gc_elapsed_time;  // Time it took for the gc to complete
886     float     gc_speed;         //  speed in bytes/msec for the gc to complete
887
888     // min_size is always the same as min_gc_size.
889     size_t    min_gc_size;
890     size_t    max_size;
891     size_t    min_size;
892     size_t    default_new_allocation;
893     size_t    fragmentation_limit;
894     float     fragmentation_burden_limit;
895     float     limit;
896     float     max_limit;
897 };
898
899 #define ro_in_entry 0x1
900
901 #ifdef SEG_MAPPING_TABLE
902 // Note that I am storing both h0 and seg0, even though in Server GC you can get to 
903 // the heap* from the segment info. This is because heap_of needs to be really fast
904 // and we would not want yet another indirection.
905 struct seg_mapping
906 {
907     // if an address is > boundary it belongs to h1; else h0.
908     // since we init h0 and h1 to 0, if we get 0 it means that
909     // address doesn't exist on managed segments. And heap_of 
910     // would just return heap0 which is what it does now.
911     BYTE* boundary;
912 #ifdef MULTIPLE_HEAPS
913     gc_heap* h0;
914     gc_heap* h1;
915 #endif //MULTIPLE_HEAPS
916     // You could have an address that's in between 2 segments and 
917     // this would return a seg, the caller then will use 
918     // in_range_for_segment to determine if it's on that seg.
919     heap_segment* seg0; // this is what the seg for h0 is.
920     heap_segment* seg1; // this is what the seg for h1 is.
921     // Note that when frozen objects are used we mask seg1
922     // with 0x1 to indicate that there is a ro segment for
923     // this entry.
924 };
925 #endif //SEG_MAPPING_TABLE
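// Illustrative lookup sketch only (the index helper below is hypothetical, not the actual
// heap_of code):
//   seg_mapping* entry = &seg_mapping_table[seg_mapping_index_of (addr)];   // hypothetical
//   gc_heap*      hp  = (addr > entry->boundary) ? entry->h1   : entry->h0;
//   heap_segment* seg = (addr > entry->boundary) ? entry->seg1 : entry->seg0;
// The caller then confirms the address with in_range_for_segment before trusting seg.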
926
927 // alignment helpers
928 //Alignment constant for allocation
929 #define ALIGNCONST (DATA_ALIGNMENT-1)
930
931 inline
932 size_t Align (size_t nbytes, int alignment=ALIGNCONST)
933 {
934     return (nbytes + alignment) & ~alignment;
935 }
936
937 //return alignment constant for small object heap vs large object heap
938 inline
939 int get_alignment_constant (BOOL small_object_p)
940 {
941 #ifdef FEATURE_STRUCTALIGN
942     // If any objects on the large object heap require 8-byte alignment,
943     // the compiler will tell us so.  Let's not guess an alignment here.
944     return ALIGNCONST;
945 #else // FEATURE_STRUCTALIGN
946     return small_object_p ? ALIGNCONST : 7;
947 #endif // FEATURE_STRUCTALIGN
948 }
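// Worked example (illustrative only, FEATURE_STRUCTALIGN off): assuming DATA_ALIGNMENT == 8
// on 64-bit, ALIGNCONST == 7, so Align (13) == (13 + 7) & ~7 == 16 and both
// get_alignment_constant (TRUE) and (FALSE) yield 7 (8-byte alignment everywhere). Assuming
// DATA_ALIGNMENT == 4 on 32-bit, small objects align to 4 bytes while
// get_alignment_constant (FALSE) still returns 7, keeping large objects 8-byte aligned.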
949
950 struct etw_opt_info
951 {
952     size_t desired_allocation;
953     size_t new_allocation;
954     int    gen_number;
955 };
956
957 enum alloc_wait_reason
958 {
959     // When we don't care about firing an event for
960     // this.
961     awr_ignored = -1,
962
963     // when we detect we are in low memory
964     awr_low_memory = 0,
965
966     // when we detect the ephemeral segment is too full
967     awr_low_ephemeral = 1,
968
969     // we've given out too much budget for gen0.
970     awr_gen0_alloc = 2,
971
972     // we've given out too much budget for loh.
973     awr_loh_alloc = 3,
974
975     // this event is really obsolete - it's for pre-XP
976     // OSs where low mem notification is not supported.
977     awr_alloc_loh_low_mem = 4,
978
979     // we ran out of VM space to reserve on loh.
980     awr_loh_oos = 5, 
981
982     // ran out of space when allocating a small object
983     awr_gen0_oos_bgc = 6,
984
985     // ran out of space when allocating a large object
986     awr_loh_oos_bgc = 7,
987
988     // waiting for BGC to let FGC happen
989     awr_fgc_wait_for_bgc = 8,
990
991     // wait for bgc to finish to get loh seg.
992     awr_get_loh_seg = 9,
993
994     // we don't allow loh allocation during bgc planning.
995     awr_loh_alloc_during_plan = 10,
996
997     // we don't allow too much loh allocation during bgc.
998     awr_loh_alloc_during_bgc = 11
999 };
1000
1001 struct alloc_thread_wait_data
1002 {
1003     int awr;
1004 };
1005
1006 enum msl_take_state
1007 {
1008     mt_get_large_seg,
1009     mt_wait_bgc_plan,
1010     mt_wait_bgc,
1011     mt_block_gc,
1012     mt_clr_mem,
1013     mt_clr_large_mem,
1014     mt_t_eph_gc,
1015     mt_t_full_gc,
1016     mt_alloc_small,
1017     mt_alloc_large,
1018     mt_alloc_small_cant,
1019     mt_alloc_large_cant,
1020     mt_try_alloc,
1021     mt_try_budget
1022 };
1023
1024 enum msl_enter_state
1025 {
1026     me_acquire,
1027     me_release
1028 };
1029
1030 struct spinlock_info
1031 {
1032     msl_enter_state enter_state;
1033     msl_take_state take_state;
1034     DWORD thread_id;
1035 };
1036
1037 const unsigned HS_CACHE_LINE_SIZE = 128;
1038
1039 #ifdef SNOOP_STATS
1040 struct snoop_stats_data
1041 {
1042     int heap_index;
1043
1044     // total number of objects that we called
1045     // gc_mark on.
1046     size_t objects_checked_count;
1047     // total number of times we called gc_mark
1048     // on a 0 reference.
1049     size_t zero_ref_count;
1050     // total objects actually marked.
1051     size_t objects_marked_count;
1052     // number of objects written to the mark stack because
1053     // of mark_stolen.
1054     size_t stolen_stack_count;
1055     // number of objects pushed onto the mark stack because
1056     // of the partial mark code path.
1057     size_t partial_stack_count;
1058     // number of objects pushed onto the mark stack because
1059     // of the non partial mark code path.
1060     size_t normal_stack_count;
1061     // number of references marked without mark stack.
1062     size_t non_stack_count;
1063
1064     // number of times we detect next heap's mark stack
1065     // is not busy.
1066     size_t stack_idle_count;
1067
1068     // number of times we do switch to thread.
1069     size_t switch_to_thread_count;
1070
1071     // number of times we are checking if the next heap's
1072     // mark stack is busy.
1073     size_t check_level_count;
1074     // number of times next stack is busy and level is 
1075     // at the bottom.
1076     size_t busy_count;
1077     // how many interlocked exchange operations we did
1078     size_t interlocked_count;
1079     // number of times parent objects were stolen
1080     size_t partial_mark_parent_count;
1081     // number of times we look at a normal stolen entry, 
1082     // or the beginning/ending PM pair.
1083     size_t stolen_or_pm_count; 
1084     // number of times we see 2 for the entry.
1085     size_t stolen_entry_count; 
1086     // number of times we see a PM entry that's not ready.
1087     size_t pm_not_ready_count; 
1088     // number of stolen normal marked objects and partial mark children.
1089     size_t normal_count;
1090     // number of times the bottom of mark stack was cleared.
1091     size_t stack_bottom_clear_count;
1092 };
1093 #endif //SNOOP_STATS
1094
1095 struct no_gc_region_info
1096 {
1097     size_t soh_allocation_size;
1098     size_t loh_allocation_size;
1099     size_t started;
1100     size_t num_gcs;
1101     size_t num_gcs_induced;
1102     start_no_gc_region_status start_status;
1103     gc_pause_mode saved_pause_mode;
1104     size_t saved_gen0_min_size;
1105     size_t saved_gen3_min_size;
1106     BOOL minimal_gc_p;
1107 };
1108
1109 //class definition of the internal class
1110 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1111 extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1112 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1113 class gc_heap
1114 {
1115 #ifdef DACCESS_COMPILE
1116     friend class ::ClrDataAccess;
1117     friend class ::DacHeapWalker;
1118 #endif //DACCESS_COMPILE
1119
1120     friend class GCHeap;
1121 #ifdef FEATURE_PREMORTEM_FINALIZATION
1122     friend class CFinalize;
1123 #endif // FEATURE_PREMORTEM_FINALIZATION
1124     friend struct ::alloc_context;
1125     friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, DWORD dwFlags);
1126     friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1127     friend class t_join;
1128     friend class gc_mechanisms;
1129     friend class seg_free_spaces;
1130
1131 #ifdef BACKGROUND_GC
1132     friend class exclusive_sync;
1133     friend class recursive_gc_sync;
1134 #endif //BACKGROUND_GC
1135
1136 #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1137     friend void checkGCWriteBarrier();
1138     friend void initGCShadow();
1139 #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1140
1141 #ifdef MULTIPLE_HEAPS
1142     typedef void (gc_heap::* card_fn) (BYTE**, int);
1143 #define call_fn(fn) (this->*fn)
1144 #define __this this
1145 #else
1146     typedef void (* card_fn) (BYTE**);
1147 #define call_fn(fn) (*fn)
1148 #define __this (gc_heap*)0
1149 #endif
1150
1151 public:
1152
1153 #ifdef TRACE_GC
1154     PER_HEAP
1155     void print_free_list (int gen, heap_segment* seg);
1156 #endif // TRACE_GC
1157
1158 #ifdef SYNCHRONIZATION_STATS
1159
1160     PER_HEAP_ISOLATED
1161     void init_sync_stats()
1162     {
1163 #ifdef MULTIPLE_HEAPS
1164         for (int i = 0; i < gc_heap::n_heaps; i++)
1165         {
1166             gc_heap::g_heaps[i]->init_heap_sync_stats();
1167         }
1168 #else  //MULTIPLE_HEAPS
1169         init_heap_sync_stats();
1170 #endif  //MULTIPLE_HEAPS
1171     }
1172
1173     PER_HEAP_ISOLATED
1174     void print_sync_stats(unsigned int gc_count_during_log)
1175     {
1176         // bad/good gl acquire is cumulative during the log interval (because the numbers are too small)
1177         // min/max msl_acquire is the min/max during the log interval, not each GC.
1178         // Threads is the number of allocation threads used for the last GC.
1179         // num of msl acquired, avg_msl, high and low are all for each GC.
1180         printf("%2s%2s%10s%10s%12s%6s%4s%8s(  st,  wl, stw, dpw)\n",
1181             "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
1182
1183 #ifdef MULTIPLE_HEAPS
1184         for (int i = 0; i < gc_heap::n_heaps; i++)
1185         {
1186             gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
1187         }
1188 #else  //MULTIPLE_HEAPS
1189         print_heap_sync_stats(0, gc_count_during_log);
1190 #endif  //MULTIPLE_HEAPS
1191     }
1192
1193 #endif //SYNCHRONIZATION_STATS
1194
1195     PER_HEAP
1196     void verify_soh_segment_list();
1197     PER_HEAP
1198     void verify_mark_array_cleared (heap_segment* seg);
1199     PER_HEAP
1200     void verify_mark_array_cleared();
1201     PER_HEAP
1202     void verify_seg_end_mark_array_cleared();
1203     PER_HEAP
1204     void verify_partial();
1205
1206 #ifdef VERIFY_HEAP
1207     PER_HEAP
1208     void verify_free_lists(); 
1209     PER_HEAP
1210     void verify_heap (BOOL begin_gc_p);
1211 #endif //VERIFY_HEAP
1212
1213     PER_HEAP_ISOLATED
1214     void fire_pevents();
1215
1216 #ifdef FEATURE_BASICFREEZE
1217     static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
1218 #endif
1219
1220     static
1221     heap_segment* make_heap_segment (BYTE* new_pages, 
1222                                      size_t size, 
1223                                      int h_number);
1224     static
1225     l_heap* make_large_heap (BYTE* new_pages, size_t size, BOOL managed);
1226
1227     static
1228     gc_heap* make_gc_heap(
1229 #if defined (MULTIPLE_HEAPS)
1230         GCHeap* vm_heap,
1231         int heap_number
1232 #endif //MULTIPLE_HEAPS
1233         );
1234
1235     static
1236     void destroy_gc_heap(gc_heap* heap);
1237
1238     static
1239     HRESULT initialize_gc  (size_t segment_size,
1240                             size_t heap_size
1241 #ifdef MULTIPLE_HEAPS
1242                             , unsigned number_of_heaps
1243 #endif //MULTIPLE_HEAPS
1244         );
1245
1246     static
1247     void shutdown_gc();
1248
1249     PER_HEAP
1250     CObjectHeader* allocate (size_t jsize,
1251                              alloc_context* acontext);
1252
1253 #ifdef MULTIPLE_HEAPS
1254     static void balance_heaps (alloc_context* acontext);
1255     static 
1256     gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
1257     static
1258     DWORD __stdcall gc_thread_stub (void* arg);
1259 #endif //MULTIPLE_HEAPS
1260
1261     CObjectHeader* try_fast_alloc (size_t jsize);
1262
1263     // For LOH allocations we only update the alloc_bytes_loh in allocation
1264     // context - we don't actually use the ptr/limit from it so I am
1265     // making this explicit by not passing in the alloc_context.
1266     PER_HEAP
1267     CObjectHeader* allocate_large_object (size_t size, __int64& alloc_bytes);
1268
1269 #ifdef FEATURE_STRUCTALIGN
1270     PER_HEAP
1271     BYTE* pad_for_alignment_large (BYTE* newAlloc, int requiredAlignment, size_t size);
1272 #endif // FEATURE_STRUCTALIGN
1273
1274     PER_HEAP
1275     void do_pre_gc();
1276
1277     PER_HEAP
1278     void do_post_gc();
1279
1280     PER_HEAP
1281     BOOL expand_soh_with_minimal_gc();
1282
1283     // EE is always suspended when this method is called.
1284     // returning FALSE means we actually didn't do a GC. This happens
1285     // when we figured that we needed to do a BGC.
1286     PER_HEAP
1287     int garbage_collect (int n);
1288
1289     static 
1290     DWORD* make_card_table (BYTE* start, BYTE* end);
1291
1292     static
1293     void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
1294
1295     static
1296     int grow_brick_card_tables (BYTE* start, 
1297                                 BYTE* end, 
1298                                 size_t size,
1299                                 heap_segment* new_seg, 
1300                                 gc_heap* hp,
1301                                 BOOL loh_p);
1302
1303     PER_HEAP
1304     BOOL is_mark_set (BYTE* o);
1305
1306 protected:
1307
1308     PER_HEAP
1309     void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1310
1311     struct walk_relocate_args
1312     {
1313         BYTE* last_plug;
1314         BOOL is_shortened;
1315         mark* pinned_plug_entry;
1316     };
1317
1318     PER_HEAP
1319     void walk_plug (BYTE* plug, size_t size, BOOL check_last_object_p, 
1320                     walk_relocate_args* args, size_t profiling_context);
1321
1322     PER_HEAP
1323     void walk_relocation (int condemned_gen_number,
1324                           BYTE* first_condemned_address, size_t profiling_context);
1325
1326     PER_HEAP
1327     void walk_relocation_in_brick (BYTE* tree, walk_relocate_args* args, size_t profiling_context);
1328
1329 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1330     PER_HEAP
1331     void walk_relocation_for_bgc(size_t profiling_context);
1332
1333     PER_HEAP
1334     void make_free_lists_for_profiler_for_bgc();
1335 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1336
1337     PER_HEAP
1338     int generation_to_condemn (int n, 
1339                                BOOL* blocking_collection_p,
1340                                BOOL* elevation_requested_p,
1341                                BOOL check_only_p);
1342
1343     PER_HEAP_ISOLATED
1344     int joined_generation_to_condemn (BOOL should_evaluate_elevation, int n_initial, BOOL* blocking_collection
1345                                         STRESS_HEAP_ARG(int n_original));
1346
1347     PER_HEAP_ISOLATED
1348     size_t min_reclaim_fragmentation_threshold(ULONGLONG total_mem, DWORD num_heaps);
1349
1350     PER_HEAP_ISOLATED
1351     ULONGLONG min_high_fragmentation_threshold(ULONGLONG available_mem, DWORD num_heaps);
1352
1353     PER_HEAP
1354     void concurrent_print_time_delta (const char* msg);
1355     PER_HEAP
1356     void free_list_info (int gen_num, const char* msg);
1357
1358     // in svr GC on entry and exit of this method, the GC threads are not 
1359     // synchronized
1360     PER_HEAP
1361     void gc1();
1362
1363     PER_HEAP_ISOLATED
1364     void save_data_for_no_gc();
1365
1366     PER_HEAP_ISOLATED
1367     void restore_data_for_no_gc();
1368
1369     PER_HEAP_ISOLATED
1370     void update_collection_counts_for_no_gc();
1371
1372     PER_HEAP_ISOLATED
1373     BOOL should_proceed_with_gc();
1374
1375     PER_HEAP_ISOLATED
1376     void record_gcs_during_no_gc();
1377
1378     PER_HEAP
1379     BOOL find_loh_free_for_no_gc();
1380
1381     PER_HEAP
1382     BOOL find_loh_space_for_no_gc();
1383
1384     PER_HEAP
1385     BOOL commit_loh_for_no_gc (heap_segment* seg);
1386
1387     PER_HEAP_ISOLATED
1388     start_no_gc_region_status prepare_for_no_gc_region (ULONGLONG total_size, 
1389                                                         BOOL loh_size_known, 
1390                                                         ULONGLONG loh_size, 
1391                                                         BOOL disallow_full_blocking);
1392
1393     PER_HEAP
1394     BOOL loh_allocated_for_no_gc();
1395
1396     PER_HEAP_ISOLATED
1397     void release_no_gc_loh_segments();    
1398
1399     PER_HEAP_ISOLATED
1400     void thread_no_gc_loh_segments();
1401
1402     PER_HEAP
1403     void allocate_for_no_gc_after_gc();
1404
1405     PER_HEAP
1406     void set_loh_allocations_for_no_gc();
1407
1408     PER_HEAP
1409     void set_soh_allocations_for_no_gc();
1410
1411     PER_HEAP
1412     void prepare_for_no_gc_after_gc();
1413
1414     PER_HEAP_ISOLATED
1415     void set_allocations_for_no_gc();
1416
1417     PER_HEAP_ISOLATED
1418     BOOL should_proceed_for_no_gc();
1419
1420     PER_HEAP_ISOLATED
1421     start_no_gc_region_status get_start_no_gc_region_status();
1422
1423     PER_HEAP_ISOLATED
1424     end_no_gc_region_status end_no_gc_region();
1425
1426     PER_HEAP_ISOLATED
1427     void handle_failure_for_no_gc();
1428
1429     PER_HEAP
1430     void fire_etw_allocation_event (size_t allocation_amount, int gen_number, BYTE* object_address);
1431
1432     PER_HEAP
1433     void fire_etw_pin_object_event (BYTE* object, BYTE** ppObject);
1434
1435     PER_HEAP
1436     size_t limit_from_size (size_t size, size_t room, int gen_number,
1437                             int align_const);
1438     PER_HEAP
1439     int try_allocate_more_space (alloc_context* acontext, size_t jsize,
1440                                  int alloc_generation_number);
1441     PER_HEAP
1442     BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
1443                               int alloc_generation_number);
1444
1445     PER_HEAP
1446     size_t get_full_compact_gc_count();
1447
1448     PER_HEAP
1449     BOOL short_on_end_of_seg (int gen_number,
1450                               heap_segment* seg,
1451                               int align_const);
1452
1453     PER_HEAP
1454     BOOL a_fit_free_list_p (int gen_number, 
1455                             size_t size, 
1456                             alloc_context* acontext,
1457                             int align_const);
1458
1459 #ifdef BACKGROUND_GC
1460     PER_HEAP
1461     void wait_for_background (alloc_wait_reason awr);
1462
1463     PER_HEAP
1464     void wait_for_bgc_high_memory (alloc_wait_reason awr);
1465
1466     PER_HEAP
1467     void bgc_loh_alloc_clr (BYTE* alloc_start, 
1468                             size_t size, 
1469                             alloc_context* acontext,
1470                             int align_const, 
1471                             int lock_index,
1472                             BOOL check_used_p,
1473                             heap_segment* seg);
1474 #endif //BACKGROUND_GC
1475     
1476 #ifdef BACKGROUND_GC
1477     PER_HEAP
1478     void wait_for_background_planning (alloc_wait_reason awr);
1479
1480     PER_HEAP
1481     BOOL bgc_loh_should_allocate();
1482 #endif //BACKGROUND_GC
1483
1484 #define max_saved_spinlock_info 48
1485
1486 #ifdef SPINLOCK_HISTORY
1487     PER_HEAP
1488     int spinlock_info_index;
1489
1490     PER_HEAP
1491     spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
1492 #endif //SPINLOCK_HISTORY
1493
1494     PER_HEAP
1495     void add_saved_spinlock_info (
1496             msl_enter_state enter_state, 
1497             msl_take_state take_state);
1498
1499     PER_HEAP
1500     BOOL a_fit_free_list_large_p (size_t size, 
1501                                   alloc_context* acontext,
1502                                   int align_const);
1503
1504     PER_HEAP
1505     BOOL a_fit_segment_end_p (int gen_number,
1506                               heap_segment* seg,
1507                               size_t size, 
1508                               alloc_context* acontext,
1509                               int align_const,
1510                               BOOL* commit_failed_p);
1511     PER_HEAP
1512     BOOL loh_a_fit_segment_end_p (int gen_number,
1513                                   size_t size, 
1514                                   alloc_context* acontext,
1515                                   int align_const,
1516                                   BOOL* commit_failed_p,
1517                                   oom_reason* oom_r);
1518     PER_HEAP
1519     BOOL loh_get_new_seg (generation* gen,
1520                           size_t size,
1521                           int align_const,
1522                           BOOL* commit_failed_p,
1523                           oom_reason* oom_r);
1524
1525     PER_HEAP_ISOLATED
1526     size_t get_large_seg_size (size_t size);
1527
1528     PER_HEAP
1529     BOOL retry_full_compact_gc (size_t size);
1530
1531     PER_HEAP
1532     BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
1533                                  BOOL* did_full_compact_gc);
1534
1535     PER_HEAP
1536     BOOL trigger_full_compact_gc (gc_reason gr, 
1537                                   oom_reason* oom_r);
1538
1539     PER_HEAP
1540     BOOL trigger_ephemeral_gc (gc_reason gr);
1541
1542     PER_HEAP
1543     BOOL soh_try_fit (int gen_number,
1544                       size_t size, 
1545                       alloc_context* acontext,
1546                       int align_const,
1547                       BOOL* commit_failed_p,
1548                       BOOL* short_seg_end_p);
1549     PER_HEAP
1550     BOOL loh_try_fit (int gen_number,
1551                       size_t size, 
1552                       alloc_context* acontext,
1553                       int align_const,
1554                       BOOL* commit_failed_p,
1555                       oom_reason* oom_r);
1556
1557     PER_HEAP
1558     BOOL allocate_small (int gen_number,
1559                          size_t size, 
1560                          alloc_context* acontext,
1561                          int align_const);
1562
1563     enum c_gc_state
1564     {
1565         c_gc_state_marking,
1566         c_gc_state_planning,
1567         c_gc_state_free
1568     };
1569
1570 #ifdef RECORD_LOH_STATE
1571     #define max_saved_loh_states 12
1572     PER_HEAP
1573     int loh_state_index;
1574
1575     struct loh_state_info
1576     {
1577         allocation_state alloc_state;
1578         DWORD thread_id;
1579     };
1580
1581     PER_HEAP
1582     loh_state_info last_loh_states[max_saved_loh_states];
1583     PER_HEAP
1584     void add_saved_loh_state (allocation_state loh_state_to_save, DWORD thread_id);
1585 #endif //RECORD_LOH_STATE
1586     PER_HEAP
1587     BOOL allocate_large (int gen_number,
1588                          size_t size, 
1589                          alloc_context* acontext,
1590                          int align_const);
1591
1592     PER_HEAP_ISOLATED
1593     int init_semi_shared();
1594     PER_HEAP
1595     int init_gc_heap (int heap_number);
1596     PER_HEAP
1597     void self_destroy();
1598     PER_HEAP_ISOLATED
1599     void destroy_semi_shared();
1600     PER_HEAP
1601     void repair_allocation_contexts (BOOL repair_p);
1602     PER_HEAP
1603     void fix_allocation_contexts (BOOL for_gc_p);
1604     PER_HEAP
1605     void fix_youngest_allocation_area (BOOL for_gc_p);
1606     PER_HEAP
1607     void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
1608                                  int align_const);
1609     PER_HEAP
1610     void fix_large_allocation_area (BOOL for_gc_p);
1611     PER_HEAP
1612     void fix_older_allocation_area (generation* older_gen);
1613     PER_HEAP
1614     void set_allocation_heap_segment (generation* gen);
1615     PER_HEAP
1616     void reset_allocation_pointers (generation* gen, BYTE* start);
1617     PER_HEAP
1618     int object_gennum (BYTE* o);
1619     PER_HEAP
1620     int object_gennum_plan (BYTE* o);
1621     PER_HEAP_ISOLATED
1622     void init_heap_segment (heap_segment* seg);
1623     PER_HEAP
1624     void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
1625 #ifdef FEATURE_BASICFREEZE
1626     PER_HEAP
1627     BOOL insert_ro_segment (heap_segment* seg);
1628     PER_HEAP
1629     void remove_ro_segment (heap_segment* seg);
1630 #endif //FEATURE_BASICFREEZE
1631     PER_HEAP
1632     BOOL set_ro_segment_in_range (heap_segment* seg);
1633     PER_HEAP
1634     BOOL unprotect_segment (heap_segment* seg);
1635     PER_HEAP
1636     heap_segment* soh_get_segment_to_expand();
1637     PER_HEAP
1638     heap_segment* get_segment (size_t size, BOOL loh_p);
1639     PER_HEAP_ISOLATED
1640     void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
1641     PER_HEAP_ISOLATED
1642     void seg_mapping_table_remove_segment (heap_segment* seg);
1643     PER_HEAP
1644     heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc);
1645     PER_HEAP
1646     void thread_loh_segment (heap_segment* new_seg);
1647     PER_HEAP_ISOLATED
1648     heap_segment* get_segment_for_loh (size_t size
1649 #ifdef MULTIPLE_HEAPS
1650                                       , gc_heap* hp
1651 #endif //MULTIPLE_HEAPS
1652                                       );
1653     PER_HEAP
1654     void reset_heap_segment_pages (heap_segment* seg);
1655     PER_HEAP
1656     void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1657     PER_HEAP
1658     void decommit_heap_segment (heap_segment* seg);
1659     PER_HEAP
1660     void clear_gen0_bricks();
1661 #ifdef BACKGROUND_GC
1662     PER_HEAP
1663     void rearrange_small_heap_segments();
1664 #endif //BACKGROUND_GC
1665     PER_HEAP
1666     void rearrange_large_heap_segments();
1667     PER_HEAP
1668     void rearrange_heap_segments(BOOL compacting);
1669     PER_HEAP
1670     void switch_one_quantum();
1671     PER_HEAP
1672     void reset_ww_by_chunk (BYTE* start_address, size_t total_reset_size);
1673     PER_HEAP
1674     void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1675     PER_HEAP
1676     void reset_write_watch (BOOL concurrent_p);
1677     PER_HEAP
1678     void adjust_ephemeral_limits ();
1679     PER_HEAP
1680     void make_generation (generation& gen, heap_segment* seg,
1681                           BYTE* start, BYTE* pointer);
1682
1683
1684 #define USE_PADDING_FRONT 1
1685 #define USE_PADDING_TAIL  2
1686
1687     PER_HEAP
1688     BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, BYTE* alloc_pointer, BYTE* alloc_limit,
1689                      BYTE* old_loc=0, int use_padding=USE_PADDING_TAIL);
1690     PER_HEAP
1691     BOOL a_size_fit_p (size_t size, BYTE* alloc_pointer, BYTE* alloc_limit,
1692                        int align_const);
1693
1694     PER_HEAP
1695     void handle_oom (int heap_num, oom_reason reason, size_t alloc_size, 
1696                      BYTE* allocated, BYTE* reserved);
1697
1698     PER_HEAP
1699     size_t card_of ( BYTE* object);
1700     PER_HEAP
1701     BYTE* brick_address (size_t brick);
1702     PER_HEAP
1703     size_t brick_of (BYTE* add);
1704     PER_HEAP
1705     BYTE* card_address (size_t card);
1706     PER_HEAP
1707     size_t card_to_brick (size_t card);
1708     PER_HEAP
1709     void clear_card (size_t card);
1710     PER_HEAP
1711     void set_card (size_t card);
1712     PER_HEAP
1713     BOOL  card_set_p (size_t card);
1714     PER_HEAP
1715     void card_table_set_bit (BYTE* location);
1716
1717 #ifdef CARD_BUNDLE
1718     PER_HEAP
1719     void update_card_table_bundle();
1720     PER_HEAP
1721     void reset_card_table_write_watch();
1722     PER_HEAP
1723     void card_bundle_clear(size_t cardb);
1724     PER_HEAP
1725     void card_bundles_set (size_t start_cardb, size_t end_cardb);
1726     PER_HEAP
1727     BOOL card_bundle_set_p (size_t cardb);
1728     PER_HEAP
1729     BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1730     PER_HEAP
1731     void enable_card_bundles();
1732     PER_HEAP_ISOLATED
1733     BOOL card_bundles_enabled();
1734
1735 #endif //CARD_BUNDLE
1736
1737     PER_HEAP
1738     BOOL find_card (DWORD* card_table, size_t& card,
1739                     size_t card_word_end, size_t& end_card);
1740     PER_HEAP
1741     BOOL grow_heap_segment (heap_segment* seg, BYTE* high_address);
1742     PER_HEAP
1743     int grow_heap_segment (heap_segment* seg, BYTE* high_address, BYTE* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1744     PER_HEAP
1745     void copy_brick_card_range (BYTE* la, DWORD* old_card_table,
1746                                 short* old_brick_table,
1747                                 heap_segment* seg,
1748                                 BYTE* start, BYTE* end, BOOL heap_expand);
1749     PER_HEAP
1750     void init_brick_card_range (heap_segment* seg);
1751     PER_HEAP
1752     void copy_brick_card_table_l_heap ();
1753     PER_HEAP
1754     void copy_brick_card_table(BOOL heap_expand);
1755     PER_HEAP
1756     void clear_brick_table (BYTE* from, BYTE* end);
1757     PER_HEAP
1758     void set_brick (size_t index, ptrdiff_t val);
1759     PER_HEAP
1760     int brick_entry (size_t index);
1761 #ifdef MARK_ARRAY
1762     PER_HEAP
1763     unsigned int mark_array_marked (BYTE* add);
1764     PER_HEAP
1765     void mark_array_set_marked (BYTE* add);
1766     PER_HEAP
1767     BOOL is_mark_bit_set (BYTE* add);
1768     PER_HEAP
1769     void gmark_array_set_marked (BYTE* add);
1770     PER_HEAP
1771     void set_mark_array_bit (size_t mark_bit);
1772     PER_HEAP
1773     BOOL mark_array_bit_set (size_t mark_bit);
1774     PER_HEAP
1775     void mark_array_clear_marked (BYTE* add);
1776     PER_HEAP
1777     void clear_mark_array (BYTE* from, BYTE* end, BOOL check_only=TRUE);
1778 #ifdef BACKGROUND_GC
1779     PER_HEAP
1780     void seg_clear_mark_array_bits_soh (heap_segment* seg);
1781     PER_HEAP
1782     void clear_batch_mark_array_bits (BYTE* start, BYTE* end);
1783     PER_HEAP
1784     void bgc_clear_batch_mark_array_bits (BYTE* start, BYTE* end);
1785     PER_HEAP
1786     void clear_mark_array_by_objects (BYTE* from, BYTE* end, BOOL loh_p);
1787 #ifdef VERIFY_HEAP
1788     PER_HEAP
1789     void set_batch_mark_array_bits (BYTE* start, BYTE* end);
1790     PER_HEAP
1791     void check_batch_mark_array_bits (BYTE* start, BYTE* end);
1792 #endif //VERIFY_HEAP
1793 #endif //BACKGROUND_GC
1794 #endif //MARK_ARRAY
1795
1796     PER_HEAP
1797     BOOL large_object_marked (BYTE* o, BOOL clearp);
1798
1799 #ifdef BACKGROUND_GC
1800     PER_HEAP
1801     BOOL background_allowed_p();
1802 #endif //BACKGROUND_GC
1803
1804     PER_HEAP_ISOLATED
1805     void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1806
1807     PER_HEAP
1808     void check_for_full_gc (int gen_num, size_t size);
1809
1810     PER_HEAP
1811     void adjust_limit (BYTE* start, size_t limit_size, generation* gen,
1812                        int gen_number);
1813     PER_HEAP
1814     void adjust_limit_clr (BYTE* start, size_t limit_size,
1815                            alloc_context* acontext, heap_segment* seg,
1816                            int align_const);
1817     PER_HEAP
1818     void  leave_allocation_segment (generation* gen);
1819
1820     PER_HEAP
1821     void init_free_and_plug();
1822
1823     PER_HEAP
1824     void print_free_and_plug (const char* msg);
1825
1826     PER_HEAP
1827     void add_gen_plug (int gen_number, size_t plug_size);
1828
1829     PER_HEAP
1830     void add_gen_free (int gen_number, size_t free_size);
1831
1832     PER_HEAP
1833     void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1834     
1835     PER_HEAP
1836     void remove_gen_free (int gen_number, size_t free_size);
1837
1838     PER_HEAP
1839     BYTE* allocate_in_older_generation (generation* gen, size_t size,
1840                                         int from_gen_number,
1841                                         BYTE* old_loc=0
1842                                         REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1843     PER_HEAP
1844     generation*  ensure_ephemeral_heap_segment (generation* consing_gen);
1845     PER_HEAP
1846     BYTE* allocate_in_condemned_generations (generation* gen,
1847                                              size_t size,
1848                                              int from_gen_number,
1849 #ifdef SHORT_PLUGS
1850                                              BYTE* next_pinned_plug=0,
1851                                              heap_segment* current_seg=0,
1852 #endif //SHORT_PLUGS
1853                                              BYTE* old_loc=0
1854                                              REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1855 #ifdef INTERIOR_POINTERS
1856     // Verifies that interior is actually in the range of seg; otherwise 
1857     // returns 0.
1858     PER_HEAP_ISOLATED
1859     heap_segment* find_segment (BYTE* interior, BOOL small_segment_only_p);
1860
1861     PER_HEAP
1862     heap_segment* find_segment_per_heap (BYTE* interior, BOOL small_segment_only_p);
1863
1864     PER_HEAP
1865     BYTE* find_object_for_relocation (BYTE* o, BYTE* low, BYTE* high);
1866 #endif //INTERIOR_POINTERS
1867
1868     PER_HEAP_ISOLATED
1869     gc_heap* heap_of (BYTE* object);
1870
1871     PER_HEAP_ISOLATED
1872     gc_heap* heap_of_gc (BYTE* object);
1873
1874     PER_HEAP_ISOLATED
1875     size_t&  promoted_bytes (int);
1876
1877     PER_HEAP
1878     BYTE* find_object (BYTE* o, BYTE* low);
1879
1880     PER_HEAP
1881     dynamic_data* dynamic_data_of (int gen_number);
1882     PER_HEAP
1883     ptrdiff_t  get_desired_allocation (int gen_number);
1884     PER_HEAP
1885     ptrdiff_t  get_new_allocation (int gen_number);
1886     PER_HEAP
1887     ptrdiff_t  get_allocation (int gen_number);
1888     PER_HEAP
1889     bool new_allocation_allowed (int gen_number);
1890 #ifdef BACKGROUND_GC
1891     PER_HEAP_ISOLATED
1892     void allow_new_allocation (int gen_number);
1893     PER_HEAP_ISOLATED
1894     void disallow_new_allocation (int gen_number);
1895 #endif //BACKGROUND_GC
1896     PER_HEAP
1897     void reset_pinned_queue();
1898     PER_HEAP
1899     void reset_pinned_queue_bos();
1900     PER_HEAP
1901     void set_allocator_next_pin (generation* gen);
1902     PER_HEAP
1903     void set_allocator_next_pin (BYTE* alloc_pointer, BYTE*& alloc_limit);
1904     PER_HEAP
1905     void enque_pinned_plug (generation* gen, BYTE* plug, size_t len);
1906     PER_HEAP
1907     void enque_pinned_plug (BYTE* plug, 
1908                             BOOL save_pre_plug_info_p, 
1909                             BYTE* last_object_in_last_plug);
1910     PER_HEAP
1911     void merge_with_last_pinned_plug (BYTE* last_pinned_plug, size_t plug_size);
1912     PER_HEAP
1913     void set_pinned_info (BYTE* last_pinned_plug, 
1914                           size_t plug_len, 
1915                           BYTE* alloc_pointer, 
1916                           BYTE*& alloc_limit);
1917     PER_HEAP
1918     void set_pinned_info (BYTE* last_pinned_plug, size_t plug_len, generation* gen);
1919     PER_HEAP
1920     void save_post_plug_info (BYTE* last_pinned_plug, BYTE* last_object_in_last_plug, BYTE* post_plug);
1921     PER_HEAP
1922     size_t deque_pinned_plug ();
1923     PER_HEAP
1924     mark* pinned_plug_of (size_t bos);
1925     PER_HEAP
1926     mark* oldest_pin ();
1927     PER_HEAP
1928     mark* before_oldest_pin();
1929     PER_HEAP
1930     BOOL pinned_plug_que_empty_p ();
1931     PER_HEAP
1932     void make_mark_stack (mark* arr);
1933 #ifdef MH_SC_MARK
1934     PER_HEAP
1935     int& mark_stack_busy();
1936     PER_HEAP
1937     VOLATILE(BYTE*)& ref_mark_stack (gc_heap* hp, int index);
1938 #endif
1939 #ifdef BACKGROUND_GC
1940     PER_HEAP_ISOLATED
1941     size_t&  bpromoted_bytes (int);
1942     PER_HEAP
1943     void make_background_mark_stack (BYTE** arr);
1944     PER_HEAP
1945     void make_c_mark_list (BYTE** arr);
1946 #endif //BACKGROUND_GC
1947     PER_HEAP
1948     generation* generation_of (int  n);
1949     PER_HEAP
1950     BOOL gc_mark1 (BYTE* o);
1951     PER_HEAP
1952     BOOL gc_mark (BYTE* o, BYTE* low, BYTE* high);
1953     PER_HEAP
1954     BYTE* mark_object(BYTE* o THREAD_NUMBER_DCL);
1955 #ifdef HEAP_ANALYZE
1956     PER_HEAP
1957     void ha_mark_object_simple (BYTE** o THREAD_NUMBER_DCL);
1958 #endif //HEAP_ANALYZE
1959     PER_HEAP
1960     void mark_object_simple (BYTE** o THREAD_NUMBER_DCL);
1961     PER_HEAP
1962     void mark_object_simple1 (BYTE* o, BYTE* start THREAD_NUMBER_DCL);
1963
1964 #ifdef MH_SC_MARK
1965     PER_HEAP
1966     void mark_steal ();
1967 #endif //MH_SC_MARK
1968
1969 #ifdef BACKGROUND_GC
1970
1971     PER_HEAP
1972     BOOL background_marked (BYTE* o);
1973     PER_HEAP
1974     BOOL background_mark1 (BYTE* o);
1975     PER_HEAP
1976     BOOL background_mark (BYTE* o, BYTE* low, BYTE* high);
1977     PER_HEAP
1978     BYTE* background_mark_object (BYTE* o THREAD_NUMBER_DCL);
1979     PER_HEAP
1980     void background_mark_simple (BYTE* o THREAD_NUMBER_DCL);
1981     PER_HEAP
1982     void background_mark_simple1 (BYTE* o THREAD_NUMBER_DCL);
1983     PER_HEAP_ISOLATED
1984     void background_promote (Object**, ScanContext* , DWORD);
1985     PER_HEAP
1986     BOOL background_object_marked (BYTE* o, BOOL clearp);
1987     PER_HEAP
1988     void init_background_gc();
1989     PER_HEAP
1990     BYTE* background_next_end (heap_segment*, BOOL);
1991     PER_HEAP
1992     void generation_delete_heap_segment (generation*, 
1993                                          heap_segment*, heap_segment*, heap_segment*);
1994     PER_HEAP
1995     void set_mem_verify (BYTE*, BYTE*, BYTE);
1996     PER_HEAP
1997     void process_background_segment_end (heap_segment*, generation*, BYTE*,
1998                                      heap_segment*, BOOL*);
1999     PER_HEAP
2000     void process_n_background_segments (heap_segment*, heap_segment*, generation* gen);
2001     PER_HEAP
2002     BOOL fgc_should_consider_object (BYTE* o, 
2003                                      heap_segment* seg,
2004                                      BOOL consider_bgc_mark_p, 
2005                                      BOOL check_current_sweep_p,
2006                                      BOOL check_saved_sweep_p);
2007     PER_HEAP
2008     void should_check_bgc_mark (heap_segment* seg, 
2009                                 BOOL* consider_bgc_mark_p, 
2010                                 BOOL* check_current_sweep_p,
2011                                 BOOL* check_saved_sweep_p);
2012     PER_HEAP
2013     void background_ephemeral_sweep();
2014     PER_HEAP
2015     void background_sweep ();
2016     PER_HEAP
2017     void background_mark_through_object (BYTE* oo THREAD_NUMBER_DCL);
2018     PER_HEAP
2019     BYTE* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2020     PER_HEAP
2021     BYTE* background_first_overflow (BYTE* min_add,
2022                                      heap_segment* seg,
2023                                      BOOL concurrent_p, 
2024                                      BOOL small_object_p);
2025     PER_HEAP
2026     void background_process_mark_overflow_internal (int condemned_gen_number,
2027                                                     BYTE* min_add, BYTE* max_add,
2028                                                     BOOL concurrent_p);
2029     PER_HEAP
2030     BOOL background_process_mark_overflow (BOOL concurrent_p);
2031
2032     // for foreground GC to get hold of background structures containing refs
2033     PER_HEAP
2034     void scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2036
2037     PER_HEAP
2038     BOOL bgc_mark_array_range (heap_segment* seg, 
2039                                BOOL whole_seg_p,
2040                                BYTE** range_beg,
2041                                BYTE** range_end);
2042     PER_HEAP
2043     void bgc_verify_mark_array_cleared (heap_segment* seg);
2044     PER_HEAP
2045     void verify_mark_bits_cleared (BYTE* obj, size_t s);
2046     PER_HEAP
2047     void clear_all_mark_array();
2048 #endif //BACKGROUND_GC
2049
2050     PER_HEAP
2051     BYTE* next_end (heap_segment* seg, BYTE* f);
2052     PER_HEAP
2053     void fix_card_table ();
2054     PER_HEAP
2055     void mark_through_object (BYTE* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2056     PER_HEAP
2057     BOOL process_mark_overflow (int condemned_gen_number);
2058     PER_HEAP
2059     void process_mark_overflow_internal (int condemned_gen_number,
2060                                          BYTE* min_address, BYTE* max_address);
2061
2062 #ifdef SNOOP_STATS
2063     PER_HEAP
2064     void print_snoop_stat();
2065 #endif //SNOOP_STATS
2066
2067 #ifdef MH_SC_MARK
2068
2069     PER_HEAP
2070     BOOL check_next_mark_stack (gc_heap* next_heap);
2071
2072 #endif //MH_SC_MARK
2073
2074     PER_HEAP
2075     void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2076
2077     PER_HEAP
2078     void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2079
2080     PER_HEAP
2081     void pin_object (BYTE* o, BYTE** ppObject, BYTE* low, BYTE* high);
2082     PER_HEAP
2083     void reset_mark_stack ();
2084     PER_HEAP
2085     BYTE* insert_node (BYTE* new_node, size_t sequence_number,
2086                        BYTE* tree, BYTE* last_node);
2087     PER_HEAP
2088     size_t update_brick_table (BYTE* tree, size_t current_brick,
2089                                BYTE* x, BYTE* plug_end);
2090
2091     PER_HEAP
2092     void plan_generation_start (generation* gen, generation* consing_gen, BYTE* next_plug_to_allocate);
2093
2094     PER_HEAP
2095     void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2096
2097     PER_HEAP
2098     void plan_generation_starts (generation*& consing_gen);
2099
2100     PER_HEAP
2101     void advance_pins_for_demotion (generation* gen);
2102
2103     PER_HEAP
2104     void process_ephemeral_boundaries(BYTE* x, int& active_new_gen_number,
2105                                       int& active_old_gen_number,
2106                                       generation*& consing_gen,
2107                                       BOOL& allocate_in_condemned);
2108     PER_HEAP
2109     void seg_clear_mark_bits (heap_segment* seg);
2110     PER_HEAP
2111     void sweep_ro_segments (heap_segment* start_seg);
2112     PER_HEAP
2113     void store_plug_gap_info (BYTE* plug_start,
2114                               BYTE* plug_end,
2115                               BOOL& last_npinned_plug_p, 
2116                               BOOL& last_pinned_plug_p, 
2117                               BYTE*& last_pinned_plug,
2118                               BOOL& pinned_plug_p,
2119                               BYTE* last_object_in_last_plug,
2120                               BOOL& merge_with_last_pin_p,
2121                               // this is only for verification purpose
2122                               size_t last_plug_len);
2123     PER_HEAP
2124     void plan_phase (int condemned_gen_number);
2125
2126 #ifdef FEATURE_LOH_COMPACTION
2127     // plan_loh can allocate memory so it can fail. If it fails, we will
2128     // fall back to sweeping.  
2129     PER_HEAP
2130     BOOL plan_loh();
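    // A minimal sketch of a caller honoring the contract above - hypothetical,
    // not the real call site in plan_phase: when planning fails (it could not
    // allocate its bookkeeping), the LOH is swept rather than compacted.
#if 0 // illustrative sketch only
    if (plan_loh())
    {
        loh_compacted_p = TRUE;    // compact_loh()/relocate_in_loh_compact() follow
    }
    else
    {
        loh_compacted_p = FALSE;   // fall back to sweep_large_objects()
    }
#endif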
2131
2132     PER_HEAP
2133     void compact_loh();
2134
2135     PER_HEAP
2136     void relocate_in_loh_compact();
2137
2138 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2139     PER_HEAP
2140     void walk_relocation_loh (size_t profiling_context);
2141 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2142
2143     PER_HEAP
2144     BOOL loh_enque_pinned_plug (BYTE* plug, size_t len);
2145
2146     PER_HEAP
2147     void loh_set_allocator_next_pin();
2148
2149     PER_HEAP
2150     BOOL loh_pinned_plug_que_empty_p();
2151
2152     PER_HEAP
2153     size_t loh_deque_pinned_plug();
2154
2155     PER_HEAP
2156     mark* loh_pinned_plug_of (size_t bos);
2157
2158     PER_HEAP
2159     mark* loh_oldest_pin();
2160
2161     PER_HEAP
2162     BOOL loh_size_fit_p (size_t size, BYTE* alloc_pointer, BYTE* alloc_limit);
2163
2164     PER_HEAP
2165     BYTE* loh_allocate_in_condemned (BYTE* old_loc, size_t size);
2166
2167     PER_HEAP_ISOLATED
2168     BOOL loh_object_p (BYTE* o);
2169
2170     PER_HEAP_ISOLATED
2171     BOOL should_compact_loh();
2172
2173     // If the LOH compaction mode is just to compact once,
2174     // we need to see if we should reset it back to not compact.
2175     // We would only reset if every heap's LOH was compacted.
2176     PER_HEAP_ISOLATED
2177     void check_loh_compact_mode  (BOOL all_heaps_compacted_p);
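    // A sketch of the reset rule described above. The enum value names used here
    // (loh_compaction_once, loh_compaction_default) are placeholders for
    // illustration; the real values are defined by gc_loh_compaction_mode.
#if 0 // illustrative sketch only
    if ((loh_compaction_mode == loh_compaction_once) && all_heaps_compacted_p)
    {
        loh_compaction_mode = loh_compaction_default;   // stop compacting again
    }
#endif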
2178 #endif //FEATURE_LOH_COMPACTION
2179
2180     PER_HEAP
2181     void decommit_ephemeral_segment_pages (int condemned_gen_number);
2182     PER_HEAP
2183     void fix_generation_bounds (int condemned_gen_number,
2184                                 generation* consing_gen);
2185     PER_HEAP
2186     BYTE* generation_limit (int gen_number);
2187
2188     struct make_free_args
2189     {
2190         int free_list_gen_number;
2191         BYTE* current_gen_limit;
2192         generation* free_list_gen;
2193         BYTE* highest_plug;
2194     };
2195     PER_HEAP
2196     BYTE* allocate_at_end (size_t size);
2197     PER_HEAP
2198     BOOL ensure_gap_allocation (int condemned_gen_number);
2199     // make_free_lists is only called by blocking GCs.
2200     PER_HEAP
2201     void make_free_lists (int condemned_gen_number);
2202     PER_HEAP
2203     void make_free_list_in_brick (BYTE* tree, make_free_args* args);
2204     PER_HEAP
2205     void thread_gap (BYTE* gap_start, size_t size, generation*  gen);
2206     PER_HEAP
2207     void loh_thread_gap_front (BYTE* gap_start, size_t size, generation*  gen);
2208     PER_HEAP
2209     void make_unused_array (BYTE* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2210     PER_HEAP
2211     void clear_unused_array (BYTE* x, size_t size);
2212     PER_HEAP
2213     void relocate_address (BYTE** old_address THREAD_NUMBER_DCL);
2214     struct relocate_args
2215     {
2216         BYTE* last_plug;
2217         BYTE* low;
2218         BYTE* high;
2219         BOOL is_shortened;
2220         mark* pinned_plug_entry;
2221     };
2222
2223     PER_HEAP
2224     void reloc_survivor_helper (BYTE** pval);
2225     PER_HEAP
2226     void check_class_object_demotion (BYTE* obj);
2227     PER_HEAP
2228     void check_class_object_demotion_internal (BYTE* obj);
2229
2230     PER_HEAP 
2231     void check_demotion_helper (BYTE** pval, BYTE* parent_obj);
2232
2233     PER_HEAP
2234     void relocate_survivor_helper (BYTE* plug, BYTE* plug_end);
2235
2236     PER_HEAP
2237     void verify_pins_with_post_plug_info (const char* msg);
2238
2239 #ifdef COLLECTIBLE_CLASS
2240     PER_HEAP
2241     void unconditional_set_card_collectible (BYTE* obj);
2242 #endif //COLLECTIBLE_CLASS
2243
2244     PER_HEAP
2245     void relocate_shortened_survivor_helper (BYTE* plug, BYTE* plug_end, mark* pinned_plug_entry);
2246     
2247     PER_HEAP
2248     void relocate_obj_helper (BYTE* x, size_t s);
2249
2250     PER_HEAP
2251     void reloc_ref_in_shortened_obj (BYTE** address_to_set_card, BYTE** address_to_reloc);
2252
2253     PER_HEAP
2254     void relocate_pre_plug_info (mark* pinned_plug_entry);
2255
2256     PER_HEAP
2257     void relocate_shortened_obj_helper (BYTE* x, size_t s, BYTE* end, mark* pinned_plug_entry, BOOL is_pinned);
2258
2259     PER_HEAP
2260     void relocate_survivors_in_plug (BYTE* plug, BYTE* plug_end,
2261                                      BOOL check_last_object_p, 
2262                                      mark* pinned_plug_entry);
2263     PER_HEAP
2264     void relocate_survivors_in_brick (BYTE* tree, relocate_args* args);
2265
2266     PER_HEAP
2267     void update_oldest_pinned_plug();
2268
2269     PER_HEAP
2270     void relocate_survivors (int condemned_gen_number,
2271                              BYTE* first_condemned_address );
2272     PER_HEAP
2273     void relocate_phase (int condemned_gen_number,
2274                          BYTE* first_condemned_address);
2275
2276     struct compact_args
2277     {
2278         BOOL copy_cards_p;
2279         BYTE* last_plug;
2280         ptrdiff_t last_plug_relocation;
2281         BYTE* before_last_plug;
2282         size_t current_compacted_brick;
2283         BOOL is_shortened;
2284         mark* pinned_plug_entry;
2285         BOOL check_gennum_p;
2286         int src_gennum;
2287
2288         void print()
2289         {
2290             dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2291                 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2292         }
2293     };
2294
2295     PER_HEAP
2296     void copy_cards_range (BYTE* dest, BYTE* src, size_t len, BOOL copy_cards_p);
2297     PER_HEAP
2298     void  gcmemcopy (BYTE* dest, BYTE* src, size_t len, BOOL copy_cards_p);
2299     PER_HEAP
2300     void compact_plug (BYTE* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2301     PER_HEAP
2302     void compact_in_brick (BYTE* tree, compact_args* args);
2303
2304     PER_HEAP
2305     mark* get_next_pinned_entry (BYTE* tree, 
2306                                  BOOL* has_pre_plug_info_p, 
2307                                  BOOL* has_post_plug_info_p,
2308                                  BOOL deque_p=TRUE);
2309
2310     PER_HEAP
2311     mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2312
2313     PER_HEAP
2314     void recover_saved_pinned_info();
2315
2316     PER_HEAP
2317     void compact_phase (int condemned_gen_number, BYTE*
2318                         first_condemned_address, BOOL clear_cards);
2319     PER_HEAP
2320     void clear_cards (size_t start_card, size_t end_card);
2321     PER_HEAP
2322     void clear_card_for_addresses (BYTE* start_address, BYTE* end_address);
2323     PER_HEAP
2324     void copy_cards (size_t dst_card, size_t src_card,
2325                      size_t end_card, BOOL nextp);
2326     PER_HEAP
2327     void copy_cards_for_addresses (BYTE* dest, BYTE* src, size_t len);
2328
2329 #ifdef BACKGROUND_GC
2330     PER_HEAP
2331     void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2332     PER_HEAP
2333     void copy_mark_bits_for_addresses (BYTE* dest, BYTE* src, size_t len);
2334 #endif //BACKGROUND_GC
2335
2336
2337     PER_HEAP
2338     BOOL ephemeral_pointer_p (BYTE* o);
2339     PER_HEAP
2340     void fix_brick_to_highest (BYTE* o, BYTE* next_o);
2341     PER_HEAP
2342     BYTE* find_first_object (BYTE* start_address, BYTE* first_object);
2343     PER_HEAP
2344     BYTE* compute_next_boundary (BYTE* low, int gen_number, BOOL relocating);
2345     PER_HEAP
2346     void keep_card_live (BYTE* o, size_t& n_gen,
2347                          size_t& cg_pointers_found);
2348     PER_HEAP
2349     void mark_through_cards_helper (BYTE** poo, size_t& ngen,
2350                                     size_t& cg_pointers_found,
2351                                     card_fn fn, BYTE* nhigh,
2352                                     BYTE* next_boundary);
2353
2354     PER_HEAP
2355     BOOL card_transition (BYTE* po, BYTE* end, size_t card_word_end,
2356                                size_t& cg_pointers_found, 
2357                                size_t& n_eph, size_t& n_card_set,
2358                                size_t& card, size_t& end_card,
2359                                BOOL& foundp, BYTE*& start_address,
2360                                BYTE*& limit, size_t& n_cards_cleared);
2361     PER_HEAP
2362     void mark_through_cards_for_segments (card_fn fn, BOOL relocating);
2363
2364     PER_HEAP
2365     void repair_allocation_in_expanded_heap (generation* gen);
2366     PER_HEAP
2367     BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2368     PER_HEAP
2369     BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2370     PER_HEAP
2371     BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2372 #ifdef SEG_REUSE_STATS
2373     PER_HEAP
2374     size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2375 #endif //SEG_REUSE_STATS
2376     PER_HEAP
2377     void build_ordered_free_spaces (heap_segment* seg);
2378     PER_HEAP
2379     void count_plug (size_t last_plug_size, BYTE*& last_plug);
2380     PER_HEAP
2381     void count_plugs_in_brick (BYTE* tree, BYTE*& last_plug);
2382     PER_HEAP
2383     void build_ordered_plug_indices ();
2384     PER_HEAP
2385     void init_ordered_free_space_indices ();
2386     PER_HEAP
2387     void trim_free_spaces_indices ();
2388     PER_HEAP
2389     BOOL try_best_fit (BOOL end_of_segment_p);
2390     PER_HEAP
2391     BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2392     PER_HEAP
2393     BOOL process_free_space (heap_segment* seg, 
2394                              size_t free_space,
2395                              size_t min_free_size, 
2396                              size_t min_cont_size,
2397                              size_t* total_free_space,
2398                              size_t* largest_free_space);
2399     PER_HEAP
2400     size_t compute_eph_gen_starts_size();
2401     PER_HEAP
2402     void compute_new_ephemeral_size();
2403     PER_HEAP
2404     BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2405                             size_t min_cont_size, allocator* al);
2406     PER_HEAP
2407     BYTE* allocate_in_expanded_heap (generation* gen, size_t size,
2408                                      BOOL& adjacentp, BYTE* old_loc,
2409 #ifdef SHORT_PLUGS
2410                                      BOOL set_padding_on_saved_p,
2411                                      mark* pinned_plug_entry,
2412 #endif //SHORT_PLUGS
2413                                      BOOL consider_bestfit, int active_new_gen_number
2414                                      REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2415     PER_HEAP
2416     void realloc_plug (size_t last_plug_size, BYTE*& last_plug,
2417                        generation* gen, BYTE* start_address,
2418                        unsigned int& active_new_gen_number,
2419                        BYTE*& last_pinned_gap, BOOL& leftp, 
2420                        BOOL shortened_p
2421 #ifdef SHORT_PLUGS
2422                        , mark* pinned_plug_entry
2423 #endif //SHORT_PLUGS
2424                        );
2425     PER_HEAP
2426     void realloc_in_brick (BYTE* tree, BYTE*& last_plug, BYTE* start_address,
2427                            generation* gen,
2428                            unsigned int& active_new_gen_number,
2429                            BYTE*& last_pinned_gap, BOOL& leftp);
2430     PER_HEAP
2431     void realloc_plugs (generation* consing_gen, heap_segment* seg,
2432                         BYTE* start_address, BYTE* end_address,
2433                         unsigned active_new_gen_number);
2434
2435     PER_HEAP
2436     void set_expand_in_full_gc (int condemned_gen_number);
2437
2438     PER_HEAP
2439     void verify_no_pins (BYTE* start, BYTE* end);
2440
2441     PER_HEAP
2442     generation* expand_heap (int condemned_generation,
2443                              generation* consing_gen,
2444                              heap_segment* new_heap_segment);
2445
2446     PER_HEAP
2447     void save_ephemeral_generation_starts();
2448
2449     static size_t get_time_now();
2450
2451     PER_HEAP
2452     bool init_dynamic_data ();
2453     PER_HEAP
2454     float surv_to_growth (float cst, float limit, float max_limit);
2455     PER_HEAP
2456     size_t desired_new_allocation (dynamic_data* dd, size_t out,
2457                                    int gen_number, int pass);
2458
2459     PER_HEAP
2460     void trim_youngest_desired_low_memory();
2461
2462     PER_HEAP
2463     void decommit_ephemeral_segment_pages();
2464
2465 #ifdef _WIN64
2466     PER_HEAP_ISOLATED
2467     size_t trim_youngest_desired (DWORD memory_load, 
2468                                   size_t total_new_allocation,
2469                                   size_t total_min_allocation);
2470     PER_HEAP_ISOLATED
2471     size_t joined_youngest_desired (size_t new_allocation);
2472 #endif //_WIN64
2473     PER_HEAP_ISOLATED
2474     size_t get_total_heap_size ();
2475     PER_HEAP
2476     size_t generation_size (int gen_number);
2477     PER_HEAP_ISOLATED
2478     size_t get_total_survived_size();
2479     PER_HEAP
2480     size_t get_current_allocated();
2481     PER_HEAP_ISOLATED
2482     size_t get_total_allocated();
2483     PER_HEAP
2484     size_t current_generation_size (int gen_number);
2485     PER_HEAP
2486     size_t generation_plan_size (int gen_number);
2487     PER_HEAP
2488     void  compute_promoted_allocation (int gen_number);
2489     PER_HEAP
2490     size_t  compute_in (int gen_number);
2491     PER_HEAP
2492     void compute_new_dynamic_data (int gen_number);
2493     PER_HEAP
2494     gc_history_per_heap* get_gc_data_per_heap();
2495     PER_HEAP
2496     size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2497     PER_HEAP
2498     size_t generation_fragmentation (generation* gen,
2499                                      generation* consing_gen,
2500                                      BYTE* end);
2501     PER_HEAP
2502     size_t generation_sizes (generation* gen);
2503     PER_HEAP
2504     size_t approximate_new_allocation();
2505     PER_HEAP
2506     size_t end_space_after_gc();
2507     PER_HEAP
2508     BOOL decide_on_compacting (int condemned_gen_number,
2509                                size_t fragmentation,
2510                                BOOL& should_expand);
2511     PER_HEAP
2512     BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2513     PER_HEAP
2514     void reset_large_object (BYTE* o);
2515     PER_HEAP
2516     void sweep_large_objects ();
2517     PER_HEAP
2518     void relocate_in_large_objects ();
2519     PER_HEAP
2520     void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating);
2521     PER_HEAP
2522     void descr_segment (heap_segment* seg);
2523     PER_HEAP
2524     void descr_card_table ();
2525     PER_HEAP
2526     void descr_generations (BOOL begin_gc_p);
2527
2528 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2529     PER_HEAP_ISOLATED
2530     void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2531     PER_HEAP
2532     void record_survived_for_profiler(int condemned_gen_number, BYTE * first_condemned_address);
2533     PER_HEAP
2534     void notify_profiler_of_surviving_large_objects ();
2535 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
2536
2537     /*------------ Multiple non isolated heaps ----------------*/
2538 #ifdef MULTIPLE_HEAPS
2539     PER_HEAP_ISOLATED
2540     BOOL   create_thread_support (unsigned number_of_heaps);
2541     PER_HEAP_ISOLATED
2542     void destroy_thread_support ();
2543     PER_HEAP
2544     HANDLE create_gc_thread();
2545     PER_HEAP
2546     DWORD gc_thread_function();
2547 #ifdef MARK_LIST
2548 #ifdef PARALLEL_MARK_LIST_SORT
2549     PER_HEAP
2550     void sort_mark_list();
2551     PER_HEAP
2552     void merge_mark_lists();
2553     PER_HEAP
2554     void append_to_mark_list(BYTE **start, BYTE **end);
2555 #else //PARALLEL_MARK_LIST_SORT
2556     PER_HEAP_ISOLATED
2557     void combine_mark_lists();
2558 #endif //PARALLEL_MARK_LIST_SORT
2559 #endif
2560 #endif //MULTIPLE_HEAPS
2561
2562     /*------------ End of Multiple non isolated heaps ---------*/
2563
2564 #ifndef SEG_MAPPING_TABLE
2565     PER_HEAP_ISOLATED
2566     heap_segment* segment_of (BYTE* add,  ptrdiff_t & delta,
2567                               BOOL verify_p = FALSE);
2568 #endif //SEG_MAPPING_TABLE
2569
2570 #ifdef BACKGROUND_GC
2571
2572     //this is called by revisit....
2573     PER_HEAP
2574     BYTE* high_page (heap_segment* seg, BOOL concurrent_p);
2575
2576     PER_HEAP
2577     void revisit_written_page (BYTE* page, BYTE* end, BOOL concurrent_p,
2578                                heap_segment* seg,  BYTE*& last_page,
2579                                BYTE*& last_object, BOOL large_objects_p,
2580                                size_t& num_marked_objects);
2581     PER_HEAP
2582     void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2583
2584     PER_HEAP
2585     void concurrent_scan_dependent_handles (ScanContext *sc);
2586
2587     PER_HEAP_ISOLATED
2588     void suspend_EE ();
2589
2590     PER_HEAP_ISOLATED
2591     void bgc_suspend_EE ();
2592
2593     PER_HEAP_ISOLATED
2594     void restart_EE ();
2595
2596     PER_HEAP
2597     void background_verify_mark (Object*& object, ScanContext* sc, DWORD flags);
2598
2599     PER_HEAP
2600     void background_scan_dependent_handles (ScanContext *sc);
2601
2602     PER_HEAP
2603     void allow_fgc();
2604
2605     // Restores BGC settings if necessary.
2606     PER_HEAP_ISOLATED
2607     void recover_bgc_settings();
2608
2609     PER_HEAP
2610     void save_bgc_data_per_heap();
2611
2612     PER_HEAP
2613     BOOL should_commit_mark_array();
2614
2615     PER_HEAP
2616     void clear_commit_flag();
2617
2618     PER_HEAP_ISOLATED
2619     void clear_commit_flag_global();
2620
2621     PER_HEAP_ISOLATED
2622     void verify_mark_array_cleared (heap_segment* seg, DWORD* mark_array_addr);
2623
2624     PER_HEAP_ISOLATED
2625     void verify_mark_array_cleared (BYTE* begin, BYTE* end, DWORD* mark_array_addr);
2626
2627     PER_HEAP_ISOLATED
2628     BOOL commit_mark_array_by_range (BYTE* begin, 
2629                                      BYTE* end, 
2630                                      DWORD* mark_array_addr);
2631
2632     PER_HEAP_ISOLATED
2633     BOOL commit_mark_array_new_seg (gc_heap* hp, 
2634                                     heap_segment* seg,
2635                                     BYTE* new_lowest_address = 0);
2636
2637     PER_HEAP_ISOLATED
2638     BOOL commit_mark_array_with_check (heap_segment* seg, DWORD* mark_array_addr);
2639
2640     // commit the portion of the mark array that corresponds to 
2641     // this segment (from beginning to reserved).
2642     // seg and heap_segment_reserved (seg) are guaranteed to be 
2643     // page aligned.
2644     PER_HEAP_ISOLATED
2645     BOOL commit_mark_array_by_seg (heap_segment* seg, DWORD* mark_array_addr);
2646
2647     // During BGC init, we commit the mark array for all in range
2648     // segments whose mark array hasn't been committed or fully
2649     // committed. All rw segments are in range, only ro segments
2650     // can be partial in range.
2651     PER_HEAP
2652     BOOL commit_mark_array_bgc_init (DWORD* mark_array_addr);
2653
2654     PER_HEAP
2655     BOOL commit_new_mark_array (DWORD* new_mark_array);
2656
2657     // We need to commit all segments that intersect with the bgc
2658     // range. If a segment is only partially in range, we still
2659     // should commit the mark array for the whole segment as 
2660     // we will set the mark array commit flag for this segment.
2661     PER_HEAP_ISOLATED
2662     BOOL commit_new_mark_array_global (DWORD* new_mark_array);
2663
2664     // We can't decommit the first and the last page in the mark array
2665     // if the beginning and ending don't happen to be page aligned.
2666     PER_HEAP
2667     void decommit_mark_array_by_seg (heap_segment* seg);
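    // A sketch of the "round inward" rule above, assuming a helper that maps a
    // heap address to its mark-array address (mark_array_addr_of is a placeholder
    // name) plus the usual page-alignment helpers; the partially covered first
    // and last pages stay committed.
#if 0 // illustrative sketch only
    BYTE* decommit_start = align_on_page    (mark_array_addr_of (heap_segment_mem (seg)));
    BYTE* decommit_end   = align_lower_page (mark_array_addr_of (heap_segment_reserved (seg)));
    if (decommit_start < decommit_end)
    {
        VirtualFree (decommit_start, decommit_end - decommit_start, MEM_DECOMMIT);
    }
#endif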
2668
2669     PER_HEAP
2670     void background_mark_phase();
2671
2672     PER_HEAP
2673     void background_drain_mark_list (int thread);
2674
2675     PER_HEAP
2676     void background_grow_c_mark_list();
2677
2678     PER_HEAP_ISOLATED
2679     void background_promote_callback(Object** object, ScanContext* sc, DWORD flags);
2680
2681     PER_HEAP
2682     void mark_absorb_new_alloc();
2683
2684     PER_HEAP
2685     void restart_vm();
2686
2687     PER_HEAP
2688     BOOL prepare_bgc_thread(gc_heap* gh);
2689     PER_HEAP
2690     BOOL create_bgc_thread(gc_heap* gh);
2691     PER_HEAP_ISOLATED
2692     BOOL create_bgc_threads_support (int number_of_heaps);
2693     PER_HEAP
2694     BOOL create_bgc_thread_support();
2695     PER_HEAP_ISOLATED
2696     int check_for_ephemeral_alloc();
2697     PER_HEAP_ISOLATED
2698     void wait_to_proceed();
2699     PER_HEAP_ISOLATED
2700     void fire_alloc_wait_event_begin (alloc_wait_reason awr);
2701     PER_HEAP_ISOLATED
2702     void fire_alloc_wait_event_end (alloc_wait_reason awr);
2703     PER_HEAP
2704     void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored);
2705     PER_HEAP
2706     DWORD background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
2707     PER_HEAP_ISOLATED
2708     void start_c_gc();
2709     PER_HEAP
2710     void kill_gc_thread();
2711     PER_HEAP
2712     DWORD bgc_thread_function();
2713     PER_HEAP_ISOLATED
2714     void do_background_gc();
2715     static
2716     DWORD __stdcall bgc_thread_stub (void* arg);
2717
2718 #ifdef FEATURE_REDHAWK
2719     // Helper used to wrap the start routine of background GC threads so we can do things like initialize the
2720     // Redhawk thread state which requires running in the new thread's context.
2721     static DWORD WINAPI rh_bgc_thread_stub(void * pContext);
2722
2723     // Context passed to the above.
2724     struct rh_bgc_thread_ctx
2725     {
2726         PTHREAD_START_ROUTINE   m_pRealStartRoutine;
2727         gc_heap *               m_pRealContext;
2728     };
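    // A plausible shape for the stub declared above (not the actual Redhawk
    // code): perform the thread-local initialization in the new thread's
    // context, then chain to the real start routine.
#if 0 // illustrative sketch only
    static DWORD WINAPI rh_bgc_thread_stub (void* pContext)
    {
        rh_bgc_thread_ctx* pCtx = (rh_bgc_thread_ctx*)pContext;
        // ... initialize the Redhawk thread state here ...
        return pCtx->m_pRealStartRoutine (pCtx->m_pRealContext);
    }
#endif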
2729 #endif //FEATURE_REDHAWK
2730
2731 #endif //BACKGROUND_GC
2732  
2733 public:
2734
2735     PER_HEAP_ISOLATED
2736     VOLATILE(bool) internal_gc_done;
2737
2738 #ifdef BACKGROUND_GC
2739     PER_HEAP_ISOLATED
2740     DWORD cm_in_progress;
2741
2742     PER_HEAP
2743     BOOL expanded_in_fgc;
2744
2745     // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
2746     // that we do right before the bgc starts.
2747     PER_HEAP_ISOLATED
2748     BOOL     dont_restart_ee_p;
2749
2750     PER_HEAP_ISOLATED
2751     CLREvent bgc_start_event;
2752 #endif //BACKGROUND_GC
2753
2754     PER_HEAP_ISOLATED
2755     DWORD wait_for_gc_done(INT32 timeOut = INFINITE);
2756
2757     // Returns TRUE if the thread used to be in cooperative mode 
2758     // before calling this function.
2759     PER_HEAP_ISOLATED
2760     BOOL enable_preemptive (Thread* current_thread);
2761     PER_HEAP_ISOLATED
2762     void disable_preemptive (Thread* current_thread, BOOL restore_cooperative);
2763
2764     /* ------------------- per heap members --------------------------*/
2765
2766     PER_HEAP
2768     CLREvent gc_done_event; // same with or without MULTIPLE_HEAPS
2772
2773     PER_HEAP
2774     VOLATILE(LONG) gc_done_event_lock;
2775
2776     PER_HEAP
2777     VOLATILE(bool) gc_done_event_set;
2778
2779     PER_HEAP 
2780     void set_gc_done();
2781
2782     PER_HEAP 
2783     void reset_gc_done();
2784
2785     PER_HEAP
2786     void enter_gc_done_event_lock();
2787
2788     PER_HEAP
2789     void exit_gc_done_event_lock();
2790
2791 #ifdef MULTIPLE_HEAPS
2792     PER_HEAP
2793     BYTE*  ephemeral_low;      //lowest ephemeral address
2794
2795     PER_HEAP
2796     BYTE*  ephemeral_high;     //highest ephemeral address
2797 #endif //MULTIPLE_HEAPS
2798
2799     PER_HEAP
2800     DWORD* card_table;
2801
2802     PER_HEAP
2803     short* brick_table;
2804
2805 #ifdef MARK_ARRAY
2806 #ifdef MULTIPLE_HEAPS
2807     PER_HEAP
2808     DWORD* mark_array;
2809 #else
2810     SPTR_DECL(DWORD, mark_array);
2811 #endif //MULTIPLE_HEAPS
2812 #endif //MARK_ARRAY
2813
2814 #ifdef CARD_BUNDLE
2815     PER_HEAP
2816     DWORD* card_bundle_table;
2817 #endif //CARD_BUNDLE
2818
2819 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2820     PER_HEAP_ISOLATED
2821     sorted_table* seg_table;
2822 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2823
2824     PER_HEAP_ISOLATED
2825     VOLATILE(BOOL) gc_started;
2826
2827     // The following 2 events are there to support the gen2 
2828     // notification feature which is only enabled if concurrent
2829     // GC is disabled.
2830     PER_HEAP_ISOLATED
2831     CLREvent full_gc_approach_event;
2832
2833     PER_HEAP_ISOLATED
2834     CLREvent full_gc_end_event;
2835
2836     // Full GC Notification percentages.
2837     PER_HEAP_ISOLATED
2838     DWORD fgn_maxgen_percent;
2839
2840     PER_HEAP_ISOLATED
2841     DWORD fgn_loh_percent;
2842
2843     PER_HEAP_ISOLATED
2844     VOLATILE(bool) full_gc_approach_event_set;
2845
2846 #ifdef BACKGROUND_GC
2847     PER_HEAP_ISOLATED
2848     BOOL fgn_last_gc_was_concurrent;
2849 #endif //BACKGROUND_GC
2850
2851     PER_HEAP
2852     size_t fgn_last_alloc;
2853
2854     static DWORD user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
2855
2856     static wait_full_gc_status full_gc_wait (CLREvent *event, int time_out_ms);
2857
2858     PER_HEAP
2859     BYTE* demotion_low;
2860
2861     PER_HEAP
2862     BYTE* demotion_high;
2863
2864     PER_HEAP
2865     BOOL demote_gen1_p;
2866
2867     PER_HEAP
2868     BYTE* last_gen1_pin_end;
2869
2870     PER_HEAP
2871     gen_to_condemn_tuning gen_to_condemn_reasons;
2872
2873     PER_HEAP
2874     size_t etw_allocation_running_amount[2];
2875
2876     PER_HEAP
2877     int gc_policy;  //sweep, compact, expand
2878
2879 #ifdef MULTIPLE_HEAPS
2880     PER_HEAP_ISOLATED
2881     CLREvent gc_start_event;
2882
2883     PER_HEAP_ISOLATED
2884     CLREvent ee_suspend_event;
2885
2886     PER_HEAP
2887     heap_segment* new_heap_segment;
2888
2889 #define alloc_quantum_balance_units (16)
2890
2891     PER_HEAP_ISOLATED
2892     size_t min_balance_threshold;
2893 #else //MULTIPLE_HEAPS
2894
2895     PER_HEAP
2896     size_t allocation_running_time;
2897
2898     PER_HEAP
2899     size_t allocation_running_amount;
2900
2901 #endif //MULTIPLE_HEAPS
2902
2903     PER_HEAP_ISOLATED
2904     gc_mechanisms settings;
2905
2906     PER_HEAP_ISOLATED
2907     gc_history_global gc_data_global;
2908
2909     PER_HEAP_ISOLATED
2910     size_t gc_last_ephemeral_decommit_time;
2911
2912     PER_HEAP_ISOLATED
2913     size_t gc_gen0_desired_high;
2914
2915     PER_HEAP
2916     size_t gen0_big_free_spaces;
2917
2918 #ifdef _WIN64
2919     PER_HEAP_ISOLATED
2920     size_t youngest_gen_desired_th;
2921
2922     PER_HEAP_ISOLATED
2923     size_t mem_one_percent;
2924
2925     PER_HEAP_ISOLATED
2926     ULONGLONG total_physical_mem;
2927
2928     PER_HEAP_ISOLATED
2929     ULONGLONG available_physical_mem;
2930 #endif //_WIN64
2931
2932     PER_HEAP_ISOLATED
2933     size_t last_gc_index;
2934
2935     PER_HEAP_ISOLATED
2936     size_t min_segment_size;
2937
2938     PER_HEAP
2939     BYTE* lowest_address;
2940
2941     PER_HEAP
2942     BYTE* highest_address;
2943
2944     PER_HEAP
2945     BOOL ephemeral_promotion;
2946     PER_HEAP
2947     BYTE* saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
2948     PER_HEAP
2949     size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
2950
2951 protected:
2952 #ifdef MULTIPLE_HEAPS
2953     PER_HEAP
2954     GCHeap* vm_heap;
2955     PER_HEAP
2956     int heap_number;
2957     PER_HEAP
2958     VOLATILE(int) alloc_context_count;
2959 #else //MULTIPLE_HEAPS
2960 #define vm_heap ((GCHeap*) g_pGCHeap)
2961 #define heap_number (0)
2962 #endif //MULTIPLE_HEAPS
2963
2964 #ifndef MULTIPLE_HEAPS
2965     SPTR_DECL(heap_segment,ephemeral_heap_segment);
2966 #else
2967     PER_HEAP
2968     heap_segment* ephemeral_heap_segment;
2969 #endif // !MULTIPLE_HEAPS
2970
2971     PER_HEAP
2972     size_t time_bgc_last;
2973
2974     PER_HEAP
2975     BYTE*       gc_low; // lowest address being condemned
2976
2977     PER_HEAP
2978     BYTE*       gc_high; //highest address being condemned
2979
2980     PER_HEAP
2981     size_t      mark_stack_tos;
2982
2983     PER_HEAP
2984     size_t      mark_stack_bos;
2985
2986     PER_HEAP
2987     size_t      mark_stack_array_length;
2988
2989     PER_HEAP
2990     mark*       mark_stack_array;
2991
2992     PER_HEAP
2993     BOOL       verify_pinned_queue_p;
2994
2995     PER_HEAP
2996     BYTE*       oldest_pinned_plug;
2997
2998 #ifdef FEATURE_LOH_COMPACTION
2999     PER_HEAP
3000     size_t      loh_pinned_queue_tos;
3001
3002     PER_HEAP
3003     size_t      loh_pinned_queue_bos;
3004
3005     PER_HEAP
3006     size_t      loh_pinned_queue_length;
3007
3008     PER_HEAP_ISOLATED
3009     int         loh_pinned_queue_decay;
3010
3011     PER_HEAP
3012     mark*       loh_pinned_queue;
3013
3014     // This is for forced LOH compaction via the COMPlus env var
3015     PER_HEAP_ISOLATED
3016     BOOL        loh_compaction_always_p;
3017
3018     // This is set by the user.
3019     PER_HEAP_ISOLATED
3020     gc_loh_compaction_mode loh_compaction_mode;
3021
3022     // We may not compact LOH on every heap if we can't
3023     // grow the pinned queue. This is to indicate whether
3024     // this heap's LOH is compacted or not. So even if
3025     // settings.loh_compaction is TRUE this may not be TRUE.
3026     PER_HEAP
3027     BOOL        loh_compacted_p;
3028 #endif //FEATURE_LOH_COMPACTION
3029
3030 #ifdef BACKGROUND_GC
3031
3032     PER_HEAP
3033     DWORD bgc_thread_id;
3034
3035 #ifdef WRITE_WATCH
3036     PER_HEAP
3037     BYTE* background_written_addresses [array_size+2];
3038 #endif //WRITE_WATCH
3039
3040 #if defined (DACCESS_COMPILE) && !defined (MULTIPLE_HEAPS)
3041     // doesn't need to be volatile for DAC.
3042     SVAL_DECL(c_gc_state, current_c_gc_state);
3043 #else
3044     PER_HEAP_ISOLATED
3045     VOLATILE(c_gc_state) current_c_gc_state;     //tells the large object allocator to
3046     //mark the object as new since the start of gc.
3047 #endif //DACCESS_COMPILE && !MULTIPLE_HEAPS
3048
3049     PER_HEAP_ISOLATED
3050     gc_mechanisms saved_bgc_settings;
3051
3052     PER_HEAP
3053     gc_history_per_heap saved_bgc_data_per_heap;
3054
3055     PER_HEAP
3056     BOOL bgc_data_saved_p;
3057
3058     PER_HEAP
3059     BOOL bgc_thread_running; // gc thread is in its main loop
3060
3061     PER_HEAP_ISOLATED
3062     BOOL keep_bgc_threads_p;
3063
3064     // This event is used by BGC threads to do something on 
3065     // one specific thread while other BGC threads have to 
3066     // wait. This is different from a join because you can't
3067     // specify which thread should be doing some task
3068     // while other threads have to wait.
3069     // For example, to make the BGC threads managed threads 
3070     // we need to create them on the thread that called 
3071     // SuspendEE which is heap 0.
3072     PER_HEAP_ISOLATED
3073     CLREvent bgc_threads_sync_event;
3074
3075     PER_HEAP
3076     Thread* bgc_thread;
3077
3078     PER_HEAP
3079     CRITICAL_SECTION bgc_threads_timeout_cs;
3080
3081     PER_HEAP_ISOLATED
3082     CLREvent background_gc_done_event;
3083
3084     PER_HEAP
3085     CLREvent background_gc_create_event;
3086
3087     PER_HEAP_ISOLATED
3088     CLREvent ee_proceed_event;
3089
3090     PER_HEAP
3091     CLREvent gc_lh_block_event;
3092
3093     PER_HEAP_ISOLATED
3094     BOOL gc_can_use_concurrent;
3095
3096     PER_HEAP_ISOLATED
3097     BOOL temp_disable_concurrent_p;
3098
3099     PER_HEAP_ISOLATED
3100     BOOL do_ephemeral_gc_p;
3101
3102     PER_HEAP_ISOLATED
3103     BOOL do_concurrent_p;
3104
3105     PER_HEAP
3106     VOLATILE(bgc_state) current_bgc_state;
3107
3108     struct gc_history
3109     {
3110         size_t gc_index;
3111         bgc_state current_bgc_state;
3112         DWORD gc_time_ms;
3113         // This is in bytes per ms; consider breaking it 
3114         // into the efficiency per phase.
3115         size_t gc_efficiency; 
3116         BYTE* eph_low;
3117         BYTE* gen0_start;
3118         BYTE* eph_high;
3119         BYTE* bgc_highest;
3120         BYTE* bgc_lowest;
3121         BYTE* fgc_highest;
3122         BYTE* fgc_lowest;
3123         BYTE* g_highest;
3124         BYTE* g_lowest;
3125     };
3126
3127 #define max_history_count 64
3128
3129     PER_HEAP
3130     int gchist_index_per_heap;
3131
3132     PER_HEAP
3133     gc_history gchist_per_heap[max_history_count];
3134
3135     PER_HEAP_ISOLATED
3136     int gchist_index;
3137
3138     PER_HEAP_ISOLATED
3139     gc_mechanisms_store gchist[max_history_count];
3140
3141     PER_HEAP
3142     void add_to_history_per_heap();
3143
3144     PER_HEAP_ISOLATED
3145     void add_to_history();
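    // A sketch of how the history above can be recorded as a fixed-size ring
    // buffer - hypothetical, shown only to illustrate the data layout:
#if 0 // illustrative sketch only
    gc_history* current = &gchist_per_heap[gchist_index_per_heap];
    current->gc_index          = settings.gc_index;
    current->current_bgc_state = current_bgc_state;
    // ... snapshot the remaining fields ...
    gchist_index_per_heap = (gchist_index_per_heap + 1) % max_history_count;
#endif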
3146
3147     PER_HEAP
3148     size_t total_promoted_bytes;
3149
3150     PER_HEAP
3151     size_t     bgc_overflow_count;
3152
3153     PER_HEAP
3154     size_t     bgc_begin_loh_size;
3155     PER_HEAP
3156     size_t     end_loh_size;
3157
3158     // We need to throttle the LOH allocations during BGC since we can't
3159     // collect LOH when BGC is in progress. 
3160     // We allow the LOH heap size to double during a BGC. So for every
3161     // 10% increase we will have the LOH allocating thread sleep for one more
3162     // ms. So if we are already 30% over the original heap size, the thread will
3163     // sleep for 3 ms.
3164     PER_HEAP
3165     DWORD      bgc_alloc_spin_loh;
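    // The policy above as arithmetic - a hypothetical helper, not the real
    // tuning code: one extra millisecond of sleep per 10% of LOH growth since
    // the BGC began, so 30% growth maps to a 3 ms sleep.
#if 0 // illustrative sketch only
    size_t grown_pct = (bgc_loh_size_increased * 100) / max (bgc_begin_loh_size, (size_t)1);
    DWORD  sleep_ms  = (DWORD)(grown_pct / 10);
#endif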
3166
3167     // This includes what we allocate at the end of the segment - allocating
3168     // from the free list doesn't increase the heap size.
3169     PER_HEAP
3170     size_t     bgc_loh_size_increased;
3171
3172     PER_HEAP
3173     size_t     bgc_loh_allocated_in_free;
3174
3175     PER_HEAP
3176     size_t     background_soh_alloc_count;
3177
3178     PER_HEAP
3179     size_t     background_loh_alloc_count;
3180
3181     PER_HEAP
3182     BYTE**     background_mark_stack_tos;
3183
3184     PER_HEAP
3185     BYTE**     background_mark_stack_array;
3186
3187     PER_HEAP
3188     size_t    background_mark_stack_array_length;
3189
3190     PER_HEAP
3191     BYTE*     background_min_overflow_address;
3192
3193     PER_HEAP
3194     BYTE*     background_max_overflow_address;
3195
3196     // We can't process the soh range concurrently so we
3197     // wait till final mark to process it.
3198     PER_HEAP
3199     BOOL      processed_soh_overflow_p;
3200
3201     PER_HEAP
3202     BYTE*     background_min_soh_overflow_address;
3203
3204     PER_HEAP
3205     BYTE*     background_max_soh_overflow_address;
3206
3207     PER_HEAP
3208     heap_segment* saved_overflow_ephemeral_seg;
3209
3210 #ifndef MULTIPLE_HEAPS
3211     SPTR_DECL(heap_segment, saved_sweep_ephemeral_seg);
3212
3213     SPTR_DECL(BYTE, saved_sweep_ephemeral_start);
3214
3215     SPTR_DECL(BYTE, background_saved_lowest_address);
3216
3217     SPTR_DECL(BYTE, background_saved_highest_address);
3218 #else
3219
3220     PER_HEAP
3221     heap_segment* saved_sweep_ephemeral_seg;
3222
3223     PER_HEAP
3224     BYTE* saved_sweep_ephemeral_start;
3225
3226     PER_HEAP
3227     BYTE* background_saved_lowest_address;
3228
3229     PER_HEAP
3230     BYTE* background_saved_highest_address;
3231 #endif //!MULTIPLE_HEAPS
3232
3233     // This is used for synchronization between the bgc thread
3234     // for this heap and the user threads allocating on this
3235     // heap.
3236     PER_HEAP
3237     exclusive_sync* bgc_alloc_lock;
3238
3239 #ifdef SNOOP_STATS
3240     PER_HEAP
3241     snoop_stats_data snoop_stat;
3242 #endif //SNOOP_STATS
3243
3244
3245     PER_HEAP
3246     BYTE**          c_mark_list;
3247
3248     PER_HEAP
3249     size_t          c_mark_list_length;
3250
3251     PER_HEAP
3252     size_t          c_mark_list_index;
3253 #endif //BACKGROUND_GC
3254
3255 #ifdef MARK_LIST
3256     PER_HEAP
3257     BYTE** mark_list;
3258
3259     PER_HEAP_ISOLATED
3260     size_t mark_list_size;
3261
3262     PER_HEAP
3263     BYTE** mark_list_end;
3264
3265     PER_HEAP
3266     BYTE** mark_list_index;
3267
3268     PER_HEAP_ISOLATED
3269     BYTE** g_mark_list;
3270 #ifdef PARALLEL_MARK_LIST_SORT
3271     PER_HEAP_ISOLATED
3272     BYTE** g_mark_list_copy;
3273     PER_HEAP
3274     BYTE*** mark_list_piece_start;
3275     BYTE*** mark_list_piece_end;
3276 #endif //PARALLEL_MARK_LIST_SORT
3277 #endif //MARK_LIST
3278
3279     PER_HEAP
3280     BYTE*  min_overflow_address;
3281
3282     PER_HEAP
3283     BYTE*  max_overflow_address;
3284
3285     PER_HEAP
3286     BYTE*  shigh; //keeps track of the highest marked object
3287
3288     PER_HEAP
3289     BYTE*  slow; //keeps track of the lowest marked object
3290
3291     PER_HEAP
3292     size_t allocation_quantum;
3293
3294     PER_HEAP
3295     size_t alloc_contexts_used;
3296
3297     PER_HEAP_ISOLATED
3298     no_gc_region_info current_no_gc_region_info;
3299
3300     PER_HEAP
3301     size_t soh_allocation_no_gc;
3302
3303     PER_HEAP
3304     size_t loh_allocation_no_gc;
3305
3306     PER_HEAP
3307     heap_segment* saved_loh_segment_no_gc;
3308
3309     PER_HEAP_ISOLATED
3310     BOOL proceed_with_gc_p;
3311
3312 #define youngest_generation (generation_of (0))
3313 #define large_object_generation (generation_of (max_generation+1))
3314
3315 #ifndef MULTIPLE_HEAPS
3316     SPTR_DECL(BYTE,alloc_allocated);
3317 #else
3318     PER_HEAP
3319     BYTE* alloc_allocated; //keeps track of the highest
3320     //address allocated by alloc
3321 #endif // !MULTIPLE_HEAPS
3322
3323     // The more_space_lock and gc_lock are used for 3 purposes:
3324     //
3325     // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock)
3326     // 2) to synchronize allocations of large objects (more_space_lock)
3327     // 3) to synchronize the GC itself (gc_lock)
3328     //
3329     PER_HEAP_ISOLATED
3330     GCSpinLock gc_lock; //lock while doing GC
3331
3332     PER_HEAP
3333     GCSpinLock more_space_lock; //lock while allocating more space
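    // The division of labor above, sketched with assumed enter/leave helper
    // names (placeholders for illustration only):
#if 0 // illustrative sketch only
    // purposes 1 & 2: a thread that exhausted its quantum, or is allocating a
    // large object, serializes on the per-heap lock
    enter_spin_lock (&more_space_lock);
    // ... hand out a new allocation region, or decide that a GC is needed ...
    leave_spin_lock (&more_space_lock);

    // purpose 3: the GC itself is serialized by the global lock
    enter_spin_lock (&gc_lock);
    // ... perform the collection ...
    leave_spin_lock (&gc_lock);
#endif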
3334
3335 #ifdef SYNCHRONIZATION_STATS
3336
3337     PER_HEAP
3338     unsigned int good_suspension;
3339
3340     PER_HEAP
3341     unsigned int bad_suspension;
3342
3343     // Number of times when msl_acquire is > 200 cycles.
3344     PER_HEAP
3345     unsigned int num_high_msl_acquire;
3346
3347     // Number of times when msl_acquire is < 200 cycles.
3348     PER_HEAP
3349     unsigned int num_low_msl_acquire;
3350
3351     // Number of times the more_space_lock is acquired.
3352     PER_HEAP
3353     unsigned int num_msl_acquired;
3354
3355     // Total cycles it takes to acquire the more_space_lock.
3356     PER_HEAP
3357     ULONGLONG total_msl_acquire;
3358
3359     PER_HEAP
3360     void init_heap_sync_stats()
3361     {
3362         good_suspension = 0;
3363         bad_suspension = 0;
3364         num_msl_acquired = 0;
3365         total_msl_acquire = 0;
3366         num_high_msl_acquire = 0;
3367         num_low_msl_acquire = 0;
3368         more_space_lock.init();
3369         gc_lock.init();
3370     }
3371
3372     PER_HEAP
3373     void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3374     {
3375         printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3376             heap_num,
3377             (int)alloc_contexts_used, // size_t member; cast to match the %d format
3378             good_suspension,
3379             bad_suspension,
3380             (unsigned int)(total_msl_acquire / gc_count_during_log),
3381             num_high_msl_acquire / gc_count_during_log,
3382             num_low_msl_acquire / gc_count_during_log,
3383             num_msl_acquired / gc_count_during_log,
3384             more_space_lock.num_switch_thread / gc_count_during_log,
3385             more_space_lock.num_wait_longer / gc_count_during_log,
3386             more_space_lock.num_switch_thread_w / gc_count_during_log,
3387             more_space_lock.num_disable_preemptive_w / gc_count_during_log);
3388     }
3389
3390 #endif //SYNCHRONIZATION_STATS
3391
3392 #ifdef MULTIPLE_HEAPS
3393     PER_HEAP
3394     generation generation_table [NUMBERGENERATIONS+1];
3395 #endif
3396
3397
3398 #define NUM_LOH_ALIST (7)
3399 #define BASE_LOH_ALIST (64*1024)
3400     PER_HEAP 
3401     alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
3402
3403 #define NUM_GEN2_ALIST (12)
3404 #define BASE_GEN2_ALIST (1*64)
3405     PER_HEAP
3406     alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
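    // Worked example for the bucket defines above, under the assumption that
    // each successive alloc_list bucket covers a size range twice as large as
    // the previous one, starting at the BASE_* value: the gen2 buckets would
    // then begin at 64, 128, 256, ... bytes and the LOH buckets at 64k, 128k,
    // 256k, ... bytes. A hypothetical "first suitable bucket" lookup for a
    // request of size sz (illustrative only, not the allocator's actual code):
    //
    //   unsigned int bucket = 0;
    //   size_t limit = BASE_GEN2_ALIST;
    //   while ((bucket < (NUM_GEN2_ALIST - 1)) && (sz >= limit))
    //   {
    //       bucket++;
    //       limit *= 2;
    //   }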
3407
3408 //------------------------------------------    
3409
3410     PER_HEAP
3411     dynamic_data dynamic_data_table [NUMBERGENERATIONS+1];
3412
3413     PER_HEAP
3414     gc_history_per_heap gc_data_per_heap;
3415
3416     // dynamic tuning.
3417     PER_HEAP
3418     BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
3419     // If elevate_p is FALSE, it means we are determining fragmentation for a generation
3420     // to decide whether we should condemn this gen; otherwise it means we are deciding
3421     // whether we should elevate to doing a max_gen GC from an ephemeral gen.
3422     PER_HEAP
3423     BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
3424     PER_HEAP
3425     BOOL 
3426     dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number, ULONGLONG total_mem);
3427     PER_HEAP
3428     BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, ULONGLONG available_mem);
3429     PER_HEAP
3430     BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
3431
3432     PER_HEAP
3433     int generation_skip_ratio;//in %
3434
3435     PER_HEAP
3436     BOOL gen0_bricks_cleared;
3437 #ifdef FFIND_OBJECT
3438     PER_HEAP
3439     int gen0_must_clear_bricks;
3440 #endif //FFIND_OBJECT
3441     
3442     PER_HEAP_ISOLATED
3443     size_t full_gc_counts[gc_type_max];
3444
3445     // the # of bytes allocated since the last full compacting GC.
3446     PER_HEAP
3447     unsigned __int64 loh_alloc_since_cg;
3448
3449     PER_HEAP
3450     BOOL elevation_requested;
3451
3452     // if this is TRUE, we should always guarantee that we do a 
3453     // full compacting GC before we OOM.
3454     PER_HEAP
3455     BOOL last_gc_before_oom;
3456
3457     PER_HEAP_ISOLATED
3458     BOOL should_expand_in_full_gc;
3459
3460 #ifdef BACKGROUND_GC
3461     PER_HEAP_ISOLATED
3462     size_t ephemeral_fgc_counts[max_generation];
3463
3464     PER_HEAP_ISOLATED
3465     BOOL alloc_wait_event_p;
3466
3467 #ifndef MULTIPLE_HEAPS
3468     SPTR_DECL(BYTE, next_sweep_obj);
3469 #else
3470     PER_HEAP
3471     BYTE* next_sweep_obj;
3472 #endif //MULTIPLE_HEAPS
3473
3474     PER_HEAP
3475     BYTE* current_sweep_pos;
3476
3477 #endif //BACKGROUND_GC
3478
3479 #ifndef MULTIPLE_HEAPS
3480     SVAL_DECL(oom_history, oom_info);
3481 #ifdef FEATURE_PREMORTEM_FINALIZATION
3482     SPTR_DECL(CFinalize,finalize_queue);
3483 #endif //FEATURE_PREMORTEM_FINALIZATION
3484 #else
3485
3486     PER_HEAP
3487     oom_history oom_info;
3488
3489 #ifdef FEATURE_PREMORTEM_FINALIZATION
3490     PER_HEAP
3491     PTR_CFinalize finalize_queue;
3492 #endif //FEATURE_PREMORTEM_FINALIZATION
3493 #endif // !MULTIPLE_HEAPS
3494
3495     PER_HEAP
3496     fgm_history fgm_result;
3497
3498     PER_HEAP_ISOLATED
3499     size_t eph_gen_starts_size;
3500
3501     PER_HEAP
3502     BOOL        ro_segments_in_range;
3503
3504 #ifdef BACKGROUND_GC
3505     PER_HEAP
3506     heap_segment* freeable_small_heap_segment;
3507 #endif //BACKGROUND_GC
3508
3509     PER_HEAP
3510     heap_segment* freeable_large_heap_segment;
3511
3512     PER_HEAP_ISOLATED
3513     heap_segment* segment_standby_list;
3514
3515     PER_HEAP
3516     size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
3517
3518     PER_HEAP
3519     size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
3520
3521     PER_HEAP
3522     size_t ordered_plug_indices[MAX_NUM_BUCKETS];
3523
3524     PER_HEAP
3525     size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
3526
3527     PER_HEAP
3528     BOOL ordered_plug_indices_init;
3529
3530     PER_HEAP
3531     BOOL use_bestfit;
3532
3533     PER_HEAP
3534     BYTE* bestfit_first_pin;
3535
3536     PER_HEAP
3537     BOOL commit_end_of_seg;
3538
3539     PER_HEAP
3540     size_t max_free_space_items; // dynamically adjusted.
3541
3542     PER_HEAP
3543     size_t free_space_buckets;
3544
3545     PER_HEAP
3546     size_t free_space_items;
3547
3548     // -1 means we are using all the free
3549     // spaces we have (not including
3550     // end of seg space).
3551     PER_HEAP
3552     int trimmed_free_space_index;
3553
3554     PER_HEAP
3555     size_t total_ephemeral_plugs;
3556
3557     PER_HEAP
3558     seg_free_spaces* bestfit_seg;
3559
3560     // Note: we know this from the plan phase.
3561     // total_ephemeral_plugs actually has the same value,
3562     // but while we are calculating it we also store
3563     // info on how big the plugs are for best fit, which we
3564     // don't do in the plan phase.
3565     // TODO: get rid of total_ephemeral_plugs.
3566     PER_HEAP
3567     size_t total_ephemeral_size;
3568
3569 public:
3570
3571 #ifdef HEAP_ANALYZE
3572
3573     PER_HEAP_ISOLATED
3574     BOOL heap_analyze_enabled;
3575
3576     PER_HEAP
3577     size_t internal_root_array_length;
3578
3579 #ifndef MULTIPLE_HEAPS
3580     SPTR_DECL(PTR_BYTE, internal_root_array);
3581     SVAL_DECL(size_t, internal_root_array_index);
3582     SVAL_DECL(BOOL,   heap_analyze_success);
3583 #else
3584     PER_HEAP
3585     BYTE** internal_root_array;
3586
3587     PER_HEAP
3588     size_t internal_root_array_index;
3589
3590     PER_HEAP
3591     BOOL   heap_analyze_success;
3592 #endif // !MULTIPLE_HEAPS
3593
3594     // next two fields are used to optimize the search for the object 
3595     // enclosing the current reference handled by ha_mark_object_simple.
3596     PER_HEAP
3597     BYTE*  current_obj;
3598
3599     PER_HEAP
3600     size_t current_obj_size;
3601
3602 #endif //HEAP_ANALYZE
3603
3604     /* ----------------------- global members ----------------------- */
3605 public:
3606
3607     PER_HEAP
3608     int         condemned_generation_num;
3609
3610     PER_HEAP
3611     BOOL        blocking_collection;
3612
3613 #ifdef MULTIPLE_HEAPS
3614     SVAL_DECL(int, n_heaps);
3615     SPTR_DECL(PTR_gc_heap, g_heaps);
3616
3617     static
3618     HANDLE*   g_gc_threads; // keep all of the gc threads.
3619     static
3620     size_t*   g_promoted;
3621 #ifdef BACKGROUND_GC
3622     static
3623     size_t*   g_bpromoted;
3624 #endif //BACKGROUND_GC
3625 #ifdef MH_SC_MARK
3626     PER_HEAP_ISOLATED
3627     int*  g_mark_stack_busy;
3628 #endif //MH_SC_MARK
3629 #else
3630     static
3631     size_t    g_promoted;
3632 #ifdef BACKGROUND_GC
3633     static
3634     size_t    g_bpromoted;
3635 #endif //BACKGROUND_GC
3636 #endif //MULTIPLE_HEAPS
3637     
3638     static
3639     size_t reserved_memory;
3640     static
3641     size_t reserved_memory_limit;
3642     static
3643     BOOL      g_low_memory_status;
3644
3645 protected:
3646     PER_HEAP
3647     void update_collection_counts ();
3648
3649 }; // class gc_heap
3650
3651
3652 #ifdef FEATURE_PREMORTEM_FINALIZATION
3653 class CFinalize
3654 {
3655 #ifdef DACCESS_COMPILE
3656     friend class ::ClrDataAccess;
3657 #endif // DACCESS_COMPILE
3658 private:
3659
3660     //adjust the count and add a constant to add a segment
3661     static const int ExtraSegCount = 2;
3662     static const int FinalizerListSeg = NUMBERGENERATIONS+1;
3663     static const int CriticalFinalizerListSeg = NUMBERGENERATIONS;
3664     //Does not correspond to a segment
3665     static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
3666
3667     PTR_PTR_Object m_Array;
3668     PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
3669     PTR_PTR_Object m_EndArray;
3670     size_t   m_PromotedCount;
3671     
3672     VOLATILE(LONG) lock;
3673 #ifdef _DEBUG
3674     DWORD lockowner_threadid;
3675 #endif // _DEBUG
3676
3677     BOOL GrowArray();
3678     void MoveItem (Object** fromIndex,
3679                    unsigned int fromSeg,
3680                    unsigned int toSeg);
3681
3682     inline PTR_PTR_Object& SegQueue (unsigned int Seg)
3683     {
3684         return (Seg ? m_FillPointers [Seg-1] : m_Array);
3685     }
3686     inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
3687     {
3688         return m_FillPointers [Seg];
3689     }
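    // From the two accessors above, the entries belonging to segment Seg are the
    // contiguous slice [SegQueue (Seg), SegQueueLimit (Seg)) of m_Array: segment 0
    // starts at m_Array and each later segment starts where the previous one ends
    // (m_FillPointers [Seg-1]). A minimal walking sketch, assuming the fill
    // pointers are not being mutated concurrently:
    //
    //   for (Object** ppObj = SegQueue (Seg); ppObj < SegQueueLimit (Seg); ppObj++)
    //   {
    //       Object* obj = *ppObj;
    //       /* ... */
    //   }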
3690
3691     BOOL IsSegEmpty ( unsigned int i)
3692     {
3693         ASSERT ( (int)i < FreeList);
3694         return (SegQueueLimit(i) == SegQueue (i));
3695
3696     }
3697
3698     BOOL FinalizeSegForAppDomain (AppDomain *pDomain, 
3699                                   BOOL fRunFinalizers, 
3700                                   unsigned int Seg);
3701
3702 public:
3703     ~CFinalize();
3704     bool Initialize();
3705     void EnterFinalizeLock();
3706     void LeaveFinalizeLock();
3707     bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
3708     Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
3709     BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
3710     void RelocateFinalizationData (int gen, gc_heap* hp);
3711 #ifdef GC_PROFILING
3712     void WalkFReachableObjects (gc_heap* hp);
3713 #endif //GC_PROFILING
3714     void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
3715     void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
3716     size_t GetPromotedCount();
3717
3718     //Methods used by the shutdown code to call every finalizer
3719     void SetSegForShutDown(BOOL fHasLock);
3720     size_t GetNumberFinalizableObjects();
3721     void DiscardNonCriticalObjects();
3722
3723     //Methods used by the app domain unloading call to finalize objects in an app domain
3724     BOOL FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers);
3725
3726     void CheckFinalizerObjects();
3727 };
3728 #endif // FEATURE_PREMORTEM_FINALIZATION
3729
3730 inline
3731  size_t& dd_begin_data_size (dynamic_data* inst)
3732 {
3733   return inst->begin_data_size;
3734 }
3735 inline
3736  size_t& dd_survived_size (dynamic_data* inst)
3737 {
3738   return inst->survived_size;
3739 }
3740 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
3741 inline
3742  size_t& dd_num_npinned_plugs(dynamic_data* inst)
3743 {
3744   return inst->num_npinned_plugs;
3745 }
3746 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
3747 inline
3748 size_t& dd_pinned_survived_size (dynamic_data* inst)
3749 {
3750   return inst->pinned_survived_size;
3751 }
3752 inline
3753 size_t& dd_added_pinned_size (dynamic_data* inst)
3754 {
3755   return inst->added_pinned_size;
3756 }
3757 inline
3758 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
3759 {
3760   return inst->artificial_pinned_survived_size;
3761 }
3762 #ifdef SHORT_PLUGS
3763 inline
3764 size_t& dd_padding_size (dynamic_data* inst)
3765 {
3766   return inst->padding_size;
3767 }
3768 #endif //SHORT_PLUGS
3769 inline
3770  size_t& dd_current_size (dynamic_data* inst)
3771 {
3772   return inst->current_size;
3773 }
3774 inline
3775 float& dd_surv (dynamic_data* inst)
3776 {
3777   return inst->surv;
3778 }
3779 inline
3780 size_t& dd_freach_previous_promotion (dynamic_data* inst)
3781 {
3782   return inst->freach_previous_promotion;
3783 }
3784 inline
3785 size_t& dd_desired_allocation (dynamic_data* inst)
3786 {
3787   return inst->desired_allocation;
3788 }
3789 inline
3790 size_t& dd_collection_count (dynamic_data* inst)
3791 {
3792     return inst->collection_count;
3793 }
3794 inline
3795 size_t& dd_promoted_size (dynamic_data* inst)
3796 {
3797     return inst->promoted_size;
3798 }
3799 inline
3800 float& dd_limit (dynamic_data* inst)
3801 {
3802   return inst->limit;
3803 }
3804 inline
3805 float& dd_max_limit (dynamic_data* inst)
3806 {
3807   return inst->max_limit;
3808 }
3809 inline
3810 size_t& dd_min_gc_size (dynamic_data* inst)
3811 {
3812   return inst->min_gc_size;
3813 }
3814 inline
3815 size_t& dd_max_size (dynamic_data* inst)
3816 {
3817   return inst->max_size;
3818 }
3819 inline
3820 size_t& dd_min_size (dynamic_data* inst)
3821 {
3822   return inst->min_size;
3823 }
3824 inline
3825 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
3826 {
3827   return inst->new_allocation;
3828 }
3829 inline
3830 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
3831 {
3832   return inst->gc_new_allocation;
3833 }
3834 inline
3835 size_t& dd_default_new_allocation (dynamic_data* inst)
3836 {
3837   return inst->default_new_allocation;
3838 }
3839 inline
3840 size_t& dd_fragmentation_limit (dynamic_data* inst)
3841 {
3842   return inst->fragmentation_limit;
3843 }
3844 inline
3845 float& dd_fragmentation_burden_limit (dynamic_data* inst)
3846 {
3847   return inst->fragmentation_burden_limit;
3848 }
3849 inline
3850 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
3851 {
3852   return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
3853 }
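// Worked example: if dd_fragmentation_burden_limit (inst) is 0.30f, the value
// returned above is min (0.60f, 0.75f) == 0.60f; any configured limit of
// 0.375f or more gets clamped to 0.75f.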
3854 inline
3855 size_t& dd_fragmentation (dynamic_data* inst)
3856 {
3857   return inst->fragmentation;
3858 }
3859
3860 inline
3861 size_t& dd_gc_clock (dynamic_data* inst)
3862 {
3863   return inst->gc_clock;
3864 }
3865 inline
3866 size_t& dd_time_clock (dynamic_data* inst)
3867 {
3868   return inst->time_clock;
3869 }
3870
3871 inline
3872 size_t& dd_gc_elapsed_time (dynamic_data* inst)
3873 {
3874     return inst->gc_elapsed_time;
3875 }
3876
3877 inline
3878 float& dd_gc_speed (dynamic_data* inst)
3879 {
3880     return inst->gc_speed;
3881 }
3882
3883 inline
3884 alloc_context* generation_alloc_context (generation* inst)
3885 {
3886     return &(inst->allocation_context);
3887 }
3888
3889 inline
3890 BYTE*& generation_allocation_start (generation* inst)
3891 {
3892   return inst->allocation_start;
3893 }
3894 inline
3895 BYTE*& generation_allocation_pointer (generation* inst)
3896 {
3897   return inst->allocation_context.alloc_ptr;
3898 }
3899 inline
3900 BYTE*& generation_allocation_limit (generation* inst)
3901 {
3902   return inst->allocation_context.alloc_limit;
3903 }
3904 inline 
3905 allocator* generation_allocator (generation* inst)
3906 {
3907     return &inst->free_list_allocator;
3908 }
3909
3910 inline
3911 PTR_heap_segment& generation_start_segment (generation* inst)
3912 {
3913   return inst->start_segment;
3914 }
3915 inline
3916 heap_segment*& generation_allocation_segment (generation* inst)
3917 {
3918   return inst->allocation_segment;
3919 }
3920 inline
3921 BYTE*& generation_plan_allocation_start (generation* inst)
3922 {
3923   return inst->plan_allocation_start;
3924 }
3925 inline
3926 size_t& generation_plan_allocation_start_size (generation* inst)
3927 {
3928   return inst->plan_allocation_start_size;
3929 }
3930 inline
3931 BYTE*& generation_allocation_context_start_region (generation* inst)
3932 {
3933   return inst->allocation_context_start_region;
3934 }
3935 inline
3936 size_t& generation_free_list_space (generation* inst)
3937 {
3938   return inst->free_list_space;
3939 }
3940 inline
3941 size_t& generation_free_obj_space (generation* inst)
3942 {
3943   return inst->free_obj_space;
3944 }
3945 inline
3946 size_t& generation_allocation_size (generation* inst)
3947 {
3948   return inst->allocation_size;
3949 }
3950
3951 inline
3952 size_t& generation_pinned_allocated (generation* inst)
3953 {
3954     return inst->pinned_allocated;
3955 }
3956 inline
3957 size_t& generation_pinned_allocation_sweep_size (generation* inst)
3958 {
3959     return inst->pinned_allocation_sweep_size;
3960 }
3961 inline
3962 size_t& generation_pinned_allocation_compact_size (generation* inst)
3963 {
3964     return inst->pinned_allocation_compact_size;
3965 }
3966 inline
3967 size_t&  generation_free_list_allocated (generation* inst)
3968 {
3969     return inst->free_list_allocated;
3970 }
3971 inline
3972 size_t&  generation_end_seg_allocated (generation* inst)
3973 {
3974     return inst->end_seg_allocated;
3975 }
3976 inline
3977 BOOL&  generation_allocate_end_seg_p (generation* inst)
3978 {
3979     return inst->allocate_end_seg_p;
3980 }
3981 inline
3982 size_t& generation_condemned_allocated (generation* inst)
3983 {
3984     return inst->condemned_allocated;
3985 }
3986 #ifdef FREE_USAGE_STATS
3987 inline
3988 size_t& generation_pinned_free_obj_space (generation* inst)
3989 {
3990     return inst->pinned_free_obj_space;
3991 }
3992 inline
3993 size_t& generation_allocated_in_pinned_free (generation* inst)
3994 {
3995     return inst->allocated_in_pinned_free;
3996 }
3997 inline
3998 size_t& generation_allocated_since_last_pin (generation* inst)
3999 {
4000     return inst->allocated_since_last_pin;
4001 }
4002 #endif //FREE_USAGE_STATS
4003 inline 
4004 float generation_allocator_efficiency (generation* inst)
4005 {
4006     if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4007     {
4008         return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4009     }
4010     else
4011         return 0;
4012 }
4013 inline
4014 size_t generation_unusable_fragmentation (generation* inst)
4015 {
4016     return (size_t)(generation_free_obj_space (inst) + 
4017                     (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
4018 }
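// Worked example for the two helpers above: with free_list_allocated == 300,
// free_obj_space == 100 and free_list_space == 1000, the allocator efficiency
// is 300 / (300 + 100) == 0.75, so the unusable fragmentation is
// 100 + (1 - 0.75) * 1000 == 350 bytes.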
4019
4020 #define plug_skew           sizeof(ObjHeader)
4021 #define min_obj_size        (sizeof(BYTE*)+plug_skew+sizeof(size_t))//syncblock + vtable+ first field
4022 #define min_free_list       (sizeof(BYTE*)+min_obj_size) //Need one slot more
4023 //Note that this encodes the fact that plug_skew is a multiple of BYTE*.
4024 struct plug
4025 {
4026     BYTE *  skew[plug_skew / sizeof(BYTE *)];
4027 };
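// Size arithmetic for the defines above, assuming sizeof (ObjHeader) equals the
// pointer size (which is what the "multiple of BYTE*" note relies on): on a
// 32-bit build plug_skew == 4 and min_obj_size == 4 + 4 + 4 == 12 bytes; on a
// 64-bit build plug_skew == 8 and min_obj_size == 8 + 8 + 8 == 24 bytes. In
// both cases skew[] holds exactly one pointer-sized slot.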
4028
4029 class pair
4030 {
4031 public:
4032     short left;
4033     short right;
4034 };
4035
4036 //Note that these encode the fact that plug_skew is a multiple of BYTE*.
4037 // Each new field is prepended to the prior struct.
4038
4039 struct plug_and_pair
4040 {
4041     pair        m_pair;
4042     plug        m_plug;
4043 };
4044
4045 struct plug_and_reloc
4046 {
4047     ptrdiff_t   reloc;
4048     pair        m_pair;
4049     plug        m_plug;
4050 };
4051
4052 struct plug_and_gap
4053 {
4054     ptrdiff_t   gap;
4055     ptrdiff_t   reloc;
4056     union
4057     {
4058         pair    m_pair;
4059         int     lr;  //for clearing the entire pair in one instruction
4060     };
4061     plug        m_plug;
4062 };
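// Per the "prepended" note above, each record extends the previous one at the
// front: plug_and_reloc is plug_and_pair with a reloc added before it, and
// plug_and_gap adds a gap before that (so, ignoring any compiler padding,
// sizeof (plug_and_gap) == sizeof (plug_and_pair) + 2 * sizeof (ptrdiff_t)).
// The pair/lr union works because two shorts occupy the same 4 bytes as the
// int, so the whole pair can be cleared with a single 32-bit store.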
4063
4064 struct gap_reloc_pair
4065 {
4066     size_t gap;
4067     size_t   reloc;
4068     pair        m_pair;
4069 };
4070
4071 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4072
4073 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4074 {
4075     plug_and_gap plugandgap;
4076 };
4077
4078 struct loh_obj_and_pad
4079 {
4080     ptrdiff_t   reloc;    
4081     plug        m_plug;
4082 };
4083
4084 struct loh_padding_obj
4085 {
4086     BYTE*       mt;
4087     size_t      len;
4088     ptrdiff_t   reloc;
4089     plug        m_plug;
4090 };
4091 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4092
4093 //flags description
4094 #define heap_segment_flags_readonly     1
4095 #define heap_segment_flags_inrange      2
4096 #define heap_segment_flags_unmappable   4
4097 #define heap_segment_flags_loh          8
4098 #ifdef BACKGROUND_GC
4099 #define heap_segment_flags_swept        16
4100 #define heap_segment_flags_decommitted  32
4101 #define heap_segment_flags_ma_committed 64
4102 // for segments whose mark array is only partially committed.
4103 #define heap_segment_flags_ma_pcommitted 128
4104 #endif //BACKGROUND_GC
4105
4106 //need to be careful to keep enough pad items to fit a relocation node
4107 //padded to QuadWord before the plug_skew
4108
4109 class heap_segment
4110 {
4111 public:
4112     BYTE*           allocated;
4113     BYTE*           committed;
4114     BYTE*           reserved;
4115     BYTE*           used;
4116     BYTE*           mem;
4117     size_t          flags;
4118     PTR_heap_segment next;
4119     BYTE*           plan_allocated;
4120 #ifdef BACKGROUND_GC
4121     BYTE*           background_allocated;
4122     BYTE*           saved_bg_allocated;
4123 #endif //BACKGROUND_GC
4124
4125 #ifdef MULTIPLE_HEAPS
4126     gc_heap*        heap;
4127 #endif //MULTIPLE_HEAPS
4128
4129 #ifdef _MSC_VER
4130 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4131 #pragma warning(disable:4324)  // structure was padded due to __declspec(align())
4132 #endif
4133     aligned_plug_and_gap padandplug;
4134 #ifdef _MSC_VER
4135 #pragma warning(default:4324)  // structure was padded due to __declspec(align())
4136 #endif
4137 };
4138
4139 inline
4140 BYTE*& heap_segment_reserved (heap_segment* inst)
4141 {
4142   return inst->reserved;
4143 }
4144 inline
4145 BYTE*& heap_segment_committed (heap_segment* inst)
4146 {
4147   return inst->committed;
4148 }
4149 inline
4150 BYTE*& heap_segment_used (heap_segment* inst)
4151 {
4152   return inst->used;
4153 }
4154 inline
4155 BYTE*& heap_segment_allocated (heap_segment* inst)
4156 {
4157   return inst->allocated;
4158 }
4159
4160 inline
4161 BOOL heap_segment_read_only_p (heap_segment* inst)
4162 {
4163     return ((inst->flags & heap_segment_flags_readonly) != 0);
4164 }
4165
4166 inline
4167 BOOL heap_segment_in_range_p (heap_segment* inst)
4168 {
4169     return (!(inst->flags & heap_segment_flags_readonly) ||
4170             ((inst->flags & heap_segment_flags_inrange) != 0));
4171 }
4172
4173 inline
4174 BOOL heap_segment_unmappable_p (heap_segment* inst)
4175 {
4176     return (!(inst->flags & heap_segment_flags_readonly) ||
4177             ((inst->flags & heap_segment_flags_unmappable) != 0));
4178 }
4179
4180 inline
4181 BOOL heap_segment_loh_p (heap_segment * inst)
4182 {
4183     return !!(inst->flags & heap_segment_flags_loh);
4184 }
4185
4186 #ifdef BACKGROUND_GC
4187 inline
4188 BOOL heap_segment_decommitted_p (heap_segment * inst)
4189 {
4190     return !!(inst->flags & heap_segment_flags_decommitted);
4191 }
4192 #endif //BACKGROUND_GC
4193
4194 inline
4195 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4196 {
4197   return inst->next;
4198 }
4199 inline
4200 BYTE*& heap_segment_mem (heap_segment* inst)
4201 {
4202   return inst->mem;
4203 }
4204 inline
4205 BYTE*& heap_segment_plan_allocated (heap_segment* inst)
4206 {
4207   return inst->plan_allocated;
4208 }
4209
4210 #ifdef BACKGROUND_GC
4211 inline
4212 BYTE*& heap_segment_background_allocated (heap_segment* inst)
4213 {
4214   return inst->background_allocated;
4215 }
4216 inline
4217 BYTE*& heap_segment_saved_bg_allocated (heap_segment* inst)
4218 {
4219   return inst->saved_bg_allocated;
4220 }
4221 #endif //BACKGROUND_GC
4222
4223 #ifdef MULTIPLE_HEAPS
4224 inline
4225 gc_heap*& heap_segment_heap (heap_segment* inst)
4226 {
4227     return inst->heap;
4228 }
4229 #endif //MULTIPLE_HEAPS
4230
4231 #ifndef MULTIPLE_HEAPS
4232
4233 #ifndef DACCESS_COMPILE
4234 extern "C" {
4235 #endif //!DACCESS_COMPILE
4236
4237 GARY_DECL(generation,generation_table,NUMBERGENERATIONS+1);
4238
4239 #ifndef DACCESS_COMPILE
4240 }
4241 #endif //!DACCESS_COMPILE
4242
4243 #endif //MULTIPLE_HEAPS
4244
4245 inline
4246 generation* gc_heap::generation_of (int  n)
4247 {
4248     assert (((n <= max_generation+1) && (n >= 0)));
4249     return &generation_table [ n ];
4250 }
4251
4252 inline
4253 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4254 {
4255     return &dynamic_data_table [ gen_number ];
4256 }
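// Minimal usage sketch for the two accessors above (illustrative only; this
// would run inside gc_heap member code):
//
//   generation*   gen0   = generation_of (0);      // same object as youngest_generation
//   dynamic_data* dd0    = dynamic_data_of (0);
//   size_t        budget = dd_desired_allocation (dd0);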
4257
4258 extern "C" BYTE* g_ephemeral_low;
4259 extern "C" BYTE* g_ephemeral_high;
4260
4261 #define card_word_width ((size_t)32)
4262
4263 //
4264 // The value of card_size is determined empirically according to the average size of an object
4265 // In the code we also rely on the assumption that one card_table entry (DWORD) covers an entire os page
4266 //
4267 #if defined (_WIN64)
4268 #define card_size ((size_t)(2*OS_PAGE_SIZE/card_word_width))
4269 #else
4270 #define card_size ((size_t)(OS_PAGE_SIZE/card_word_width))
4271 #endif //_WIN64
4272
4273 inline
4274 size_t card_word (size_t card)
4275 {
4276     return card / card_word_width;
4277 }
4278
4279 inline
4280 unsigned card_bit (size_t card)
4281 {
4282     return (unsigned)(card % card_word_width);
4283 }
4284
4285 inline
4286 size_t gcard_of (BYTE* object)
4287 {
4288     return (size_t)(object) / card_size;
4289 }
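// Worked example for the card helpers above, assuming OS_PAGE_SIZE == 4096:
// card_size is 4096/32 == 128 bytes on 32-bit and 2*4096/32 == 256 bytes on
// 64-bit, so one card-table DWORD (card_word_width == 32 cards) spans
// 32*128 == 4096 or 32*256 == 8192 bytes. For an object at 0x12345678 on a
// 32-bit build, gcard_of gives card 0x12345678/128 == 0x2468AC, which lives in
// card word 0x2468AC/32 == 0x12345 at bit 0x2468AC % 32 == 12.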
4290