1 // Copyright (c) Microsoft. All rights reserved.
2 // Licensed under the MIT license. See LICENSE file in the project root for full license information.
3 //
4 //
7 //
8 // optimize for speed
9
10
11 #ifndef _DEBUG
12 #ifdef _MSC_VER
13 #pragma optimize( "t", on )
14 #endif
15 #endif
16
17 #ifdef __GNUC__
18 #define inline __attribute__((always_inline)) inline
19 #else
20 #define inline __forceinline
21 #endif // __GNUC__
22
23 #include "gc.h"
24 #include "gcrecord.h"
25
26 #ifdef _MSC_VER
27 #pragma warning(disable:4293)
28 #pragma warning(disable:4477)
29 #endif //_MSC_VER
30
31 inline void FATAL_GC_ERROR()
32 {
33 #ifndef DACCESS_COMPILE
34     GCToOSInterface::DebugBreak();
35 #endif // DACCESS_COMPILE
36     _ASSERTE(!"Fatal Error in GC.");
37     GCToEEInterface::HandleFatalError((unsigned int)COR_E_EXECUTIONENGINE);
38 }
39
40 #ifdef MULTIPLE_HEAPS
41 // This turns on instrumentation that collects info for heap balancing.
42 // Define it and make sure you have *only* HEAP_BALANCE_LOG/HEAP_BALANCE_TEMP_LOG
43 // level logging enabled.
44 //#define HEAP_BALANCE_INSTRUMENTATION
45 #endif //MULTIPLE_HEAPS
46
47 #ifdef _MSC_VER
48 #pragma inline_depth(20)
49 #endif
50
51 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
52 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
53 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much
54 // work to make FEATURE_STRUCTALIGN not apply to LOH so they can be both
55 // turned on.
56 #define FEATURE_LOH_COMPACTION
57
58 #ifdef FEATURE_64BIT_ALIGNMENT
59 // We need the following feature as part of keeping 64-bit types aligned in the GC heap.
60 #define RESPECT_LARGE_ALIGNMENT //Preserve double alignment of objects during relocation
61 #endif //FEATURE_64BIT_ALIGNMENT
62
63 #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
64
65 #ifdef SHORT_PLUGS
66 #define DESIRED_PLUG_LENGTH (1000)
67 #endif //SHORT_PLUGS
68
69 #define FEATURE_PREMORTEM_FINALIZATION
70 #define GC_HISTORY
71
72 #ifndef FEATURE_REDHAWK
73 #define HEAP_ANALYZE
74 #define COLLECTIBLE_CLASS
75 #endif // !FEATURE_REDHAWK
76
77 #ifdef HEAP_ANALYZE
78 #define initial_internal_roots        (1024*16)
79 #endif // HEAP_ANALYZE
80
81 #define MARK_LIST         //used sorted list to speed up plan phase
82
83 #define BACKGROUND_GC   //concurrent background GC (requires WRITE_WATCH)
84
85 #ifdef SERVER_GC
86 #define MH_SC_MARK //scalable marking
87 //#define SNOOP_STATS //diagnostic
88 #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
89 #endif //SERVER_GC
90
91 //This is used to mark some type volatile only when the scalable marking is used.
92 #if defined (SERVER_GC) && defined (MH_SC_MARK)
93 #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
94 #else //SERVER_GC&&MH_SC_MARK
95 #define SERVER_SC_MARK_VOLATILE(x) x
96 #endif //SERVER_GC&&MH_SC_MARK
97
98 //#define MULTIPLE_HEAPS         //Allow multiple heaps for servers
99
100 #define CARD_BUNDLE         //enable card bundle feature.(requires WRITE_WATCH)
101
102 // #define ALLOW_REFERENCES_IN_POH  //Allow POH objects to contain references.
103
104 #ifdef BACKGROUND_GC
105 #define BGC_SERVO_TUNING
106 #endif //BACKGROUND_GC
107
108 #if defined(BACKGROUND_GC) || defined(CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
109 #define WRITE_WATCH     //Write Watch feature
110 #endif //BACKGROUND_GC || CARD_BUNDLE
111
112 #ifdef WRITE_WATCH
113 #define array_size 100
114 #endif //WRITE_WATCH
115
116 #define FFIND_DECAY  7      //Number of GC for which fast find will be active
117
118 #ifndef MAX_LONGPATH
119 #define MAX_LONGPATH 1024
120 #endif // MAX_LONGPATH
121
122 //#define TRACE_GC
123 //#define SIMPLE_DPRINTF
124
125 //#define JOIN_STATS         //amount of time spent in the join
126
127 //#define SYNCHRONIZATION_STATS
128 //#define SEG_REUSE_STATS
129
130 #ifdef SYNCHRONIZATION_STATS
131 #define BEGIN_TIMING(x) \
132     int64_t x##_start; \
133     x##_start = GCToOSInterface::QueryPerformanceCounter()
134
135 #define END_TIMING(x) \
136     int64_t x##_end; \
137     x##_end = GCToOSInterface::QueryPerformanceCounter(); \
138     x += x##_end - x##_start
139
140 #else //SYNCHRONIZATION_STATS
141 #define BEGIN_TIMING(x)
142 #define END_TIMING(x)
143 #endif //SYNCHRONIZATION_STATS
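// Illustrative usage sketch (not part of this header): the timing macros
// expect an int64_t accumulator with the matching name already in scope,
// e.g. a hypothetical
//
//     int64_t msl_acquire = 0;
//     BEGIN_TIMING (msl_acquire);   // declares and sets msl_acquire_start
//     // ... code being timed ...
//     END_TIMING (msl_acquire);     // adds the elapsed counts to msl_acquire
//
// When SYNCHRONIZATION_STATS is not defined, both macros expand to nothing.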
144
145 #ifdef GC_CONFIG_DRIVEN
146 void GCLogConfig (const char *fmt, ... );
147 #define cprintf(x) {GCLogConfig x;}
148 #endif //GC_CONFIG_DRIVEN
149
150 // For the bestfit algorithm when we relocate ephemeral generations into an
151 // existing gen2 segment.
152 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
153 #define MIN_INDEX_POWER2 6
154
155 #ifdef SERVER_GC
156
157 #ifdef HOST_64BIT
158 #define MAX_INDEX_POWER2 30
159 #else
160 #define MAX_INDEX_POWER2 26
161 #endif  // HOST_64BIT
162
163 #else //SERVER_GC
164
165 #ifdef HOST_64BIT
166 #define MAX_INDEX_POWER2 28
167 #else
168 #define MAX_INDEX_POWER2 24
169 #endif  // HOST_64BIT
170
171 #endif //SERVER_GC
172
173 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
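// For example, 64-bit server GC gets MAX_NUM_BUCKETS == 30 - 6 + 1 == 25,
// matching the 2^6..2^30 sizes recorded above; 32-bit workstation GC gets
// 24 - 6 + 1 == 19 buckets.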
174
175 #define MAX_NUM_FREE_SPACES 200
176 #define MIN_NUM_FREE_SPACES 5
177
178 #ifdef memcpy
179 #undef memcpy
180 #endif //memcpy
181
182 #ifdef FEATURE_STRUCTALIGN
183 #define REQD_ALIGN_DCL ,int requiredAlignment
184 #define REQD_ALIGN_ARG ,requiredAlignment
185 #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
186 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
187 #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
188 #else // FEATURE_STRUCTALIGN
189 #define REQD_ALIGN_DCL
190 #define REQD_ALIGN_ARG
191 #define REQD_ALIGN_AND_OFFSET_DCL
192 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
193 #define REQD_ALIGN_AND_OFFSET_ARG
194 #endif // FEATURE_STRUCTALIGN
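// Illustrative usage sketch (hypothetical signature, not part of this header):
// these macros let the extra alignment parameters appear only in
// FEATURE_STRUCTALIGN builds, e.g.
//
//     uint8_t* allocate_in_older_gen (generation* gen, size_t size
//                                     REQD_ALIGN_AND_OFFSET_DCL);
//     ...
//     allocate_in_older_gen (gen, size REQD_ALIGN_AND_OFFSET_ARG);
//
// In non-FEATURE_STRUCTALIGN builds both macros expand to nothing and only
// the plain parameters are compiled.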
195
196 #ifdef MULTIPLE_HEAPS
197 #define THREAD_NUMBER_DCL ,int thread
198 #define THREAD_NUMBER_ARG ,thread
199 #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
200 #define THREAD_FROM_HEAP  int thread = heap_number;
201 #define HEAP_FROM_THREAD  gc_heap* hpt = gc_heap::g_heaps[thread];
202 #else
203 #define THREAD_NUMBER_DCL
204 #define THREAD_NUMBER_ARG
205 #define THREAD_NUMBER_FROM_CONTEXT
206 #define THREAD_FROM_HEAP
207 #define HEAP_FROM_THREAD  gc_heap* hpt = 0;
208 #endif //MULTIPLE_HEAPS
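// Illustrative usage sketch (hypothetical helper, not part of this header):
// these macros thread a heap number through a call chain only in
// MULTIPLE_HEAPS builds, e.g.
//
//     void touch_card (uint8_t** ppObject THREAD_NUMBER_DCL)
//     {
//         HEAP_FROM_THREAD;   // gc_heap* hpt = g_heaps[thread] (server GC) or 0 (workstation GC)
//         // ... use hpt ...
//     }
//     ...
//     touch_card (ppObject THREAD_NUMBER_ARG);
//
// so the same source compiles with or without an explicit heap number.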
209
210 //These constants are ordered
211 const int policy_sweep = 0;
212 const int policy_compact = 1;
213 const int policy_expand  = 2;
214
215 #ifdef TRACE_GC
216 #define MIN_CUSTOM_LOG_LEVEL 7
217 #define SEG_REUSE_LOG_0 (MIN_CUSTOM_LOG_LEVEL)
218 #define SEG_REUSE_LOG_1 (MIN_CUSTOM_LOG_LEVEL + 1)
219 #define DT_LOG_0 (MIN_CUSTOM_LOG_LEVEL + 2)
220 #define BGC_TUNING_LOG (MIN_CUSTOM_LOG_LEVEL + 3)
221 #define GTC_LOG (MIN_CUSTOM_LOG_LEVEL + 4)
222 #define GC_TABLE_LOG (MIN_CUSTOM_LOG_LEVEL + 5)
223 #define JOIN_LOG (MIN_CUSTOM_LOG_LEVEL + 6)
224 #define SPINLOCK_LOG (MIN_CUSTOM_LOG_LEVEL + 7)
225 #define SNOOP_LOG (MIN_CUSTOM_LOG_LEVEL + 8)
226 #define COMMIT_ACCOUNTING_LOG (MIN_CUSTOM_LOG_LEVEL + 9)
227
228 // NOTE! This is for HEAP_BALANCE_INSTRUMENTATION
229 // This particular one is special and needs to be well formatted because we
230 // do post processing on it with tools\GCLogParser. If you need to add some
231 // detail to help with an investigation that's not processed by the tooling,
232 // prefix it with TEMP so that line will be written to the result as is.
233 // I have some already logged with HEAP_BALANCE_TEMP_LOG.
234 #define HEAP_BALANCE_LOG (DT_LOG_0 + 7)
235 #define HEAP_BALANCE_TEMP_LOG (DT_LOG_0 + 8)
236
237 #ifndef DACCESS_COMPILE
238
239 #ifdef SIMPLE_DPRINTF
240
241 void GCLog (const char *fmt, ... );
242 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
243 #else //SIMPLE_DPRINTF
244 // Nobody used the logging mechanism that used to be here. If we find ourselves
245 // wanting to inspect GC logs on unmodified builds, we can use this define here
246 // to do so.
247 #define dprintf(l, x)
248 //#define dprintf(l,x) STRESS_LOG_VA(x);
249
250 #endif //SIMPLE_DPRINTF
251
252 #else //DACCESS_COMPILE
253 #define dprintf(l,x)
254 #endif //DACCESS_COMPILE
255 #else //TRACE_GC
256 #define dprintf(l,x)
257 #endif //TRACE_GC
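// Illustrative usage sketch: dprintf takes a level and a parenthesized
// printf-style argument list, e.g. a hypothetical
//
//     dprintf (GTC_LOG, ("h%d: condemning gen%d", heap_number, n));
//
// Under TRACE_GC with SIMPLE_DPRINTF this expands to a GCLog call when the
// level passes the filter above; in every other configuration it compiles
// away to nothing.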
258
259 #if !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE)
260 #undef  assert
261 #define assert _ASSERTE
262 #undef  ASSERT
263 #define ASSERT _ASSERTE
264 #endif // FEATURE_REDHAWK
265
266 struct GCDebugSpinLock {
267     VOLATILE(int32_t) lock;                   // -1 if free, 0 if held
268 #ifdef _DEBUG
269     VOLATILE(Thread *) holding_thread;     // -1 if no thread holds the lock.
270     VOLATILE(BOOL) released_by_gc_p;       // a GC thread released the lock.
271 #endif
272 #if defined (SYNCHRONIZATION_STATS)
273     // number of times we went into SwitchToThread in enter_spin_lock.
274     unsigned int num_switch_thread;
275     // number of times we went into WaitLonger.
276     unsigned int num_wait_longer;
277     // number of times we went to calling SwitchToThread in WaitLonger.
278     unsigned int num_switch_thread_w;
279     // number of times we went to calling DisablePreemptiveGC in WaitLonger.
280     unsigned int num_disable_preemptive_w;
281 #endif
282
283     GCDebugSpinLock()
284         : lock(-1)
285 #ifdef _DEBUG
286         , holding_thread((Thread*) -1)
287 #endif
288 #if defined (SYNCHRONIZATION_STATS)
289         , num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
290 #endif
291     {
292     }
293
294 #if defined (SYNCHRONIZATION_STATS)
295     void init()
296     {
297         num_switch_thread = 0;
298         num_wait_longer = 0;
299         num_switch_thread_w = 0;
300         num_disable_preemptive_w = 0;
301     }
302 #endif
303 };
304 typedef GCDebugSpinLock GCSpinLock;
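// Minimal acquisition sketch (illustrative only; lock_var is any GCSpinLock,
// and the real enter_spin_lock/leave_spin_lock helpers in gc.cpp also handle
// waiting, stats and preemptive-GC transitions). The lock word moves from -1
// (free) to 0 (held):
//
//     while (Interlocked::CompareExchange (&lock_var.lock, 0, -1) != -1)
//     {
//         GCToOSInterface::YieldThread (0);  // spin/yield until it looks free
//     }
//     // ... critical section ...
//     lock_var.lock = -1;                    // release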
305
306 class mark;
307 class heap_segment;
308 class CObjectHeader;
309 class sorted_table;
310 class seg_free_spaces;
311 class gc_heap;
312
313 #ifdef BACKGROUND_GC
314 class exclusive_sync;
315 class recursive_gc_sync;
316 #endif //BACKGROUND_GC
317
318 #ifdef MULTIPLE_HEAPS
319 // card marking stealing only makes sense in server GC
320 // but it works and is easier to debug for workstation GC
321 // so turn it on for server GC, turn on for workstation GC if necessary
322 #define FEATURE_CARD_MARKING_STEALING
323 #endif //MULTIPLE_HEAPS
324
325 #ifdef FEATURE_CARD_MARKING_STEALING
326 class card_marking_enumerator;
327 #define CARD_MARKING_STEALING_ARG(a)    ,a
328 #define CARD_MARKING_STEALING_ARGS(a,b,c)    ,a,b,c
329 #else // FEATURE_CARD_MARKING_STEALING
330 #define CARD_MARKING_STEALING_ARG(a)
331 #define CARD_MARKING_STEALING_ARGS(a,b,c)
332 #endif // FEATURE_CARD_MARKING_STEALING
333
334 // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
335 // make sure you change that one if you change this one!
336 enum gc_pause_mode
337 {
338     pause_batch = 0, //We are not concerned about pause length
339     pause_interactive = 1,     //We are running an interactive app
340     pause_low_latency = 2,     //short pauses are essential
341     //avoid long pauses from blocking full GCs unless running out of memory
342     pause_sustained_low_latency = 3,
343     pause_no_gc = 4
344 };
345
346 enum gc_loh_compaction_mode
347 {
348     loh_compaction_default = 1, // the default mode, don't compact LOH.
349     loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
350     loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
351 };
352
353 enum set_pause_mode_status
354 {
355     set_pause_mode_success = 0,
356     set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
357 };
358
359 /*
360  Latency modes required the user to have specific GC knowledge (eg, budget, full blocking GC).
361  We are trying to move away from them as it makes a lot more sense for users to tell
362  us what's the most important out of the perf aspects that make sense to them.
363
364  In general there are 3 such aspects:
365
366  + memory footprint
367  + throughput
368  + pause predictability
369
370  Currently the following levels are supported. We may (and will likely) add more
371  in the future.
372
373  +----------+--------------------+---------------------------------------+
374  | Level    | Optimization Goals | Latency Characteristics               |
375  +==========+====================+=======================================+
376  | 0        | memory footprint   | pauses can be long and more frequent  |
377  +----------+--------------------+---------------------------------------+
378  | 1        | balanced           | pauses are more predictable and more  |
379  |          |                    | frequent. the longest pauses are      |
380  |          |                    | shorter than those in level 0.        |
381  +----------+--------------------+---------------------------------------+
382 */
383 enum gc_latency_level
384 {
385     latency_level_first = 0,
386     latency_level_memory_footprint = latency_level_first,
387     latency_level_balanced = 1,
388     latency_level_last = latency_level_balanced,
389     latency_level_default = latency_level_balanced
390 };
391
392 enum gc_tuning_point
393 {
394     tuning_deciding_condemned_gen = 0,
395     tuning_deciding_full_gc = 1,
396     tuning_deciding_compaction = 2,
397     tuning_deciding_expansion = 3,
398     tuning_deciding_promote_ephemeral = 4,
399     tuning_deciding_short_on_seg = 5
400 };
401
402 enum gc_oh_num
403 {
404     soh = 0,
405     loh = 1,
406     poh = 2,
407     none = 3,
408     total_oh_count = 4
409 };
410
411 gc_oh_num gen_to_oh (int gen);
412
413 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
414 static const char * const str_bgc_state[] =
415 {
416     "not_in_process",
417     "mark_handles",
418     "mark_stack",
419     "revisit_soh",
420     "revisit_loh",
421     "overflow_soh",
422     "overflow_loh",
423     "final_marking",
424     "sweep_soh",
425     "sweep_loh",
426     "plan_phase"
427 };
428 #endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
429
430 enum allocation_state
431 {
432     a_state_start = 0,
433     a_state_can_allocate,
434     a_state_cant_allocate,
435     // This could be due to having to wait till a GC is done,
436     // or having to try a different heap.
437     a_state_retry_allocate,
438     a_state_try_fit,
439     a_state_try_fit_new_seg,
440     a_state_try_fit_after_cg,
441     a_state_try_fit_after_bgc,
442     a_state_try_free_full_seg_in_bgc,
443     a_state_try_free_after_bgc,
444     a_state_try_seg_end,
445     a_state_acquire_seg,
446     a_state_acquire_seg_after_cg,
447     a_state_acquire_seg_after_bgc,
448     a_state_check_and_wait_for_bgc,
449     a_state_trigger_full_compact_gc,
450     a_state_trigger_ephemeral_gc,
451     a_state_trigger_2nd_ephemeral_gc,
452     a_state_check_retry_seg,
453     a_state_max
454 };
455
456 enum gc_type
457 {
458     gc_type_compacting = 0,
459     gc_type_blocking = 1,
460 #ifdef BACKGROUND_GC
461     gc_type_background = 2,
462 #endif //BACKGROUND_GC
463     gc_type_max = 3
464 };
465
466 //encapsulates the mechanism for the current gc
467 class gc_mechanisms
468 {
469 public:
470     VOLATILE(size_t) gc_index; // starts from 1 for the first GC, like dd_collection_count
471     int condemned_generation;
472     BOOL promotion;
473     BOOL compaction;
474     BOOL loh_compaction;
475     BOOL heap_expansion;
476     uint32_t concurrent;
477     BOOL demotion;
478     BOOL card_bundles;
479     int  gen0_reduction_count;
480     BOOL should_lock_elevation;
481     int elevation_locked_count;
482     BOOL elevation_reduced;
483     BOOL minimal_gc;
484     gc_reason reason;
485     gc_pause_mode pause_mode;
486     BOOL found_finalizers;
487
488 #ifdef BACKGROUND_GC
489     BOOL background_p;
490     bgc_state b_state;
491     BOOL allocations_allowed;
492 #endif //BACKGROUND_GC
493
494 #ifdef STRESS_HEAP
495     BOOL stress_induced;
496 #endif // STRESS_HEAP
497
498     // These are opportunistically set
499     uint32_t entry_memory_load;
500     uint64_t entry_available_physical_mem;
501     uint32_t exit_memory_load;
502
503     void init_mechanisms(); //for each GC
504     void first_init(); // for the life of the EE
505
506     void record (gc_history_global* history);
507 };
508
509 // This is a compact version of gc_mechanism that we use to save in the history.
510 class gc_mechanisms_store
511 {
512 public:
513     size_t gc_index;
514     bool promotion;
515     bool compaction;
516     bool loh_compaction;
517     bool heap_expansion;
518     bool concurrent;
519     bool demotion;
520     bool card_bundles;
521     bool should_lock_elevation;
522     int condemned_generation   : 8;
523     int gen0_reduction_count   : 8;
524     int elevation_locked_count : 8;
525     gc_reason reason           : 8;
526     gc_pause_mode pause_mode   : 8;
527 #ifdef BACKGROUND_GC
528     bgc_state b_state          : 8;
529 #endif //BACKGROUND_GC
530     bool found_finalizers;
531
532 #ifdef BACKGROUND_GC
533     bool background_p;
534 #endif //BACKGROUND_GC
535
536 #ifdef STRESS_HEAP
537     bool stress_induced;
538 #endif // STRESS_HEAP
539
540 #ifdef HOST_64BIT
541     uint32_t entry_memory_load;
542 #endif // HOST_64BIT
543
544     void store (gc_mechanisms* gm)
545     {
546         gc_index                = gm->gc_index;
547         condemned_generation    = gm->condemned_generation;
548         promotion               = (gm->promotion != 0);
549         compaction              = (gm->compaction != 0);
550         loh_compaction          = (gm->loh_compaction != 0);
551         heap_expansion          = (gm->heap_expansion != 0);
552         concurrent              = (gm->concurrent != 0);
553         demotion                = (gm->demotion != 0);
554         card_bundles            = (gm->card_bundles != 0);
555         gen0_reduction_count    = gm->gen0_reduction_count;
556         should_lock_elevation   = (gm->should_lock_elevation != 0);
557         elevation_locked_count  = gm->elevation_locked_count;
558         reason                  = gm->reason;
559         pause_mode              = gm->pause_mode;
560         found_finalizers        = (gm->found_finalizers != 0);
561
562 #ifdef BACKGROUND_GC
563         background_p            = (gm->background_p != 0);
564         b_state                 = gm->b_state;
565 #endif //BACKGROUND_GC
566
567 #ifdef STRESS_HEAP
568         stress_induced          = (gm->stress_induced != 0);
569 #endif // STRESS_HEAP
570
571 #ifdef HOST_64BIT
572         entry_memory_load       = gm->entry_memory_load;
573 #endif // HOST_64BIT
574     }
575 };
576
577 typedef DPTR(class heap_segment)               PTR_heap_segment;
578 typedef DPTR(class gc_heap)                    PTR_gc_heap;
579 typedef DPTR(PTR_gc_heap)                      PTR_PTR_gc_heap;
580 #ifdef FEATURE_PREMORTEM_FINALIZATION
581 typedef DPTR(class CFinalize)                  PTR_CFinalize;
582 #endif // FEATURE_PREMORTEM_FINALIZATION
583
584 //-------------------------------------
585 //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than (1 << first_bucket_bits)
586 //and doubling each time. The last bucket (index == num_buckets - 1) is for the largest sizes with no limit
587
588 #define MAX_SOH_BUCKET_COUNT (13)//Max number of buckets for the SOH generations.
589 #define MAX_BUCKET_COUNT (20)//Max number of buckets.
590 class alloc_list
591 {
592     uint8_t* head;
593     uint8_t* tail;
594
595     size_t damage_count;
596 public:
597 #ifdef FL_VERIFICATION
598     size_t item_count;
599 #endif //FL_VERIFICATION
600
601     uint8_t*& alloc_list_head () { return head;}
602     uint8_t*& alloc_list_tail () { return tail;}
603     size_t& alloc_list_damage_count(){ return damage_count; }
604     alloc_list()
605     {
606         head = 0;
607         tail = 0;
608         damage_count = 0;
609     }
610 };
611
612
613 class allocator
614 {
615     int first_bucket_bits;
616     unsigned int num_buckets;
617     alloc_list first_bucket;
618     alloc_list* buckets;
619     alloc_list& alloc_list_of (unsigned int bn);
620     size_t& alloc_list_damage_count_of (unsigned int bn);
621
622 public:
623     allocator (unsigned int num_b, int fbb, alloc_list* b);
624
625     allocator()
626     {
627         num_buckets = 1;
628         first_bucket_bits = sizeof(size_t) * 8 - 1;
629     }
630
631     unsigned int number_of_buckets()
632     {
633         return num_buckets;
634     }
635
636     // skip buckets that cannot possibly fit "size" and return the next one
637     // there is always such a bucket since the last one fits everything
638     unsigned int first_suitable_bucket(size_t size)
639     {
640         // sizes taking first_bucket_bits or less are mapped to bucket 0
641         // others are mapped to buckets 0, 1, 2 respectively
642         size = (size >> first_bucket_bits) | 1;
643
644         DWORD highest_set_bit_index;
645     #ifdef HOST_64BIT
646         BitScanReverse64(&highest_set_bit_index, size);
647     #else
648         BitScanReverse(&highest_set_bit_index, size);
649     #endif
650
651         return min ((unsigned int)highest_set_bit_index, num_buckets - 1);
652     }
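    // Worked example (illustrative): with first_bucket_bits == 8,
    // first_bucket_size() is 512 and
    //     size  300 -> (300  >> 8) | 1 == 1 -> highest bit 0 -> bucket 0
    //     size  600 -> (600  >> 8) | 1 == 3 -> highest bit 1 -> bucket 1
    //     size 1200 -> (1200 >> 8) | 1 == 5 -> highest bit 2 -> bucket 2
    // i.e. bucket 0 holds sizes below first_bucket_size() and each later
    // bucket covers a doubling range, with the last bucket unbounded.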
653
654     size_t first_bucket_size()
655     {
656         return ((size_t)1 << (first_bucket_bits + 1));
657     }
658
659     uint8_t*& alloc_list_head_of (unsigned int bn)
660     {
661         return alloc_list_of (bn).alloc_list_head();
662     }
663
664     uint8_t*& alloc_list_tail_of (unsigned int bn)
665     {
666         return alloc_list_of (bn).alloc_list_tail();
667     }
668
669     void clear();
670
671     BOOL discard_if_no_fit_p()
672     {
673         return (num_buckets == 1);
674     }
675
676     // This is when we know there's nothing to repair because this free
677     // list has never gone through plan phase. Right now it's only used
678     // by the background ephemeral sweep when we copy the local free list
679     // to gen0's free list.
680     //
681     // We copy head and tail manually (vs together like copy_to_alloc_list)
682     // since we need to copy tail first because when we get the free items off
683     // of each bucket we check head first. We also need to copy the
684     // smaller buckets first so when gen0 allocation needs to thread
685     // smaller items back that bucket is guaranteed to have been fully
686     // copied.
687     void copy_with_no_repair (allocator* allocator_to_copy)
688     {
689         assert (num_buckets == allocator_to_copy->number_of_buckets());
690         for (unsigned int i = 0; i < num_buckets; i++)
691         {
692             alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
693             alloc_list_tail_of(i) = al->alloc_list_tail();
694             alloc_list_head_of(i) = al->alloc_list_head();
695         }
696     }
697
698     void unlink_item (unsigned int bucket_number, uint8_t* item, uint8_t* previous_item, BOOL use_undo_p);
699     void thread_item (uint8_t* item, size_t size);
700     void thread_item_front (uint8_t* item, size_t size);
701     void copy_to_alloc_list (alloc_list* toalist);
702     void copy_from_alloc_list (alloc_list* fromalist);
703     void commit_alloc_list_changes();
704 };
705
706 #define NUM_GEN_POWER2 (20)
707 #define BASE_GEN_SIZE (1*512)
708
709 // group the frequently used ones together (need instrumentation on accessors)
710 class generation
711 {
712 public:
713     // Don't move these first two fields without adjusting the references
714     // from the __asm in jitinterface.cpp.
715     alloc_context   allocation_context;
716     PTR_heap_segment start_segment;
717     uint8_t*        allocation_start;
718     heap_segment*   allocation_segment;
719     uint8_t*        allocation_context_start_region;
720     allocator       free_list_allocator;
721     size_t          free_list_allocated;
722     size_t          end_seg_allocated;
723     BOOL            allocate_end_seg_p;
724     size_t          condemned_allocated;
725     size_t          sweep_allocated;
726     size_t          free_list_space;
727     size_t          free_obj_space;
728     size_t          allocation_size;
729     uint8_t*        plan_allocation_start;
730     size_t          plan_allocation_start_size;
731
732     // these are the pinned plugs that got allocated into this gen.
733     size_t          pinned_allocated;
734     size_t          pinned_allocation_compact_size;
735     size_t          pinned_allocation_sweep_size;
736     int             gen_num;
737
738 #ifdef FREE_USAGE_STATS
739     size_t          gen_free_spaces[NUM_GEN_POWER2];
740     // these are non pinned plugs only
741     size_t          gen_plugs[NUM_GEN_POWER2];
742     size_t          gen_current_pinned_free_spaces[NUM_GEN_POWER2];
743     size_t          pinned_free_obj_space;
744     // this is what got allocated into the pinned free spaces.
745     size_t          allocated_in_pinned_free;
746     size_t          allocated_since_last_pin;
747 #endif //FREE_USAGE_STATS
748 };
749
750 static_assert(offsetof(dac_generation, allocation_context) == offsetof(generation, allocation_context), "DAC generation offset mismatch");
751 static_assert(offsetof(dac_generation, start_segment) == offsetof(generation, start_segment), "DAC generation offset mismatch");
752 static_assert(offsetof(dac_generation, allocation_start) == offsetof(generation, allocation_start), "DAC generation offset mismatch");
753
754 // static data remains the same after it's initialized.
755 // It's per generation.
756 // TODO: for gen_time_tuning, we should put the multipliers in static data.
757 struct static_data
758 {
759     size_t min_size;
760     size_t max_size;
761     size_t fragmentation_limit;
762     float fragmentation_burden_limit;
763     float limit;
764     float max_limit;
765     uint64_t time_clock; // time after which to collect generation, in performance counts (see QueryPerformanceCounter)
766     size_t gc_clock; // number of gcs after which to collect generation
767 };
768
769 // The dynamic data fields are grouped into 3 categories:
770 //
771 // calculated logical data (like desired_allocation)
772 // physical data (like fragmentation)
773 // const data (sdata), initialized at the beginning
774 class dynamic_data
775 {
776 public:
777     ptrdiff_t new_allocation;
778     ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
779     float     surv;
780     size_t    desired_allocation;
781
782     // # of bytes taken by objects (ie, not free space) at the beginning
783     // of the GC.
784     size_t    begin_data_size;
785     // # of bytes taken by survived objects after mark.
786     size_t    survived_size;
787     // # of bytes taken by survived pinned plugs after mark.
788     size_t    pinned_survived_size;
789     size_t    artificial_pinned_survived_size;
790     size_t    added_pinned_size;
791
792 #ifdef SHORT_PLUGS
793     size_t    padding_size;
794 #endif //SHORT_PLUGS
795 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
796     // # of plugs that are not pinned plugs.
797     size_t    num_npinned_plugs;
798 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
799     //total object size after a GC, ie, doesn't include fragmentation
800     size_t    current_size;
801     size_t    collection_count;
802     size_t    promoted_size;
803     size_t    freach_previous_promotion;
804     size_t    fragmentation;    //fragmentation when we don't compact
805     size_t    gc_clock;         //gc# when last GC happened
806     uint64_t  time_clock;       //time when last gc started
807     size_t    gc_elapsed_time;  // Time it took for the gc to complete
808     float     gc_speed;         //  speed in bytes/msec for the gc to complete
809
810     size_t    min_size;
811
812     static_data* sdata;
813 };
814
815 struct recorded_generation_info
816 {
817     size_t size_before;
818     size_t fragmentation_before;
819     size_t size_after;
820     size_t fragmentation_after;
821 };
822
823 struct last_recorded_gc_info
824 {
825     VOLATILE(size_t) index;
826     size_t total_committed;
827     size_t promoted;
828     size_t pinned_objects;
829     size_t finalize_promoted_objects;
830     size_t pause_durations[2];
831     float pause_percentage;
832     recorded_generation_info gen_info[total_generation_count];
833     size_t heap_size;
834     size_t fragmentation;
835     uint32_t memory_load;
836     uint8_t condemned_generation;
837     bool compaction;
838     bool concurrent;
839 };
840
841 #define ro_in_entry 0x1
842
843 // Note that I am storing both h0 and seg0, even though in Server GC you can get to
844 // the heap* from the segment info. This is because heap_of needs to be really fast
845 // and we would not want yet another indirection.
846 struct seg_mapping
847 {
848     // if an address is > boundary it belongs to h1; else h0.
849     // since we init h0 and h1 to 0, if we get 0 it means that
850     // address doesn't exist on managed segments. And heap_of
851     // would just return heap0 which is what it does now.
852     uint8_t* boundary;
853 #ifdef MULTIPLE_HEAPS
854     gc_heap* h0;
855     gc_heap* h1;
856 #endif //MULTIPLE_HEAPS
857     // You could have an address that's in between 2 segments and
858     // this would return a seg; the caller will then use
859     // in_range_for_segment to determine if it's on that seg.
860     heap_segment* seg0; // this is what the seg for h0 is.
861     heap_segment* seg1; // this is what the seg for h1 is.
862     // Note that when frozen objects are used we mask seg1
863     // with 0x1 to indicate that there is a ro segment for
864     // this entry.
865 };
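// Illustrative lookup sketch (hypothetical index_of helper; the real heap_of /
// seg_mapping_table code lives in gc.cpp): given the entry covering an address,
//
//     seg_mapping*  entry = &seg_mapping_table[index_of (address)];
//     gc_heap*      hp    = (address > entry->boundary) ? entry->h1   : entry->h0;
//     heap_segment* seg   = (address > entry->boundary) ? entry->seg1 : entry->seg0;
//
// and the caller still checks in_range_for_segment before trusting seg, since
// the address may fall in the gap between two segments.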
866
867 // alignment helpers
868 //Alignment constant for allocation
869 #define ALIGNCONST (DATA_ALIGNMENT-1)
870
871 inline
872 size_t Align (size_t nbytes, int alignment=ALIGNCONST)
873 {
874     return (nbytes + alignment) & ~alignment;
875 }
876
877 //return alignment constant for small object heap vs large object heap
878 inline
879 int get_alignment_constant (BOOL small_object_p)
880 {
881 #ifdef FEATURE_STRUCTALIGN
882     // If any objects on the large object heap require 8-byte alignment,
883     // the compiler will tell us so.  Let's not guess an alignment here.
884     return ALIGNCONST;
885 #else // FEATURE_STRUCTALIGN
886     return small_object_p ? ALIGNCONST : 7;
887 #endif // FEATURE_STRUCTALIGN
888 }
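// Worked example (illustrative, assuming DATA_ALIGNMENT == 8 so ALIGNCONST == 7):
//
//     Align (13)                                  == (13 + 7) & ~7 == 16
//     Align (13, get_alignment_constant (FALSE))  == 16 as well
//
// i.e. large objects are rounded to 8-byte boundaries even where the
// small-object alignment constant is smaller (32-bit).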
889
890 struct etw_opt_info
891 {
892     size_t desired_allocation;
893     size_t new_allocation;
894     int    gen_number;
895 };
896
897 // Note, I am not removing the ones that are no longer used
898 // because the older versions of the runtime still use them
899 // and ETW interprets them.
900 enum alloc_wait_reason
901 {
902     // When we don't care about firing an event for
903     // this.
904     awr_ignored = -1,
905
906     // when we detect we are in low memory
907     awr_low_memory = 0,
908
909     // when we detect the ephemeral segment is too full
910     awr_low_ephemeral = 1,
911
912     // we've given out too much budget for gen0.
913     awr_gen0_alloc = 2,
914
915     // we've given out too much budget for loh.
916     awr_loh_alloc = 3,
917
918     // this event is really obsolete - it's for pre-XP
919     // OSs where low mem notification is not supported.
920     awr_alloc_loh_low_mem = 4,
921
922     // we ran out of VM space to reserve on loh.
923     awr_loh_oos = 5,
924
925     // ran out of space when allocating a small object
926     awr_gen0_oos_bgc = 6,
927
928     // ran out of space when allocating a large object
929     awr_loh_oos_bgc = 7,
930
931     // waiting for BGC to let FGC happen
932     awr_fgc_wait_for_bgc = 8,
933
934     // wait for bgc to finish to get loh seg.
935     // no longer used with the introduction of loh msl.
936     awr_get_loh_seg = 9,
937
938     // we don't allow loh allocation during bgc planning.
939     // no longer used with the introduction of loh msl.
940     awr_loh_alloc_during_plan = 10,
941
942     // we don't allow too much uoh allocation during bgc.
943     awr_uoh_alloc_during_bgc = 11
944 };
945
946 struct alloc_thread_wait_data
947 {
948     int awr;
949 };
950
951 enum msl_take_state
952 {
953     mt_get_large_seg = 0,
954     mt_bgc_uoh_sweep,
955     mt_wait_bgc,
956     mt_block_gc,
957     mt_clr_mem,
958     mt_clr_large_mem,
959     mt_t_eph_gc,
960     mt_t_full_gc,
961     mt_alloc_small,
962     mt_alloc_large,
963     mt_alloc_small_cant,
964     mt_alloc_large_cant,
965     mt_try_alloc,
966     mt_try_budget,
967     mt_try_servo_budget
968 };
969
970 enum msl_enter_state
971 {
972     me_acquire,
973     me_release
974 };
975
976 struct spinlock_info
977 {
978     msl_enter_state enter_state;
979     msl_take_state take_state;
980     EEThreadId thread_id;
981     bool loh_p;
982 };
983
984 #define HS_CACHE_LINE_SIZE 128
985
986 #ifdef SNOOP_STATS
987 struct snoop_stats_data
988 {
989     int heap_index;
990
991     // total number of objects that we called
992     // gc_mark on.
993     size_t objects_checked_count;
994     // total number of time we called gc_mark
995     // on a 0 reference.
996     size_t zero_ref_count;
997     // total objects actually marked.
998     size_t objects_marked_count;
999     // number of objects written to the mark stack because
1000     // of mark_stolen.
1001     size_t stolen_stack_count;
1002     // number of objects pushed onto the mark stack because
1003     // of the partial mark code path.
1004     size_t partial_stack_count;
1005     // number of objects pushed onto the mark stack because
1006     // of the non partial mark code path.
1007     size_t normal_stack_count;
1008     // number of references marked without mark stack.
1009     size_t non_stack_count;
1010
1011     // number of times we detect next heap's mark stack
1012     // is not busy.
1013     size_t stack_idle_count;
1014
1015     // number of times we do switch to thread.
1016     size_t switch_to_thread_count;
1017
1018     // number of times we are checking if the next heap's
1019     // mark stack is busy.
1020     size_t check_level_count;
1021     // number of times next stack is busy and level is
1022     // at the bottom.
1023     size_t busy_count;
1024     // how many interlocked exchange operations we did
1025     size_t interlocked_count;
1026     // number of times parent objects stolen
1027     size_t partial_mark_parent_count;
1028     // number of times we look at a normal stolen entry,
1029     // or the beginning/ending PM pair.
1030     size_t stolen_or_pm_count;
1031     // number of times we see 2 for the entry.
1032     size_t stolen_entry_count;
1033     // number of times we see a PM entry that's not ready.
1034     size_t pm_not_ready_count;
1035     // number of stolen normal marked objects and partial mark children.
1036     size_t normal_count;
1037     // number of times the bottom of mark stack was cleared.
1038     size_t stack_bottom_clear_count;
1039 };
1040 #endif //SNOOP_STATS
1041
1042 struct no_gc_region_info
1043 {
1044     size_t soh_allocation_size;
1045     size_t loh_allocation_size;
1046     size_t started;
1047     size_t num_gcs;
1048     size_t num_gcs_induced;
1049     start_no_gc_region_status start_status;
1050     gc_pause_mode saved_pause_mode;
1051     size_t saved_gen0_min_size;
1052     size_t saved_gen3_min_size;
1053     BOOL minimal_gc_p;
1054 };
1055
1056 // if you change these, make sure you update them for sos (strike.cpp) as well.
1057 //
1058 // !!!NOTE!!!
1059 // Right now I am only recording data from blocking GCs. When recording from BGC,
1060 // it should have its own copy just like gc_data_per_heap.
1061 // for BGCs we will have a very different set of datapoints to record.
1062 enum interesting_data_point
1063 {
1064     idp_pre_short = 0,
1065     idp_post_short = 1,
1066     idp_merged_pin = 2,
1067     idp_converted_pin = 3,
1068     idp_pre_pin = 4,
1069     idp_post_pin = 5,
1070     idp_pre_and_post_pin = 6,
1071     idp_pre_short_padded = 7,
1072     idp_post_short_padded = 8,
1073     max_idp_count
1074 };
1075
1076 //class definition of the internal class
1077 class gc_heap
1078 {
1079     friend class GCHeap;
1080 #ifdef FEATURE_PREMORTEM_FINALIZATION
1081     friend class CFinalize;
1082 #endif // FEATURE_PREMORTEM_FINALIZATION
1083     friend struct ::alloc_context;
1084     friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
1085     friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1086     friend class t_join;
1087     friend class gc_mechanisms;
1088     friend class seg_free_spaces;
1089
1090 #ifdef BACKGROUND_GC
1091     friend class exclusive_sync;
1092     friend class recursive_gc_sync;
1093 #endif //BACKGROUND_GC
1094
1095 #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1096     friend void checkGCWriteBarrier();
1097     friend void initGCShadow();
1098 #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1099
1100     friend void PopulateDacVars(GcDacVars *gcDacVars);
1101
1102 #ifdef MULTIPLE_HEAPS
1103     typedef void (gc_heap::* card_fn) (uint8_t**, int);
1104 #define call_fn(this_arg,fn) (this_arg->*fn)
1105 #define __this this
1106 #else
1107     typedef void (* card_fn) (uint8_t**);
1108 #define call_fn(this_arg,fn) (*fn)
1109 #define __this (gc_heap*)0
1110 #endif
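// Illustrative call-site sketch (hypothetical card function): a card_fn is
// always invoked through call_fn so the same source works for both flavors,
//
//     call_fn(hpt,fn) (poo THREAD_NUMBER_ARG);
//
// which expands to (hpt->*fn)(poo, thread) with MULTIPLE_HEAPS and to
// (*fn)(poo) without it.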
1111
1112 public:
1113
1114 #ifdef TRACE_GC
1115     PER_HEAP
1116     void print_free_list (int gen, heap_segment* seg);
1117 #endif // TRACE_GC
1118
1119 #ifdef SYNCHRONIZATION_STATS
1120
1121     PER_HEAP_ISOLATED
1122     void init_sync_stats()
1123     {
1124 #ifdef MULTIPLE_HEAPS
1125         for (int i = 0; i < gc_heap::n_heaps; i++)
1126         {
1127             gc_heap::g_heaps[i]->init_heap_sync_stats();
1128         }
1129 #else  //MULTIPLE_HEAPS
1130         init_heap_sync_stats();
1131 #endif  //MULTIPLE_HEAPS
1132     }
1133
1134     PER_HEAP_ISOLATED
1135     void print_sync_stats(unsigned int gc_count_during_log)
1136     {
1137         // bad/good gl acquire is accumulative during the log interval (because the numbers are too small)
1138         // min/max msl_acquire is the min/max during the log interval, not each GC.
1139         // Threads is however many allocation threads for the last GC.
1140         // num of msl acquired, avg_msl, high and low are all for each GC.
1141         printf("%2s%2s%10s%10s%12s%6s%4s%8s(  st,  wl, stw, dpw)\n",
1142             "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
1143
1144 #ifdef MULTIPLE_HEAPS
1145         for (int i = 0; i < gc_heap::n_heaps; i++)
1146         {
1147             gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
1148         }
1149 #else  //MULTIPLE_HEAPS
1150         print_heap_sync_stats(0, gc_count_during_log);
1151 #endif  //MULTIPLE_HEAPS
1152     }
1153
1154 #endif //SYNCHRONIZATION_STATS
1155
1156     PER_HEAP
1157     void verify_soh_segment_list();
1158
1159 #ifdef VERIFY_HEAP
1160     PER_HEAP
1161     void verify_free_lists();
1162     PER_HEAP
1163     void verify_heap (BOOL begin_gc_p);
1164 #endif //VERIFY_HEAP
1165
1166     PER_HEAP_ISOLATED
1167     void fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num);
1168
1169     PER_HEAP_ISOLATED
1170     void fire_pevents();
1171
1172 #ifdef FEATURE_BASICFREEZE
1173     static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
1174 #endif
1175
1176     static
1177     heap_segment* make_heap_segment (uint8_t* new_pages,
1178                                      size_t size,
1179                                      gc_oh_num oh,
1180                                      int h_number);
1181
1182     static
1183     gc_heap* make_gc_heap(
1184 #if defined (MULTIPLE_HEAPS)
1185         GCHeap* vm_heap,
1186         int heap_number
1187 #endif //MULTIPLE_HEAPS
1188         );
1189
1190     static
1191     void destroy_gc_heap(gc_heap* heap);
1192
1193     static
1194     HRESULT initialize_gc  (size_t soh_segment_size,
1195                             size_t loh_segment_size,
1196                             size_t poh_segment_size
1197 #ifdef MULTIPLE_HEAPS
1198                             , int number_of_heaps
1199 #endif //MULTIPLE_HEAPS
1200         );
1201
1202     static
1203     void shutdown_gc();
1204
1205     // If the hard limit is specified, take that into consideration,
1206     // which means this may modify the # of heaps.
1207     PER_HEAP_ISOLATED
1208     size_t get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps);
1209
1210     PER_HEAP_ISOLATED
1211     bool should_retry_other_heap (int gen_number, size_t size);
1212
1213     PER_HEAP
1214     CObjectHeader* allocate (size_t jsize,
1215                              alloc_context* acontext,
1216                              uint32_t flags);
1217
1218 #ifdef MULTIPLE_HEAPS
1219     PER_HEAP_ISOLATED
1220     void hb_log_new_allocation();
1221
1222     PER_HEAP_ISOLATED
1223     void hb_log_balance_activities();
1224
1225     static
1226     void balance_heaps (alloc_context* acontext);
1227     PER_HEAP
1228     ptrdiff_t get_balance_heaps_uoh_effective_budget (int generation_num);
1229     static 
1230     gc_heap* balance_heaps_uoh (alloc_context* acontext, size_t size, int generation_num);
1231     // Unlike balance_heaps_uoh, this may return nullptr if we failed to change heaps.
1232     static
1233     gc_heap* balance_heaps_uoh_hard_limit_retry (alloc_context* acontext, size_t size, int generation_num);
1234     static
1235     void gc_thread_stub (void* arg);
1236 #endif //MULTIPLE_HEAPS
1237
1238     // For UOH allocations we only update the alloc_bytes_uoh in allocation
1239     // context - we don't actually use the ptr/limit from it so I am
1240     // making this explicit by not passing in the alloc_context.
1241     // Note: These are instance methods, but the heap instance is only used for
1242     // lowest_address and highest_address, which are currently the same across all heaps.
1243     PER_HEAP
1244     CObjectHeader* allocate_uoh_object (size_t size, uint32_t flags, int gen_num, int64_t& alloc_bytes);
1245
1246 #ifdef FEATURE_STRUCTALIGN
1247     PER_HEAP
1248     uint8_t* pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size);
1249 #endif // FEATURE_STRUCTALIGN
1250
1251     PER_HEAP_ISOLATED
1252     void do_pre_gc();
1253
1254     PER_HEAP_ISOLATED
1255     void do_post_gc();
1256
1257     PER_HEAP_ISOLATED
1258     void update_recorded_gen_data (last_recorded_gc_info* gc_info);
1259
1260     PER_HEAP
1261     void update_end_gc_time_per_heap();
1262
1263     PER_HEAP_ISOLATED
1264     void update_end_ngc_time();
1265
1266     PER_HEAP
1267     void add_to_history_per_heap();
1268
1269     PER_HEAP_ISOLATED
1270     void add_to_history();
1271
1272 #ifdef BGC_SERVO_TUNING
1273     PER_HEAP_ISOLATED
1274     void check_and_adjust_bgc_tuning (int gen_number, size_t physical_size, ptrdiff_t virtual_fl_size);
1275     PER_HEAP_ISOLATED
1276     void get_and_reset_loh_alloc_info();
1277 #endif //BGC_SERVO_TUNING
1278
1279     PER_HEAP
1280     BOOL expand_soh_with_minimal_gc();
1281
1282     // EE is always suspended when this method is called.
1283     // returning FALSE means we actually didn't do a GC. This happens
1284     // when we figured that we needed to do a BGC.
1285     PER_HEAP
1286     void garbage_collect (int n);
1287
1288     // Since we don't want to waste a join just to do this, I am
1289     // doing this at the last join in gc1.
1290     PER_HEAP_ISOLATED
1291     void pm_full_gc_init_or_clear();
1292
1293     // This does a GC when pm_trigger_full_gc is set
1294     PER_HEAP
1295     void garbage_collect_pm_full_gc();
1296
1297     PER_HEAP_ISOLATED
1298     bool is_pm_ratio_exceeded();
1299
1300     PER_HEAP
1301     void init_records();
1302
1303     static
1304     uint32_t* make_card_table (uint8_t* start, uint8_t* end);
1305
1306     static
1307     void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
1308
1309     static
1310     int grow_brick_card_tables (uint8_t* start,
1311                                 uint8_t* end,
1312                                 size_t size,
1313                                 heap_segment* new_seg,
1314                                 gc_heap* hp,
1315                                 BOOL loh_p);
1316
1317     PER_HEAP
1318     BOOL is_mark_set (uint8_t* o);
1319
1320 #ifdef FEATURE_BASICFREEZE
1321     PER_HEAP_ISOLATED
1322     bool frozen_object_p(Object* obj);
1323 #endif // FEATURE_BASICFREEZE
1324
1325 protected:
1326     PER_HEAP_ISOLATED
1327     BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t pinned_size, int num_heaps, bool use_large_pages_p, bool separated_poh_p);
1328
1329     PER_HEAP_ISOLATED
1330     void destroy_initial_memory();
1331
1332     PER_HEAP_ISOLATED
1333     void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1334
1335     PER_HEAP
1336     void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1337
1338     struct walk_relocate_args
1339     {
1340         uint8_t* last_plug;
1341         BOOL is_shortened;
1342         mark* pinned_plug_entry;
1343         void* profiling_context;
1344         record_surv_fn fn;
1345     };
1346
1347     PER_HEAP
1348     void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
1349
1350     PER_HEAP
1351     void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
1352                     walk_relocate_args* args);
1353
1354     PER_HEAP
1355     void walk_relocation (void* profiling_context, record_surv_fn fn);
1356
1357     PER_HEAP
1358     void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
1359
1360     PER_HEAP
1361     void walk_finalize_queue (fq_walk_fn fn);
1362
1363 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1364     PER_HEAP
1365     void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
1366 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1367
1368     // used in blocking GCs after plan phase so this walks the plugs.
1369     PER_HEAP
1370     void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
1371     PER_HEAP
1372     void walk_survivors_for_uoh (void* profiling_context, record_surv_fn fn, int gen_number);
1373
1374     PER_HEAP
1375     int generation_to_condemn (int n,
1376                                BOOL* blocking_collection_p,
1377                                BOOL* elevation_requested_p,
1378                                BOOL check_only_p);
1379
1380     PER_HEAP_ISOLATED
1381     int joined_generation_to_condemn (BOOL should_evaluate_elevation,
1382                                       int initial_gen,
1383                                       int current_gen,
1384                                       BOOL* blocking_collection
1385                                       STRESS_HEAP_ARG(int n_original));
1386
1387     PER_HEAP
1388     size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
1389
1390     PER_HEAP_ISOLATED
1391     uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
1392
1393     PER_HEAP
1394     void concurrent_print_time_delta (const char* msg);
1395     PER_HEAP
1396     void free_list_info (int gen_num, const char* msg);
1397
1398     // in svr GC on entry and exit of this method, the GC threads are not
1399     // synchronized
1400     PER_HEAP
1401     void gc1();
1402
1403     PER_HEAP_ISOLATED
1404     void save_data_for_no_gc();
1405
1406     PER_HEAP_ISOLATED
1407     void restore_data_for_no_gc();
1408
1409     PER_HEAP_ISOLATED
1410     void update_collection_counts_for_no_gc();
1411
1412     PER_HEAP_ISOLATED
1413     BOOL should_proceed_with_gc();
1414
1415     PER_HEAP_ISOLATED
1416     void record_gcs_during_no_gc();
1417
1418     PER_HEAP
1419     BOOL find_loh_free_for_no_gc();
1420
1421     PER_HEAP
1422     BOOL find_loh_space_for_no_gc();
1423
1424     PER_HEAP
1425     BOOL commit_loh_for_no_gc (heap_segment* seg);
1426
1427     PER_HEAP_ISOLATED
1428     start_no_gc_region_status prepare_for_no_gc_region (uint64_t total_size,
1429                                                         BOOL loh_size_known,
1430                                                         uint64_t loh_size,
1431                                                         BOOL disallow_full_blocking);
1432
1433     PER_HEAP
1434     BOOL loh_allocated_for_no_gc();
1435
1436     PER_HEAP_ISOLATED
1437     void release_no_gc_loh_segments();
1438
1439     PER_HEAP_ISOLATED
1440     void thread_no_gc_loh_segments();
1441
1442     PER_HEAP
1443     void check_and_set_no_gc_oom();
1444
1445     PER_HEAP
1446     void allocate_for_no_gc_after_gc();
1447
1448     PER_HEAP
1449     void set_loh_allocations_for_no_gc();
1450
1451     PER_HEAP
1452     void set_soh_allocations_for_no_gc();
1453
1454     PER_HEAP
1455     void prepare_for_no_gc_after_gc();
1456
1457     PER_HEAP_ISOLATED
1458     void set_allocations_for_no_gc();
1459
1460     PER_HEAP_ISOLATED
1461     BOOL should_proceed_for_no_gc();
1462
1463     PER_HEAP_ISOLATED
1464     start_no_gc_region_status get_start_no_gc_region_status();
1465
1466     PER_HEAP_ISOLATED
1467     end_no_gc_region_status end_no_gc_region();
1468
1469     PER_HEAP_ISOLATED
1470     void handle_failure_for_no_gc();
1471
1472     PER_HEAP
1473     void fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address);
1474
1475     PER_HEAP
1476     void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
1477
1478     PER_HEAP
1479     size_t limit_from_size (size_t size, uint32_t flags, size_t room, int gen_number,
1480                             int align_const);
1481     PER_HEAP
1482     allocation_state try_allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
1483                                               int alloc_generation_number);
1484     PER_HEAP_ISOLATED
1485     BOOL allocate_more_space (alloc_context* acontext, size_t jsize, uint32_t flags,
1486                               int alloc_generation_number);
1487
1488     PER_HEAP
1489     size_t get_full_compact_gc_count();
1490
1491     PER_HEAP
1492     BOOL short_on_end_of_seg (heap_segment* seg, int align_const);
1493
1494     PER_HEAP
1495     BOOL a_fit_free_list_p (int gen_number,
1496                             size_t size,
1497                             alloc_context* acontext,
1498                             uint32_t flags,
1499                             int align_const);
1500
1501 #ifdef BACKGROUND_GC
1502     PER_HEAP
1503     void wait_for_background (alloc_wait_reason awr, bool loh_p);
1504
1505     PER_HEAP
1506     void wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p);
1507
1508     PER_HEAP
1509     void bgc_uoh_alloc_clr (uint8_t* alloc_start,
1510                             size_t size,
1511                             alloc_context* acontext,
1512                             uint32_t flags,
1513                             int align_const,
1514                             int lock_index,
1515                             BOOL check_used_p,
1516                             heap_segment* seg);
1517 #endif //BACKGROUND_GC
1518
1519 #ifdef BACKGROUND_GC
1520     PER_HEAP
1521     void bgc_track_uoh_alloc();
1522
1523     PER_HEAP
1524     void bgc_untrack_uoh_alloc();
1525
1526     PER_HEAP
1527     BOOL bgc_loh_allocate_spin();
1528
1529     PER_HEAP
1530     BOOL bgc_poh_allocate_spin();
1531 #endif //BACKGROUND_GC
1532
1533 #define max_saved_spinlock_info 48
1534
1535 #ifdef SPINLOCK_HISTORY
1536     PER_HEAP
1537     int spinlock_info_index;
1538
1539     PER_HEAP
1540     spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
1541 #endif //SPINLOCK_HISTORY
1542
1543     PER_HEAP
1544     void add_saved_spinlock_info (
1545             bool loh_p,
1546             msl_enter_state enter_state,
1547             msl_take_state take_state);
1548
1549     PER_HEAP
1550     void trigger_gc_for_alloc (int gen_number, gc_reason reason,
1551                                GCSpinLock* spin_lock, bool loh_p,
1552                                msl_take_state take_state);
1553
1554     PER_HEAP
1555     BOOL a_fit_free_list_uoh_p (size_t size,
1556                                   alloc_context* acontext,
1557                                   uint32_t flags, 
1558                                   int align_const,
1559                                   int gen_number);
1560
1561     PER_HEAP
1562     BOOL a_fit_segment_end_p (int gen_number,
1563                               heap_segment* seg,
1564                               size_t size,
1565                               alloc_context* acontext,
1566                               uint32_t flags,
1567                               int align_const,
1568                               BOOL* commit_failed_p);
1569     PER_HEAP
1570     BOOL uoh_a_fit_segment_end_p (int gen_number,
1571                                   size_t size,
1572                                   alloc_context* acontext,
1573                                   uint32_t flags,
1574                                   int align_const,
1575                                   BOOL* commit_failed_p,
1576                                   oom_reason* oom_r);
1577     PER_HEAP
1578     BOOL uoh_get_new_seg (int gen_number,
1579                           size_t size,
1580                           BOOL* commit_failed_p,
1581                           oom_reason* oom_r);
1582
1583     PER_HEAP_ISOLATED
1584     size_t get_uoh_seg_size (size_t size);
1585
1586     PER_HEAP
1587     BOOL retry_full_compact_gc (size_t size);
1588
1589     PER_HEAP
1590     BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
1591                                  BOOL* did_full_compact_gc,
1592                                  bool loh_p);
1593
1594     PER_HEAP
1595     BOOL trigger_full_compact_gc (gc_reason gr,
1596                                   oom_reason* oom_r,
1597                                   bool loh_p);
1598
1599     PER_HEAP
1600     BOOL trigger_ephemeral_gc (gc_reason gr);
1601
1602     PER_HEAP
1603     BOOL soh_try_fit (int gen_number,
1604                       size_t size,
1605                       alloc_context* acontext,
1606                       uint32_t flags,
1607                       int align_const,
1608                       BOOL* commit_failed_p,
1609                       BOOL* short_seg_end_p);
1610     PER_HEAP
1611     BOOL uoh_try_fit (int gen_number,
1612                       size_t size, 
1613                       alloc_context* acontext,
1614                       uint32_t flags,
1615                       int align_const,
1616                       BOOL* commit_failed_p,
1617                       oom_reason* oom_r);
1618
1619     PER_HEAP
1620     allocation_state allocate_soh (int gen_number,
1621                                      size_t size,
1622                                      alloc_context* acontext,
1623                                      uint32_t flags,
1624                                      int align_const);
1625
1626 #ifdef RECORD_LOH_STATE
1627     #define max_saved_loh_states 12
1628     PER_HEAP
1629     int loh_state_index;
1630
1631     struct loh_state_info
1632     {
1633         allocation_state alloc_state;
1634         EEThreadId thread_id;
1635     };
1636
1637     PER_HEAP
1638     loh_state_info last_loh_states[max_saved_loh_states];
1639     PER_HEAP
1640     void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
1641 #endif //RECORD_LOH_STATE
1642     PER_HEAP
1643     allocation_state allocate_uoh (int gen_number,
1644                                      size_t size,
1645                                      alloc_context* acontext,
1646                                      uint32_t flags,
1647                                      int align_const);
1648
1649     PER_HEAP_ISOLATED
1650     int init_semi_shared();
1651     PER_HEAP
1652     int init_gc_heap (int heap_number);
1653     PER_HEAP
1654     void self_destroy();
1655     PER_HEAP_ISOLATED
1656     void destroy_semi_shared();
1657     PER_HEAP
1658     void repair_allocation_contexts (BOOL repair_p);
1659     PER_HEAP
1660     void fix_allocation_contexts (BOOL for_gc_p);
1661     PER_HEAP
1662     void fix_youngest_allocation_area();
1663     PER_HEAP
1664     void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
1665                                  int align_const);
1666     PER_HEAP
1667     void fix_uoh_allocation_area();
1668     PER_HEAP
1669     void fix_older_allocation_area (generation* older_gen);
1670     PER_HEAP
1671     void set_allocation_heap_segment (generation* gen);
1672     PER_HEAP
1673     void reset_allocation_pointers (generation* gen, uint8_t* start);
1674     PER_HEAP
1675     int object_gennum (uint8_t* o);
1676     PER_HEAP
1677     int object_gennum_plan (uint8_t* o);
1678     PER_HEAP_ISOLATED
1679     void init_heap_segment (heap_segment* seg);
1680     PER_HEAP
1681     void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
1682 #ifdef FEATURE_BASICFREEZE
1683     PER_HEAP
1684     BOOL insert_ro_segment (heap_segment* seg);
1685     PER_HEAP
1686     void remove_ro_segment (heap_segment* seg);
1687 #endif //FEATURE_BASICFREEZE
1688     PER_HEAP
1689     BOOL set_ro_segment_in_range (heap_segment* seg);
1690     PER_HEAP
1691     heap_segment* soh_get_segment_to_expand();
1692     PER_HEAP
1693     heap_segment* get_segment (size_t size, gc_oh_num oh);
1694     PER_HEAP_ISOLATED
1695     void release_segment (heap_segment* sg);
1696     PER_HEAP_ISOLATED
1697     void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
1698     PER_HEAP_ISOLATED
1699     void seg_mapping_table_remove_segment (heap_segment* seg);
1700     PER_HEAP
1701     heap_segment* get_uoh_segment (int gen_number, size_t size, BOOL* did_full_compact_gc);
1702     PER_HEAP
1703     void thread_uoh_segment (int gen_number, heap_segment* new_seg);
1704     PER_HEAP_ISOLATED
1705     heap_segment* get_segment_for_uoh (int gen_number, size_t size
1706 #ifdef MULTIPLE_HEAPS
1707                                       , gc_heap* hp
1708 #endif //MULTIPLE_HEAPS
1709                                       );
1710     PER_HEAP
1711     void reset_heap_segment_pages (heap_segment* seg);
1712     PER_HEAP
1713     void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1714     PER_HEAP
1715     size_t decommit_ephemeral_segment_pages_step ();
1716     PER_HEAP
1717     size_t decommit_heap_segment_pages_worker (heap_segment* seg, uint8_t *new_committed);
1718     PER_HEAP_ISOLATED
1719     bool decommit_step ();
1720     PER_HEAP
1721     void decommit_heap_segment (heap_segment* seg);
1722     PER_HEAP_ISOLATED
1723     bool virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number);
1724     PER_HEAP_ISOLATED
1725     bool virtual_commit (void* address, size_t size, gc_oh_num oh, int h_number=-1, bool* hard_limit_exceeded_p=NULL);
1726     PER_HEAP_ISOLATED
1727     bool virtual_decommit (void* address, size_t size, gc_oh_num oh, int h_number=-1);
1728     PER_HEAP_ISOLATED
1729     void virtual_free (void* add, size_t size, heap_segment* sg=NULL);
1730     PER_HEAP
1731     void clear_gen0_bricks();
1732 #ifdef BACKGROUND_GC
1733     PER_HEAP
1734     void rearrange_small_heap_segments();
1735 #endif //BACKGROUND_GC
1736     PER_HEAP
1737     void rearrange_uoh_segments();
1738     PER_HEAP
1739     void rearrange_heap_segments(BOOL compacting);
1740
1741     PER_HEAP_ISOLATED
1742     void reset_write_watch_for_gc_heap(void* base_address, size_t region_size);
1743     PER_HEAP_ISOLATED
1744     void get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended);
1745
1746     PER_HEAP
1747     void switch_one_quantum();
1748     PER_HEAP
1749     void reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size);
1750     PER_HEAP
1751     void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1752     PER_HEAP
1753     void reset_write_watch (BOOL concurrent_p);
1754     PER_HEAP
1755     void adjust_ephemeral_limits();
1756     PER_HEAP
1757     void make_generation (int gen_num, heap_segment* seg, uint8_t* start);
1758
1759 #define USE_PADDING_FRONT 1
1760 #define USE_PADDING_TAIL  2
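     // These are bit flags for the use_padding parameter of size_fit_p below;
     // presumably they can be combined, e.g. (USE_PADDING_FRONT | USE_PADDING_TAIL).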
1761
1762     PER_HEAP
1763     BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1764                      uint8_t* old_loc=0, int use_padding=USE_PADDING_TAIL);
1765     PER_HEAP
1766     BOOL a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1767                        int align_const);
1768
1769     PER_HEAP
1770     void handle_oom (oom_reason reason, size_t alloc_size,
1771                      uint8_t* allocated, uint8_t* reserved);
1772
1773     PER_HEAP
1774     size_t card_of (uint8_t* object);
1775     PER_HEAP
1776     uint8_t* brick_address (size_t brick);
1777     PER_HEAP
1778     size_t brick_of (uint8_t* add);
1779     PER_HEAP
1780     uint8_t* card_address (size_t card);
1781     PER_HEAP
1782     size_t card_to_brick (size_t card);
1783     PER_HEAP
1784     void clear_card (size_t card);
1785     PER_HEAP
1786     void set_card (size_t card);
1787     PER_HEAP
1788     BOOL  card_set_p (size_t card);
1789     PER_HEAP
1790     void card_table_set_bit (uint8_t* location);
1791
1792 #ifdef CARD_BUNDLE
1793     PER_HEAP
1794     void update_card_table_bundle();
1795     PER_HEAP
1796     void reset_card_table_write_watch();
1797     PER_HEAP
1798     void card_bundle_clear(size_t cardb);
1799     PER_HEAP
1800     void card_bundle_set (size_t cardb);
1801     PER_HEAP
1802     void card_bundles_set (size_t start_cardb, size_t end_cardb);
1803     PER_HEAP
1804     void verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word);
1805     PER_HEAP
1806     void verify_card_bundles();
1807     PER_HEAP
1808     BOOL card_bundle_set_p (size_t cardb);
1809     PER_HEAP
1810     BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1811     PER_HEAP
1812     void enable_card_bundles();
1813     PER_HEAP_ISOLATED
1814     BOOL card_bundles_enabled();
1815
1816 #endif //CARD_BUNDLE
1817
1818     PER_HEAP
1819     BOOL find_card (uint32_t* card_table, size_t& card,
1820                     size_t card_word_end, size_t& end_card);
1821     PER_HEAP
1822     BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p=NULL);
1823     PER_HEAP
1824     int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1825     PER_HEAP
1826     void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
1827                                 short* old_brick_table,
1828                                 uint8_t* start, uint8_t* end);
1829     PER_HEAP
1830     void init_brick_card_range (heap_segment* seg);
1831     PER_HEAP
1832     void copy_brick_card_table_l_heap ();
1833     PER_HEAP
1834     void copy_brick_card_table();
1835     PER_HEAP
1836     void clear_brick_table (uint8_t* from, uint8_t* end);
1837     PER_HEAP
1838     void set_brick (size_t index, ptrdiff_t val);
1839     PER_HEAP
1840     int get_brick_entry (size_t index);
1841 #ifdef BACKGROUND_GC
1842     PER_HEAP
1843     unsigned int mark_array_marked (uint8_t* add);
1844     PER_HEAP
1845     void mark_array_set_marked (uint8_t* add);
1846     PER_HEAP
1847     BOOL is_mark_bit_set (uint8_t* add);
1848     PER_HEAP
1849     void gmark_array_set_marked (uint8_t* add);
1850     PER_HEAP
1851     void set_mark_array_bit (size_t mark_bit);
1852     PER_HEAP
1853     BOOL mark_array_bit_set (size_t mark_bit);
1854     PER_HEAP
1855     void mark_array_clear_marked (uint8_t* add);
1856     PER_HEAP
1857     void clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only=TRUE
1858 #ifdef FEATURE_BASICFREEZE
1859         , BOOL read_only=FALSE
1860 #endif // FEATURE_BASICFREEZE
1861         );
1862     PER_HEAP
1863     void seg_clear_mark_array_bits_soh (heap_segment* seg);
1864     PER_HEAP
1865     void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1866     PER_HEAP
1867     void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1868 #ifdef VERIFY_HEAP
1869     PER_HEAP
1870     void set_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1871     PER_HEAP
1872     void check_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1873 #endif //VERIFY_HEAP
1874 #endif //BACKGROUND_GC
1875
1876     PER_HEAP
1877     BOOL uoh_object_marked (uint8_t* o, BOOL clearp);
1878
1879 #ifdef BACKGROUND_GC
1880     PER_HEAP
1881     BOOL background_allowed_p();
1882 #endif //BACKGROUND_GC
1883
1884     PER_HEAP_ISOLATED
1885     void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1886
1887     PER_HEAP
1888     void check_for_full_gc (int gen_num, size_t size);
1889
1890     PER_HEAP
1891     void adjust_limit (uint8_t* start, size_t limit_size, generation* gen);
1892     PER_HEAP
1893     void adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
1894                            alloc_context* acontext, uint32_t flags, heap_segment* seg,
1895                            int align_const, int gen_number);
1896     PER_HEAP
1897     void  leave_allocation_segment (generation* gen);
1898
1899     PER_HEAP
1900     void init_free_and_plug();
1901
1902     PER_HEAP
1903     void print_free_and_plug (const char* msg);
1904
1905     PER_HEAP
1906     void add_gen_plug (int gen_number, size_t plug_size);
1907
1908     PER_HEAP
1909     void add_gen_free (int gen_number, size_t free_size);
1910
1911     PER_HEAP
1912     void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1913
1914     PER_HEAP
1915     void remove_gen_free (int gen_number, size_t free_size);
1916
1917     PER_HEAP
1918     uint8_t* allocate_in_older_generation (generation* gen, size_t size,
1919                                         int from_gen_number,
1920                                         uint8_t* old_loc=0
1921                                         REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1922     PER_HEAP
1923     generation*  ensure_ephemeral_heap_segment (generation* consing_gen);
1924     PER_HEAP
1925     uint8_t* allocate_in_condemned_generations (generation* gen,
1926                                              size_t size,
1927                                              int from_gen_number,
1928 #ifdef SHORT_PLUGS
1929                                              BOOL* convert_to_pinned_p=NULL,
1930                                              uint8_t* next_pinned_plug=0,
1931                                              heap_segment* current_seg=0,
1932 #endif //SHORT_PLUGS
1933                                              uint8_t* old_loc=0
1934                                              REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1935     // Verifies that interior is actually in the range of seg; otherwise
1936     // returns 0.
1937     PER_HEAP_ISOLATED
1938     heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p);
1939
1940     PER_HEAP_ISOLATED
1941     gc_heap* heap_of (uint8_t* object);
1942
1943     PER_HEAP_ISOLATED
1944     gc_heap* heap_of_gc (uint8_t* object);
1945
1946     PER_HEAP_ISOLATED
1947     size_t&  promoted_bytes (int);
1948
1949     PER_HEAP
1950     uint8_t* find_object (uint8_t* o);
1951
1952     PER_HEAP
1953     dynamic_data* dynamic_data_of (int gen_number);
1954     PER_HEAP
1955     ptrdiff_t  get_desired_allocation (int gen_number);
1956     PER_HEAP
1957     ptrdiff_t  get_new_allocation (int gen_number);
1958     PER_HEAP
1959     ptrdiff_t  get_allocation (int gen_number);
1960     PER_HEAP
1961     bool new_allocation_allowed (int gen_number);
1962 #ifdef BACKGROUND_GC
1963     PER_HEAP_ISOLATED
1964     void allow_new_allocation (int gen_number);
1965     PER_HEAP_ISOLATED
1966     void disallow_new_allocation (int gen_number);
1967 #endif //BACKGROUND_GC
1968     PER_HEAP
1969     void reset_pinned_queue();
1970     PER_HEAP
1971     void reset_pinned_queue_bos();
1972     PER_HEAP
1973     void set_allocator_next_pin (generation* gen);
1974     PER_HEAP
1975     void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len);
1976     PER_HEAP
1977     void enque_pinned_plug (uint8_t* plug,
1978                             BOOL save_pre_plug_info_p,
1979                             uint8_t* last_object_in_last_plug);
1980     PER_HEAP
1981     void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size);
1982     PER_HEAP
1983     void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen);
1984     PER_HEAP
1985     void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug);
1986     PER_HEAP
1987     size_t deque_pinned_plug ();
1988     PER_HEAP
1989     mark* pinned_plug_of (size_t bos);
1990     PER_HEAP
1991     mark* oldest_pin ();
1992     PER_HEAP
1993     mark* before_oldest_pin();
1994     PER_HEAP
1995     BOOL pinned_plug_que_empty_p ();
1996     PER_HEAP
1997     void make_mark_stack (mark* arr);
1998 #ifdef MH_SC_MARK
1999     PER_HEAP
2000     int& mark_stack_busy();
2001     PER_HEAP
2002     VOLATILE(uint8_t*)& ref_mark_stack (gc_heap* hp, int index);
2003 #endif //MH_SC_MARK
2004 #ifdef BACKGROUND_GC
2005     PER_HEAP_ISOLATED
2006     size_t&  bpromoted_bytes (int);
2007     PER_HEAP
2008     void make_background_mark_stack (uint8_t** arr);
2009     PER_HEAP
2010     void make_c_mark_list (uint8_t** arr);
2011 #endif //BACKGROUND_GC
2012     PER_HEAP
2013     generation* generation_of (int  n);
2014     PER_HEAP
2015     BOOL gc_mark1 (uint8_t* o);
2016     PER_HEAP
2017     BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high);
2018     PER_HEAP
2019     void mark_object (uint8_t* o THREAD_NUMBER_DCL);
2020 #ifdef HEAP_ANALYZE
2021     PER_HEAP
2022     void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
2023 #endif //HEAP_ANALYZE
2024     PER_HEAP
2025     void mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
2026     PER_HEAP
2027     void mark_object_simple1 (uint8_t* o, uint8_t* start THREAD_NUMBER_DCL);
2028
2029 #ifdef MH_SC_MARK
2030     PER_HEAP
2031     void mark_steal ();
2032 #endif //MH_SC_MARK
2033
2034 #ifdef BACKGROUND_GC
2035     PER_HEAP
2036     BOOL background_marked (uint8_t* o);
2037     PER_HEAP
2038     BOOL background_mark1 (uint8_t* o);
2039     PER_HEAP
2040     BOOL background_mark (uint8_t* o, uint8_t* low, uint8_t* high);
2041     PER_HEAP
2042     uint8_t* background_mark_object (uint8_t* o THREAD_NUMBER_DCL);
2043     PER_HEAP
2044     void background_mark_simple (uint8_t* o THREAD_NUMBER_DCL);
2045     PER_HEAP
2046     void background_mark_simple1 (uint8_t* o THREAD_NUMBER_DCL);
2047     PER_HEAP_ISOLATED
2048     void background_promote (Object**, ScanContext* , uint32_t);
2049     PER_HEAP
2050     BOOL background_object_marked (uint8_t* o, BOOL clearp);
2051     PER_HEAP
2052     void init_background_gc();
2053     PER_HEAP
2054     uint8_t* background_next_end (heap_segment*, BOOL);
2055     // while we are in LOH sweep we can't modify the segment list
2056     // there, so we mark them as to-be-deleted and delete them
2057     // at the next chance we get.
2058     PER_HEAP
2059     void background_delay_delete_uoh_segments();
2060     PER_HEAP
2061     void generation_delete_heap_segment (generation*,
2062                                          heap_segment*, heap_segment*, heap_segment*);
2063     PER_HEAP
2064     void set_mem_verify (uint8_t*, uint8_t*, uint8_t);
2065     PER_HEAP
2066     void process_background_segment_end (heap_segment*, generation*, uint8_t*,
2067                                      heap_segment*, BOOL*);
2068     PER_HEAP
2069     BOOL fgc_should_consider_object (uint8_t* o,
2070                                      heap_segment* seg,
2071                                      BOOL consider_bgc_mark_p,
2072                                      BOOL check_current_sweep_p,
2073                                      BOOL check_saved_sweep_p);
2074     PER_HEAP
2075     void should_check_bgc_mark (heap_segment* seg,
2076                                 BOOL* consider_bgc_mark_p,
2077                                 BOOL* check_current_sweep_p,
2078                                 BOOL* check_saved_sweep_p);
2079     PER_HEAP
2080     void background_ephemeral_sweep();
2081     PER_HEAP
2082     void background_sweep ();
2083     PER_HEAP
2084     uint8_t* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2085     PER_HEAP
2086     uint8_t* background_first_overflow (uint8_t* min_add,
2087                                      heap_segment* seg,
2088                                      BOOL concurrent_p,
2089                                      BOOL small_object_p);
2090     PER_HEAP
2091     void background_process_mark_overflow_internal (int condemned_gen_number,
2092                                                     uint8_t* min_add, uint8_t* max_add,
2093                                                     BOOL concurrent_p);
2094     PER_HEAP
2095     BOOL background_process_mark_overflow (BOOL concurrent_p);
2096
2097     // for foreground GC to get hold of background structures containing refs
2098     PER_HEAP
2099     void
2100     scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2101
2102     PER_HEAP
2103     BOOL bgc_mark_array_range (heap_segment* seg,
2104                                BOOL whole_seg_p,
2105                                uint8_t** range_beg,
2106                                uint8_t** range_end);
2107     PER_HEAP
2108     void bgc_verify_mark_array_cleared (heap_segment* seg);
2109     PER_HEAP
2110     void verify_mark_array_cleared();
2111     PER_HEAP
2112     void verify_partial();
2113     PER_HEAP
2114     void verify_mark_bits_cleared (uint8_t* obj, size_t s);
2115     PER_HEAP
2116     void clear_all_mark_array();
2117
2118 #ifdef BGC_SERVO_TUNING
2119
2120     // Currently BGC servo tuning is an experimental feature.
2121     class bgc_tuning
2122     {
2123     public:
2124         struct tuning_calculation
2125         {
2126             // This is a virtual size that represents the generation
2127             // size at goal; we calculate the flr based on it.
2128             size_t end_gen_size_goal;
2129
2130             // The sweep goal is expressed as an flr because we want to avoid
2131             // expanding the gen size.
2132             double sweep_flr_goal;
2133
2134             // gen2 size at the end of last bgc.
2135             size_t last_bgc_size;
2136
2137             //
2138             // these need to be double so we don't lose too much accuracy
2139             // they are *100.0
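                 // (illustrative example: a 100 MB generation with a 25 MB free
                 // list would have an flr of 25.0)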
2140             //
2141             // the FL ratio at the start of current bgc sweep.
2142             double current_bgc_sweep_flr;
2143             // the FL ratio at the end of last bgc.
2144             // Only used for FF.
2145             double last_bgc_flr;
2146             // the FL ratio last time we started a bgc
2147             double current_bgc_start_flr;
2148
2149             double above_goal_accu_error;
2150
2151             // We will trigger the next BGC if this much
2152             // alloc has been consumed between the last
2153             // bgc end and now.
2154             size_t alloc_to_trigger;
2155             // actual consumed alloc
2156             size_t actual_alloc_to_trigger;
2157
2158             // the alloc between last bgc sweep start and end.
2159             size_t last_bgc_end_alloc;
2160
2161             //
2162             // For smoothing calc
2163             //
2164             size_t smoothed_alloc_to_trigger;
2165
2166             //
2167             // For TBH
2168             //
2169             // last time we checked, were we above sweep flr goal?
2170             bool last_sweep_above_p;
2171             size_t alloc_to_trigger_0;
2172
2173             // This is to get us started. It's set when we observe during a gen1
2174             // GC that the memory load is high enough, and is used to seed the
2175             // first BGC triggered due to this tuning.
2176             size_t first_alloc_to_trigger;
2177         };
2178
2179         struct tuning_stats
2180         {
2181             size_t last_bgc_physical_size;
2182
2183             size_t last_alloc_end_to_start;
2184             size_t last_alloc_start_to_sweep;
2185             size_t last_alloc_sweep_to_end;
2186             // records the alloc at the last significant point,
2187             // used to calculate the 3 allocs above.
2188             // It's reset at bgc sweep start as that's when we reset
2189             // all the allocation data (sweep_allocated/condemned_allocated/etc)
2190             size_t last_alloc;
2191
2192             // the FL size at the end of last bgc.
2193             size_t last_bgc_fl_size;
2194
2195             // last gen2 surv rate
2196             double last_bgc_surv_rate;
2197
2198             // the FL ratio last time gen size increased.
2199             double last_gen_increase_flr;
2200         };
2201
2202         // This is just so that I don't need to calculate things multiple
2203         // times. Only used during bgc end calculations. Everything that
2204         // needs to be preserved across GCs will be saved in the other 2
2205         // structs.
2206         struct bgc_size_data
2207         {
2208             size_t gen_size;
2209             size_t gen_physical_size;
2210             size_t gen_fl_size;
2211             // The actual physical fl size, unadjusted
2212             size_t gen_actual_phys_fl_size;
2213             // I call this physical_fl but really it's adjusted based on alloc
2214             // that we haven't consumed because the other generation consumed
2215             // its alloc and triggered the BGC. See init_bgc_end_data.
2216             // We don't allow it to go negative.
2217             ptrdiff_t gen_physical_fl_size;
2218             double gen_physical_flr;
2219             double gen_flr;
2220         };
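             // An assumed reading of the flr fields above (illustrative, not taken
             // from the implementation): they are free list ratios stored *100,
             // e.g. gen_physical_flr ~= gen_physical_fl_size * 100.0 / gen_physical_size.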
2221
2222         static bool enable_fl_tuning;
2223         // the memory load we aim to maintain.
2224         static uint32_t memory_load_goal;
2225
2226         // if we are BGCMemGoalSlack above BGCMemGoal, this is where we
2227         // panic and start to see if we should do NGC2.
2228         static uint32_t memory_load_goal_slack;
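             // Illustrative example using the config names above: with BGCMemGoal=75
             // and BGCMemGoalSlack=10, the panic path described below starts being
             // considered once the memory load exceeds 85%.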
2229         // This is calculated based on memory_load_goal.
2230         static uint64_t available_memory_goal;
2231         // If we are above (ml goal + slack), we need to panic.
2232         // Currently we just trigger the next GC as an NGC2, but
2233         // we do track the accumulated error and could be more
2234         // sophisticated about triggering NGC2 especially when
2235         // slack is small. For example, we could trigger an NGC2 only once
2236         // we see that the accumulated error is large enough.
2237         static bool panic_activated_p;
2238         static double accu_error_panic;
2239
2240         static double above_goal_kp;
2241         static double above_goal_ki;
2242         static bool enable_ki;
2243         static bool enable_kd;
2244         static bool enable_smooth;
2245         static bool enable_tbh;
2246         static bool enable_ff;
2247         static bool enable_gradual_d;
2248         static double above_goal_kd;
2249         static double above_goal_ff;
2250         static double num_gen1s_smooth_factor;
2251
2252         // for ML servo loop
2253         static double ml_kp;
2254         static double ml_ki;
2255
2256         // for ML loop ki
2257         static double accu_error;
2258
2259         // did we start tuning with FL yet?
2260         static bool fl_tuning_triggered;
2261
2262         // ==================================================
2263         // ============what's used in calculation============
2264         // ==================================================
2265         //
2266         // only used in smoothing.
2267         static size_t num_bgcs_since_tuning_trigger;
2268
2269         // set by a gen1 GC to make the next GC a BGC when it observes that
2270         // the memory load is high enough for the first time.
2271         static bool next_bgc_p;
2272
2273         // this is organized as:
2274         // element 0 is for max_generation
2275         // element 1 is for max_generation+1
2276         static tuning_calculation gen_calc[2];
2277
2278         // ======================================================
2279         // ============what's used to only show stats============
2280         // ======================================================
2281         //
2282         // how many gen1's actually happened before triggering next bgc.
2283         static size_t actual_num_gen1s_to_trigger;
2284
2285         static size_t gen1_index_last_bgc_end;
2286         static size_t gen1_index_last_bgc_start;
2287         static size_t gen1_index_last_bgc_sweep;
2288
2289         static tuning_stats gen_stats[2];
2290         // ============end of stats============
2291
2292         static bgc_size_data current_bgc_end_data[2];
2293
2294         static size_t last_stepping_bgc_count;
2295         static uint32_t last_stepping_mem_load;
2296         static uint32_t stepping_interval;
2297
2298         // When we are in the initial stage before fl tuning is triggered.
2299         static bool use_stepping_trigger_p;
2300
2301         // the gen2 correction factor is used to put more emphasis
2302         // on gen2 when gen2 itself triggered the BGC.
2303         // If the BGC was triggered due to gen3, we decrease this
2304         // factor.
2305         static double gen2_ratio_correction;
2306         static double ratio_correction_step;
2307
2308         // Since we have 2 loops, this BGC was caused by one of them; for the other loop we know
2309         // we didn't reach the goal, so we use the output from last time.
2310         static void calculate_tuning (int gen_number, bool use_this_loop_p);
2311
2312         static void init_bgc_end_data (int gen_number, bool use_this_loop_p);
2313         static void calc_end_bgc_fl (int gen_number);
2314
2315         static void convert_to_fl (bool use_gen2_loop_p, bool use_gen3_loop_p);
2316         static double calculate_ml_tuning (uint64_t current_available_physical, bool reduce_p, ptrdiff_t* _vfl_from_kp, ptrdiff_t* _vfl_from_ki);
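             // A minimal sketch of the PI shape this implies (an assumption, not the
             // actual implementation in gc.cpp):
             //   error       = goal - current;   // sign convention assumed
             //   accu_error += error;            // integral term
             //   adjustment  = ml_kp * error + ml_ki * accu_error;
             // with the kp and ki contributions reported separately through
             // _vfl_from_kp/_vfl_from_ki.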
2317
2318         // This invokes the ml tuning loop and sets the total gen sizes, i.e.,
2319         // including vfl.
2320         static void set_total_gen_sizes (bool use_gen2_loop_p, bool use_gen3_loop_p);
2321
2322         static bool should_trigger_bgc_loh();
2323
2324         // This is only called when we've already stopped for GC.
2325         // For LOH we'd be doing this in the alloc path.
2326         static bool should_trigger_bgc();
2327
2328         // If we keep being above ml goal, we need to compact.
2329         static bool should_trigger_ngc2();
2330
2331         // Only implemented for gen2 now, while we are in sweep.
2332         // Until we have built up enough fl, we delay gen1 consuming
2333         // gen2 alloc so we don't get into panic.
2334         // When we maintain the fl instead of building a new one, this
2335         // can be eliminated.
2336         static bool should_delay_alloc (int gen_number);
2337
2338         // When we are under the memory load goal, we'd like to do 10 BGCs
2339         // before we reach the goal.
2340         static bool stepping_trigger (uint32_t current_memory_load, size_t current_gen2_count);
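             // Illustrative example of the intent above (an assumption): with a 75%
             // memory load goal and a current load of 55%, the 20% of headroom would
             // be split into ~10 steps, i.e. a stepping-triggered BGC roughly every
             // 2% increase in memory load until fl tuning takes over.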
2341
2342         static void update_bgc_start (int gen_number, size_t num_gen1s_since_end);
2343         // Updates the following:
2344         // current_bgc_start_flr
2345         // actual_alloc_to_trigger
2346         // last_alloc_end_to_start
2347         // last_alloc
2348         // actual_num_gen1s_to_trigger
2349         // gen1_index_last_bgc_start
2350         static void record_bgc_start();
2351
2352         static void update_bgc_sweep_start (int gen_number, size_t num_gen1s_since_start);
2353         // Updates the following:
2354         // current_bgc_sweep_flr
2355         // last_alloc_start_to_sweep
2356         // last_alloc
2357         // gen1_index_last_bgc_sweep
2358         static void record_bgc_sweep_start();
2359         // Updates the rest
2360         static void record_and_adjust_bgc_end();
2361     };
2362
2363     // This tells us why we chose to do a bgc in tuning.
2364     PER_HEAP_ISOLATED
2365     int saved_bgc_tuning_reason;
2366 #endif //BGC_SERVO_TUNING
2367
2368 #endif //BACKGROUND_GC
2369
2370     PER_HEAP
2371     void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2372     PER_HEAP
2373     BOOL process_mark_overflow (int condemned_gen_number);
2374     PER_HEAP
2375     void process_mark_overflow_internal (int condemned_gen_number,
2376                                          uint8_t* min_address, uint8_t* max_address);
2377
2378 #ifdef SNOOP_STATS
2379     PER_HEAP
2380     void print_snoop_stat();
2381 #endif //SNOOP_STATS
2382
2383 #ifdef MH_SC_MARK
2384
2385     PER_HEAP
2386     BOOL check_next_mark_stack (gc_heap* next_heap);
2387
2388 #endif //MH_SC_MARK
2389
2390     PER_HEAP
2391     void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2392
2393     PER_HEAP
2394     void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2395
2396     PER_HEAP
2397     void pin_object (uint8_t* o, uint8_t** ppObject);
2398
2399     PER_HEAP_ISOLATED
2400     size_t get_total_pinned_objects();
2401
2402     PER_HEAP
2403     void reset_mark_stack ();
2404     PER_HEAP
2405     uint8_t* insert_node (uint8_t* new_node, size_t sequence_number,
2406                        uint8_t* tree, uint8_t* last_node);
2407     PER_HEAP
2408     size_t update_brick_table (uint8_t* tree, size_t current_brick,
2409                                uint8_t* x, uint8_t* plug_end);
2410
2411     PER_HEAP
2412     void plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate);
2413
2414     PER_HEAP
2415     void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2416
2417     PER_HEAP
2418     void plan_generation_starts (generation*& consing_gen);
2419
2420     PER_HEAP
2421     void advance_pins_for_demotion (generation* gen);
2422
2423     PER_HEAP
2424     void process_ephemeral_boundaries(uint8_t* x, int& active_new_gen_number,
2425                                       int& active_old_gen_number,
2426                                       generation*& consing_gen,
2427                                       BOOL& allocate_in_condemned);
2428     PER_HEAP
2429     void seg_clear_mark_bits (heap_segment* seg);
2430     PER_HEAP
2431     void sweep_ro_segments (heap_segment* start_seg);
2432     PER_HEAP
2433     void convert_to_pinned_plug (BOOL& last_npinned_plug_p,
2434                                  BOOL& last_pinned_plug_p,
2435                                  BOOL& pinned_plug_p,
2436                                  size_t ps,
2437                                  size_t& artificial_pinned_size);
2438     PER_HEAP
2439     void store_plug_gap_info (uint8_t* plug_start,
2440                               uint8_t* plug_end,
2441                               BOOL& last_npinned_plug_p,
2442                               BOOL& last_pinned_plug_p,
2443                               uint8_t*& last_pinned_plug,
2444                               BOOL& pinned_plug_p,
2445                               uint8_t* last_object_in_last_plug,
2446                               BOOL& merge_with_last_pin_p,
2447                               // this is only for verification purposes
2448                               size_t last_plug_len);
2449     PER_HEAP
2450     void plan_phase (int condemned_gen_number);
2451
2452     PER_HEAP
2453     void record_interesting_data_point (interesting_data_point idp);
2454
2455 #ifdef GC_CONFIG_DRIVEN
2456     PER_HEAP
2457     void record_interesting_info_per_heap();
2458     PER_HEAP_ISOLATED
2459     void record_global_mechanisms();
2460     PER_HEAP_ISOLATED
2461     BOOL should_do_sweeping_gc (BOOL compact_p);
2462 #endif //GC_CONFIG_DRIVEN
2463
2464 #ifdef FEATURE_LOH_COMPACTION
2465     // plan_loh can allocate memory so it can fail. If it fails, we will
2466     // fall back to sweeping.
2467     PER_HEAP
2468     BOOL plan_loh();
2469
2470     PER_HEAP
2471     void compact_loh();
2472
2473     PER_HEAP
2474     void relocate_in_loh_compact();
2475
2476     PER_HEAP
2477     void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn);
2478
2479     PER_HEAP
2480     BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
2481
2482     PER_HEAP
2483     void loh_set_allocator_next_pin();
2484
2485     PER_HEAP
2486     BOOL loh_pinned_plug_que_empty_p();
2487
2488     PER_HEAP
2489     size_t loh_deque_pinned_plug();
2490
2491     PER_HEAP
2492     mark* loh_pinned_plug_of (size_t bos);
2493
2494     PER_HEAP
2495     mark* loh_oldest_pin();
2496
2497     PER_HEAP
2498     BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit);
2499
2500     PER_HEAP
2501     uint8_t* loh_allocate_in_condemned (size_t size);
2502
2503     PER_HEAP_ISOLATED
2504     BOOL loh_object_p (uint8_t* o);
2505
2506     PER_HEAP_ISOLATED
2507     BOOL loh_compaction_requested();
2508
2509     // If the LOH compaction mode is just to compact once,
2510     // we need to see if we should reset it back to not compact.
2511     // We would only reset if every heap's LOH was compacted.
2512     PER_HEAP_ISOLATED
2513     void check_loh_compact_mode (BOOL all_heaps_compacted_p);
2514 #endif //FEATURE_LOH_COMPACTION
2515
2516     PER_HEAP
2517     void fix_generation_bounds (int condemned_gen_number,
2518                                 generation* consing_gen);
2519     PER_HEAP
2520     uint8_t* generation_limit (int gen_number);
2521
2522     struct make_free_args
2523     {
2524         int free_list_gen_number;
2525         uint8_t* current_gen_limit;
2526         generation* free_list_gen;
2527         uint8_t* highest_plug;
2528     };
2529     PER_HEAP
2530     uint8_t* allocate_at_end (size_t size);
2531     PER_HEAP
2532     BOOL ensure_gap_allocation (int condemned_gen_number);
2533     // make_free_lists is only called by blocking GCs.
2534     PER_HEAP
2535     void make_free_lists (int condemned_gen_number);
2536     PER_HEAP
2537     void make_free_list_in_brick (uint8_t* tree, make_free_args* args);
2538     PER_HEAP
2539     void thread_gap (uint8_t* gap_start, size_t size, generation*  gen);
2540     PER_HEAP
2541     void loh_thread_gap_front (uint8_t* gap_start, size_t size, generation*  gen);
2542     PER_HEAP
2543     void make_unused_array (uint8_t* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2544     PER_HEAP
2545     void clear_unused_array (uint8_t* x, size_t size);
2546     PER_HEAP
2547     void relocate_address (uint8_t** old_address THREAD_NUMBER_DCL);
2548     struct relocate_args
2549     {
2550         uint8_t* last_plug;
2551         BOOL is_shortened;
2552         mark* pinned_plug_entry;
2553     };
2554
2555     PER_HEAP
2556     void reloc_survivor_helper (uint8_t** pval);
2557     PER_HEAP
2558     void check_class_object_demotion (uint8_t* obj);
2559     PER_HEAP
2560     void check_class_object_demotion_internal (uint8_t* obj);
2561
2562     PER_HEAP
2563     void check_demotion_helper (uint8_t** pval, uint8_t* parent_obj);
2564
2565     PER_HEAP
2566     void relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end);
2567
2568     PER_HEAP
2569     void verify_pins_with_post_plug_info (const char* msg);
2570
2571 #ifdef COLLECTIBLE_CLASS
2572     PER_HEAP
2573     void unconditional_set_card_collectible (uint8_t* obj);
2574 #endif //COLLECTIBLE_CLASS
2575
2576     PER_HEAP
2577     void relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry);
2578
2579     PER_HEAP
2580     void relocate_obj_helper (uint8_t* x, size_t s);
2581
2582     PER_HEAP
2583     void reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc);
2584
2585     PER_HEAP
2586     void relocate_pre_plug_info (mark* pinned_plug_entry);
2587
2588     PER_HEAP
2589     void relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned);
2590
2591     PER_HEAP
2592     void relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
2593                                      BOOL check_last_object_p,
2594                                      mark* pinned_plug_entry);
2595     PER_HEAP
2596     void relocate_survivors_in_brick (uint8_t* tree, relocate_args* args);
2597
2598     PER_HEAP
2599     void update_oldest_pinned_plug();
2600
2601     PER_HEAP
2602     void relocate_survivors (int condemned_gen_number,
2603                              uint8_t* first_condemned_address );
2604     PER_HEAP
2605     void relocate_phase (int condemned_gen_number,
2606                          uint8_t* first_condemned_address);
2607
2608     struct compact_args
2609     {
2610         BOOL copy_cards_p;
2611         uint8_t* last_plug;
2612         ptrdiff_t last_plug_relocation;
2613         uint8_t* before_last_plug;
2614         size_t current_compacted_brick;
2615         BOOL is_shortened;
2616         mark* pinned_plug_entry;
2617         BOOL check_gennum_p;
2618         int src_gennum;
2619
2620         void print()
2621         {
2622             dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2623                 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2624         }
2625     };
2626
2627     PER_HEAP
2628     void copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2629     PER_HEAP
2630     void  gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2631     PER_HEAP
2632     void compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2633     PER_HEAP
2634     void compact_in_brick (uint8_t* tree, compact_args* args);
2635
2636     PER_HEAP
2637     mark* get_next_pinned_entry (uint8_t* tree,
2638                                  BOOL* has_pre_plug_info_p,
2639                                  BOOL* has_post_plug_info_p,
2640                                  BOOL deque_p=TRUE);
2641
2642     PER_HEAP
2643     mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2644
2645     PER_HEAP
2646     void recover_saved_pinned_info();
2647
2648     PER_HEAP
2649     void compact_phase (int condemned_gen_number, uint8_t*
2650                         first_condemned_address, BOOL clear_cards);
2651     PER_HEAP
2652     void clear_cards (size_t start_card, size_t end_card);
2653     PER_HEAP
2654     void clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address);
2655     PER_HEAP
2656     void copy_cards (size_t dst_card, size_t src_card,
2657                      size_t end_card, BOOL nextp);
2658     PER_HEAP
2659     void copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2660
2661 #ifdef BACKGROUND_GC
2662     PER_HEAP
2663     void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2664     PER_HEAP
2665     void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2666 #endif //BACKGROUND_GC
2667
2668
2669     PER_HEAP
2670     BOOL ephemeral_pointer_p (uint8_t* o);
2671     PER_HEAP
2672     void fix_brick_to_highest (uint8_t* o, uint8_t* next_o);
2673     PER_HEAP
2674     uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object);
2675     PER_HEAP
2676     uint8_t* compute_next_boundary (int gen_number, BOOL relocating);
2677     PER_HEAP
2678     void keep_card_live (uint8_t* o, size_t& n_gen,
2679                          size_t& cg_pointers_found);
2680     PER_HEAP
2681     void mark_through_cards_helper (uint8_t** poo, size_t& ngen,
2682                                     size_t& cg_pointers_found,
2683                                     card_fn fn, uint8_t* nhigh,
2684                                     uint8_t* next_boundary
2685                                     CARD_MARKING_STEALING_ARG(gc_heap* hpt));
2686
2687     PER_HEAP
2688     BOOL card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
2689                           size_t& cg_pointers_found,
2690                           size_t& n_eph, size_t& n_card_set,
2691                           size_t& card, size_t& end_card,
2692                           BOOL& foundp, uint8_t*& start_address,
2693                           uint8_t*& limit, size_t& n_cards_cleared
2694                           CARD_MARKING_STEALING_ARGS(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t& card_word_end_out));
2695     PER_HEAP
2696     void mark_through_cards_for_segments(card_fn fn, BOOL relocating CARD_MARKING_STEALING_ARG(gc_heap* hpt));
2697
2698     PER_HEAP
2699     void repair_allocation_in_expanded_heap (generation* gen);
2700     PER_HEAP
2701     BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2702     PER_HEAP
2703     BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2704     PER_HEAP
2705     BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2706 #ifdef SEG_REUSE_STATS
2707     PER_HEAP
2708     size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2709 #endif //SEG_REUSE_STATS
2710     PER_HEAP
2711     void build_ordered_free_spaces (heap_segment* seg);
2712     PER_HEAP
2713     void count_plug (size_t last_plug_size, uint8_t*& last_plug);
2714     PER_HEAP
2715     void count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug);
2716     PER_HEAP
2717     void build_ordered_plug_indices ();
2718     PER_HEAP
2719     void init_ordered_free_space_indices ();
2720     PER_HEAP
2721     void trim_free_spaces_indices ();
2722     PER_HEAP
2723     BOOL try_best_fit (BOOL end_of_segment_p);
2724     PER_HEAP
2725     BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2726     PER_HEAP
2727     BOOL process_free_space (heap_segment* seg,
2728                              size_t free_space,
2729                              size_t min_free_size,
2730                              size_t min_cont_size,
2731                              size_t* total_free_space,
2732                              size_t* largest_free_space);
2733     PER_HEAP
2734     size_t compute_eph_gen_starts_size();
2735     PER_HEAP
2736     void compute_new_ephemeral_size();
2737     PER_HEAP
2738     BOOL expand_reused_seg_p();
2739     PER_HEAP
2740     BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2741                             size_t min_cont_size, allocator* al);
2742     PER_HEAP
2743     uint8_t* allocate_in_expanded_heap (generation* gen, size_t size,
2744                                      BOOL& adjacentp, uint8_t* old_loc,
2745 #ifdef SHORT_PLUGS
2746                                      BOOL set_padding_on_saved_p,
2747                                      mark* pinned_plug_entry,
2748 #endif //SHORT_PLUGS
2749                                      BOOL consider_bestfit, int active_new_gen_number
2750                                      REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2751     PER_HEAP
2752     void realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
2753                        generation* gen, uint8_t* start_address,
2754                        unsigned int& active_new_gen_number,
2755                        uint8_t*& last_pinned_gap, BOOL& leftp,
2756                        BOOL shortened_p
2757 #ifdef SHORT_PLUGS
2758                        , mark* pinned_plug_entry
2759 #endif //SHORT_PLUGS
2760                        );
2761     PER_HEAP
2762     void realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address,
2763                            generation* gen,
2764                            unsigned int& active_new_gen_number,
2765                            uint8_t*& last_pinned_gap, BOOL& leftp);
2766     PER_HEAP
2767     void realloc_plugs (generation* consing_gen, heap_segment* seg,
2768                         uint8_t* start_address, uint8_t* end_address,
2769                         unsigned active_new_gen_number);
2770
2771     PER_HEAP
2772     void set_expand_in_full_gc (int condemned_gen_number);
2773
2774     PER_HEAP
2775     void verify_no_pins (uint8_t* start, uint8_t* end);
2776
2777     PER_HEAP
2778     generation* expand_heap (int condemned_generation,
2779                              generation* consing_gen,
2780                              heap_segment* new_heap_segment);
2781
2782     PER_HEAP
2783     void save_ephemeral_generation_starts();
2784
2785     PER_HEAP_ISOLATED
2786     size_t get_gen0_min_size();
2787
2788     PER_HEAP
2789     void set_static_data();
2790
2791     PER_HEAP_ISOLATED
2792     void init_static_data();
2793
2794     PER_HEAP
2795     bool init_dynamic_data ();
2796     PER_HEAP
2797     float surv_to_growth (float cst, float limit, float max_limit);
2798     PER_HEAP
2799     size_t desired_new_allocation (dynamic_data* dd, size_t out,
2800                                    int gen_number, int pass);
2801
2802     PER_HEAP
2803     void trim_youngest_desired_low_memory();
2804
2805     PER_HEAP
2806     void decommit_ephemeral_segment_pages();
2807
2808 #ifdef HOST_64BIT
2809     PER_HEAP_ISOLATED
2810     size_t trim_youngest_desired (uint32_t memory_load,
2811                                   size_t total_new_allocation,
2812                                   size_t total_min_allocation);
2813     PER_HEAP_ISOLATED
2814     size_t joined_youngest_desired (size_t new_allocation);
2815 #endif // HOST_64BIT
2816     PER_HEAP_ISOLATED
2817     size_t get_total_heap_size ();
2818     PER_HEAP_ISOLATED
2819     size_t get_total_committed_size();
2820     PER_HEAP_ISOLATED
2821     size_t get_total_fragmentation();
2822     PER_HEAP_ISOLATED
2823     size_t get_total_gen_fragmentation (int gen_number);
2824     PER_HEAP_ISOLATED
2825     size_t get_total_gen_estimated_reclaim (int gen_number);
2826     PER_HEAP_ISOLATED
2827     void get_memory_info (uint32_t* memory_load,
2828                           uint64_t* available_physical=NULL,
2829                           uint64_t* available_page_file=NULL);
2830     PER_HEAP
2831     size_t generation_size (int gen_number);
2832     PER_HEAP_ISOLATED
2833     size_t get_total_survived_size();
2834     // this also resets allocated_since_last_gc
2835     PER_HEAP_ISOLATED
2836     size_t get_total_allocated_since_last_gc();
2837     PER_HEAP
2838     size_t get_current_allocated();
2839     PER_HEAP_ISOLATED
2840     size_t get_total_allocated();
2841     PER_HEAP_ISOLATED
2842     size_t get_total_promoted();
2843 #ifdef BGC_SERVO_TUNING
2844     PER_HEAP_ISOLATED
2845     size_t get_total_generation_size (int gen_number);
2846     PER_HEAP_ISOLATED
2847     size_t get_total_servo_alloc (int gen_number);
2848     PER_HEAP_ISOLATED
2849     size_t get_total_bgc_promoted();
2850     PER_HEAP_ISOLATED
2851     size_t get_total_surv_size (int gen_number);
2852     PER_HEAP_ISOLATED
2853     size_t get_total_begin_data_size (int gen_number);
2854     PER_HEAP_ISOLATED
2855     size_t get_total_generation_fl_size (int gen_number);
2856     PER_HEAP_ISOLATED
2857     size_t get_current_gc_index (int gen_number);
2858 #endif //BGC_SERVO_TUNING
2859     PER_HEAP
2860     size_t current_generation_size (int gen_number);
2861     PER_HEAP
2862     size_t generation_plan_size (int gen_number);
2863     PER_HEAP
2864     void  compute_promoted_allocation (int gen_number);
2865     PER_HEAP
2866     size_t  compute_in (int gen_number);
2867     PER_HEAP
2868     void compute_new_dynamic_data (int gen_number);
2869     PER_HEAP_ISOLATED
2870     gc_history_global* get_gc_data_global();
2871     PER_HEAP
2872     gc_history_per_heap* get_gc_data_per_heap();
2873     PER_HEAP
2874     size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2875     PER_HEAP
2876     size_t generation_fragmentation (generation* gen,
2877                                      generation* consing_gen,
2878                                      uint8_t* end);
2879     PER_HEAP
2880     size_t generation_sizes (generation* gen);
2881     PER_HEAP
2882     size_t committed_size();
2883     PER_HEAP
2884     size_t uoh_committed_size (int gen_number, size_t* allocated);
2885     PER_HEAP
2886     size_t approximate_new_allocation();
2887     PER_HEAP
2888     size_t end_space_after_gc();
2889     PER_HEAP
2890     size_t estimated_reclaim (int gen_number);
2891     PER_HEAP
2892     BOOL decide_on_compacting (int condemned_gen_number,
2893                                size_t fragmentation,
2894                                BOOL& should_expand);
2895     PER_HEAP
2896     BOOL sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end,
2897                                    size_t end_space_required,
2898                                    gc_tuning_point tp);
2899     PER_HEAP
2900     BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2901     PER_HEAP
2902     void sweep_uoh_objects (int gen_num);
2903     PER_HEAP
2904     void relocate_in_uoh_objects (int gen_num);
2905     PER_HEAP
2906     void mark_through_cards_for_uoh_objects(card_fn fn, int oldest_gen_num, BOOL relocating
2907                                               CARD_MARKING_STEALING_ARG(gc_heap* hpt));
2908     PER_HEAP
2909     void descr_generations (BOOL begin_gc_p);
2910
2911     PER_HEAP_ISOLATED
2912     void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2913
2914     /*------------ Multiple non isolated heaps ----------------*/
2915 #ifdef MULTIPLE_HEAPS
2916     PER_HEAP_ISOLATED
2917     BOOL   create_thread_support (int number_of_heaps);
2918     PER_HEAP_ISOLATED
2919     void destroy_thread_support ();
2920     PER_HEAP
2921     bool create_gc_thread();
2922     PER_HEAP
2923     void gc_thread_function();
2924 #ifdef MARK_LIST
2925 #ifdef PARALLEL_MARK_LIST_SORT
2926     PER_HEAP
2927     void sort_mark_list();
2928     PER_HEAP
2929     void merge_mark_lists();
2930     PER_HEAP
2931     void append_to_mark_list(uint8_t **start, uint8_t **end);
2932 #else //PARALLEL_MARK_LIST_SORT
2933     PER_HEAP_ISOLATED
2934     void combine_mark_lists();
2935 #endif //PARALLEL_MARK_LIST_SORT
2936 #endif //MARK_LIST
2937 #endif //MULTIPLE_HEAPS
2938
2939 #ifdef MARK_LIST
2940     PER_HEAP_ISOLATED
2941     void grow_mark_list();
2942 #endif //MARK_LIST
2943
2944 #ifdef BACKGROUND_GC
2945
2946     PER_HEAP
2947     uint8_t* high_page (heap_segment* seg, BOOL concurrent_p);
2948
2949     PER_HEAP
2950     void revisit_written_page (uint8_t* page, uint8_t* end, 
2951                                BOOL concurrent_p, uint8_t*& last_page,
2952                                uint8_t*& last_object, BOOL large_objects_p,
2953                                size_t& num_marked_objects);
2954     PER_HEAP
2955     void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2956
2957     PER_HEAP
2958     void concurrent_scan_dependent_handles (ScanContext *sc);
2959
2960     PER_HEAP_ISOLATED
2961     void suspend_EE ();
2962
2963     PER_HEAP_ISOLATED
2964     void bgc_suspend_EE ();
2965
2966     PER_HEAP_ISOLATED
2967     void restart_EE ();
2968
2969     PER_HEAP
2970     void background_scan_dependent_handles (ScanContext *sc);
2971
2972     PER_HEAP
2973     void allow_fgc();
2974
2975     // Restores BGC settings if necessary.
2976     PER_HEAP_ISOLATED
2977     void recover_bgc_settings();
2978
2979     PER_HEAP
2980     BOOL should_commit_mark_array();
2981
2982     PER_HEAP
2983     void clear_commit_flag();
2984
2985     PER_HEAP_ISOLATED
2986     void clear_commit_flag_global();
2987
2988     PER_HEAP_ISOLATED
2989     void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr);
2990
2991     PER_HEAP_ISOLATED
2992     BOOL commit_mark_array_by_range (uint8_t* begin,
2993                                      uint8_t* end,
2994                                      uint32_t* mark_array_addr);
2995
2996     PER_HEAP_ISOLATED
2997     BOOL commit_mark_array_new_seg (gc_heap* hp,
2998                                     heap_segment* seg,
2999                                     uint32_t* new_card_table = 0,
3000                                     uint8_t* new_lowest_address = 0);
3001
3002     PER_HEAP_ISOLATED
3003     BOOL commit_mark_array_with_check (heap_segment* seg, uint32_t* mark_array_addr);
3004
3005     // commit the portion of the mark array that corresponds to
3006     // this segment (from beginning to reserved).
3007     // seg and heap_segment_reserved (seg) are guaranteed to be
3008     // page aligned.
3009     PER_HEAP_ISOLATED
3010     BOOL commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr);
3011
3012     // During BGC init, we commit the mark array for all in range
3013     // segments whose mark array hasn't been committed, or has only been
3014     // partially committed. All rw segments are in range; only ro segments
3015     // can be partially in range.
3016     PER_HEAP
3017     BOOL commit_mark_array_bgc_init();
3018
3019     PER_HEAP
3020     BOOL commit_new_mark_array (uint32_t* new_mark_array);
3021
3022     // We need to commit all segments that intersect with the bgc
3023     // range. If a segment is only partially in range, we still
3024     // should commit the mark array for the whole segment as
3025     // we will set the mark array commit flag for this segment.
3026     PER_HEAP_ISOLATED
3027     BOOL commit_new_mark_array_global (uint32_t* new_mark_array);
3028
3029     // We can't decommit the first and the last page in the mark array
3030     // if the beginning and ending don't happen to be page aligned.
3031     PER_HEAP
3032     void decommit_mark_array_by_seg (heap_segment* seg);
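         // Illustrative example of the constraint above (assuming 4 KB pages): for
         // a mark array range [0x10000200, 0x10008E00), only the page-aligned
         // interior [0x10001000, 0x10008000) could be decommitted; the partially
         // covered first and last pages stay committed.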
3033
3034     PER_HEAP
3035     void background_mark_phase();
3036
3037     PER_HEAP
3038     void background_drain_mark_list (int thread);
3039
3040     PER_HEAP
3041     void background_grow_c_mark_list();
3042
3043     PER_HEAP_ISOLATED
3044     void background_promote_callback(Object** object, ScanContext* sc, uint32_t flags);
3045
3046     PER_HEAP
3047     void mark_absorb_new_alloc();
3048
3049     PER_HEAP
3050     void restart_vm();
3051
3052     PER_HEAP
3053     BOOL prepare_bgc_thread(gc_heap* gh);
3054     PER_HEAP
3055     BOOL create_bgc_thread(gc_heap* gh);
3056     PER_HEAP_ISOLATED
3057     BOOL create_bgc_threads_support (int number_of_heaps);
3058     PER_HEAP
3059     BOOL create_bgc_thread_support();
3060     PER_HEAP_ISOLATED
3061     int check_for_ephemeral_alloc();
3062     PER_HEAP_ISOLATED
3063     void wait_to_proceed();
3064     PER_HEAP_ISOLATED
3065     void fire_alloc_wait_event_begin (alloc_wait_reason awr);
3066     PER_HEAP_ISOLATED
3067     void fire_alloc_wait_event_end (alloc_wait_reason awr);
3068     PER_HEAP
3069     uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
3070     PER_HEAP_ISOLATED
3071     BOOL background_running_p() { return gc_background_running; }
3072     PER_HEAP_ISOLATED
3073     void start_c_gc();
3074     PER_HEAP
3075     void kill_gc_thread();
3076     PER_HEAP
3077     void bgc_thread_function();
3078     PER_HEAP_ISOLATED
3079     void do_background_gc();
3080     static
3081     void bgc_thread_stub (void* arg);
3082 #endif //BACKGROUND_GC
3083
3084 public:
3085
3086     PER_HEAP_ISOLATED
3087     VOLATILE(bool) internal_gc_done;
3088
3089 #ifdef BACKGROUND_GC
3090     PER_HEAP_ISOLATED
3091     uint32_t cm_in_progress;
3092
3093     // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
3094     // we do right before the bgc starts.
3095     PER_HEAP_ISOLATED
3096     BOOL     dont_restart_ee_p;
3097
3098     PER_HEAP_ISOLATED
3099     GCEvent bgc_start_event;
3100 #endif //BACKGROUND_GC
3101
3102     // The variables in this block are known to the DAC and must come first
3103     // in the gc_heap class.
3104
3105     // Keeps track of the highest address allocated by Alloc
3106     PER_HEAP
3107     uint8_t* alloc_allocated;
3108
3109     // The ephemeral heap segment
3110     PER_HEAP
3111     heap_segment* ephemeral_heap_segment;
3112
3113     // The finalize queue.
3114     PER_HEAP
3115     CFinalize* finalize_queue;
3116
3117     // OOM info.
3118     PER_HEAP
3119     oom_history oom_info;
3120
3121     // Interesting data, recorded per-heap.
3122     PER_HEAP
3123     size_t interesting_data_per_heap[max_idp_count];
3124
3125     PER_HEAP
3126     size_t compact_reasons_per_heap[max_compact_reasons_count];
3127
3128     PER_HEAP
3129     size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
3130
3131     PER_HEAP
3132     size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
3133
3134     PER_HEAP
3135     uint8_t** internal_root_array;
3136
3137     PER_HEAP
3138     size_t internal_root_array_index;
3139
3140     PER_HEAP
3141     BOOL heap_analyze_success;
3142
3143     // The generation table. Must always be last.
3144     PER_HEAP
3145     generation generation_table [total_generation_count];
3146
3147     // End DAC zone
3148
3149 #define max_oom_history_count 4
3150
3151     PER_HEAP
3152     int oomhist_index_per_heap;
3153
3154     PER_HEAP
3155     oom_history oomhist_per_heap[max_oom_history_count];
3156
3157     PER_HEAP
3158     void add_to_oom_history_per_heap();
3159
3160     PER_HEAP
3161     BOOL expanded_in_fgc;
3162
3163     PER_HEAP_ISOLATED
3164     uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
3165
3166     // Returns TRUE if the current thread was in cooperative mode
3167     // before calling this function.
3168     PER_HEAP_ISOLATED
3169     bool enable_preemptive ();
3170     PER_HEAP_ISOLATED
3171     void disable_preemptive (bool restore_cooperative);
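    // Illustrative calling pattern: the value returned by enable_preemptive
    // is handed back to disable_preemptive so the thread ends up in the mode
    // it started in (cooperative_mode is an exposition-only name):
    //
    //   bool cooperative_mode = enable_preemptive ();
    //   // ... wait on an event or do other work that may block ...
    //   disable_preemptive (cooperative_mode);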
3172
3173     /* ------------------- per heap members --------------------------*/
3174
3175     PER_HEAP
3177     GCEvent gc_done_event; // same with and without MULTIPLE_HEAPS
3181
3182     PER_HEAP
3183     VOLATILE(int32_t) gc_done_event_lock;
3184
3185     PER_HEAP
3186     VOLATILE(bool) gc_done_event_set;
3187
3188     PER_HEAP
3189     void set_gc_done();
3190
3191     PER_HEAP
3192     void reset_gc_done();
3193
3194     PER_HEAP
3195     void enter_gc_done_event_lock();
3196
3197     PER_HEAP
3198     void exit_gc_done_event_lock();
3199
3200     PER_HEAP
3201     uint8_t*  ephemeral_low;      //lowest ephemeral address
3202
3203     PER_HEAP
3204     uint8_t*  ephemeral_high;     //highest ephemeral address
3205
3206     PER_HEAP
3207     uint32_t* card_table;
3208
3209     PER_HEAP
3210     short* brick_table;
3211
3212 #ifdef BACKGROUND_GC
3213     PER_HEAP
3214     uint32_t* mark_array;
3215 #endif //BACKGROUND_GC
3216
3217 #ifdef CARD_BUNDLE
3218     PER_HEAP
3219     uint32_t* card_bundle_table;
3220 #endif //CARD_BUNDLE
3221
3222 #ifdef FEATURE_BASICFREEZE
3223     PER_HEAP_ISOLATED
3224     sorted_table* seg_table;
3225 #endif //FEATURE_BASICFREEZE
3226
3227     PER_HEAP_ISOLATED
3228     VOLATILE(BOOL) gc_started;
3229
3230     // The following 2 events are there to support the gen2
3231     // notification feature which is only enabled if concurrent
3232     // GC is disabled.
3233     PER_HEAP_ISOLATED
3234     GCEvent full_gc_approach_event;
3235
3236     PER_HEAP_ISOLATED
3237     GCEvent full_gc_end_event;
3238
3239     // Full GC Notification percentages.
3240     PER_HEAP
3241     uint32_t fgn_maxgen_percent;
3242
3243     PER_HEAP_ISOLATED
3244     uint32_t fgn_loh_percent;
3245
3246     PER_HEAP_ISOLATED
3247     VOLATILE(bool) full_gc_approach_event_set;
3248
3249 #ifdef BACKGROUND_GC
3250     PER_HEAP_ISOLATED
3251     BOOL fgn_last_gc_was_concurrent;
3252 #endif //BACKGROUND_GC
3253
3254     PER_HEAP
3255     size_t fgn_last_alloc;
3256
3257     static uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
3258
3259     static wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms);
3260
3261     PER_HEAP
3262     uint8_t* demotion_low;
3263
3264     PER_HEAP
3265     uint8_t* demotion_high;
3266
3267     PER_HEAP
3268     BOOL demote_gen1_p;
3269
3270     PER_HEAP
3271     uint8_t* last_gen1_pin_end;
3272
3273     PER_HEAP
3274     gen_to_condemn_tuning gen_to_condemn_reasons;
3275
3276     PER_HEAP
3277     size_t etw_allocation_running_amount[2];
3278
3279     PER_HEAP
3280     uint64_t total_alloc_bytes_soh;
3281
3282     PER_HEAP
3283     uint64_t total_alloc_bytes_uoh;
3284
3285     PER_HEAP
3286     int gc_policy;  //sweep, compact, expand
3287
3288 #ifdef MULTIPLE_HEAPS
3289     PER_HEAP_ISOLATED
3290     bool gc_thread_no_affinitize_p;
3291
3292     PER_HEAP_ISOLATED
3293     GCEvent gc_start_event;
3294
3295     PER_HEAP_ISOLATED
3296     GCEvent ee_suspend_event;
3297
3298     PER_HEAP
3299     heap_segment* new_heap_segment;
3300
3301     PER_HEAP_ISOLATED
3302     size_t min_gen0_balance_delta;
3303
3304 #define alloc_quantum_balance_units (16)
3305
3306     PER_HEAP_ISOLATED
3307     size_t min_balance_threshold;
3308 #else //MULTIPLE_HEAPS
3309
3310     PER_HEAP
3311     size_t allocation_running_time;
3312
3313     PER_HEAP
3314     size_t allocation_running_amount;
3315
3316 #endif //MULTIPLE_HEAPS
3317
3318     PER_HEAP_ISOLATED
3319     gc_latency_level latency_level;
3320
3321     PER_HEAP_ISOLATED
3322     gc_mechanisms settings;
3323
3324     PER_HEAP_ISOLATED
3325     gc_history_global gc_data_global;
3326
3327     PER_HEAP_ISOLATED
3328     uint64_t gc_last_ephemeral_decommit_time;
3329
3330     PER_HEAP
3331     size_t gen0_big_free_spaces;
3332
3333 #ifdef SHORT_PLUGS
3334     PER_HEAP_ISOLATED
3335     double short_plugs_pad_ratio;
3336 #endif //SHORT_PLUGS
3337
3338     // We record the time GC work is done while EE is suspended.
3339     // suspended_start_time is what we get right before we call
3340     // SuspendEE. We omit the time between GC end and RestartEE
3341     // because it's very short and by the time we are calling it
3342     // the settings may have changed and we'd have to do more work
3343     // to figure out the right GC to record info of.
3344     // 
3345     // The complications are the GCs triggered without their own
3346     // SuspendEE, in which case we will record that GC's duration
3347     // as its pause duration and the rest toward the GC that
3348     // the SuspendEE was for. The ephemeral GC we might trigger
3349     // at the beginning of a BGC and the PM triggered full GCs
3350     // fall into this case.
3351     PER_HEAP_ISOLATED
3352     uint64_t suspended_start_time;
3353
3354     PER_HEAP_ISOLATED
3355     uint64_t end_gc_time;
3356
3357     PER_HEAP_ISOLATED
3358     uint64_t total_suspended_time;
3359
3360     PER_HEAP_ISOLATED
3361     uint64_t process_start_time;
3362
3363     PER_HEAP_ISOLATED
3364     last_recorded_gc_info last_ephemeral_gc_info;
3365
3366     PER_HEAP_ISOLATED
3367     last_recorded_gc_info last_full_blocking_gc_info;
3368
3369 #ifdef BACKGROUND_GC
3370     // If the user didn't specify which kind of GC info to return, we need
3371     // to return the last recorded one. There's a complication with BGC as BGC
3372     // end runs concurrently. If 2 BGCs run back to back, we can't have one
3373     // update the info while the user thread is reading it (and we'd still like
3374     // to return the last BGC info - otherwise, if we only did BGCs, we could frequently
3375     // return nothing). So we maintain 2 of these for BGC and the older one is
3376     // guaranteed to be consistent.
3377     PER_HEAP_ISOLATED
3378     last_recorded_gc_info last_bgc_info[2];
3379     // This is either 0 or 1.
3380     PER_HEAP_ISOLATED
3381     VOLATILE(int) last_bgc_info_index;
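    // This is plain double buffering: one slot is being recorded for the BGC
    // in flight, the other holds the last completed BGC and stays untouched.
    // Sketch only - the real update/read paths are in gc.cpp:
    //
    //   // reader side, roughly:
    //   int completed = background_running_p() ? !last_bgc_info_index : last_bgc_info_index;
    //   last_recorded_gc_info* info = &last_bgc_info[completed];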
3382     // Since a BGC can finish later than blocking GCs with larger indices,
3383     // we can't just compare the index recorded in the GC info. We use this
3384     // to know whether we should be looking for a bgc info or a blocking GC,
3385     // if the user asks for the latest GC info of any kind.
3386     // This can only go from false to true concurrently so if it is true,
3387     // it means the bgc info is ready.
3388     PER_HEAP_ISOLATED
3389     VOLATILE(bool) is_last_recorded_bgc;
3390
3391     PER_HEAP_ISOLATED
3392     void add_bgc_pause_duration_0();
3393
3394     PER_HEAP_ISOLATED
3395     last_recorded_gc_info* get_completed_bgc_info();
3396 #endif //BACKGROUND_GC
3397
3398 #ifdef HOST_64BIT
3399     PER_HEAP_ISOLATED
3400         size_t youngest_gen_desired_th;
3401 #endif //HOST_64BIT
3402
3403     PER_HEAP_ISOLATED
3404     uint32_t high_memory_load_th;
3405
3406     PER_HEAP_ISOLATED
3407     uint32_t m_high_memory_load_th;
3408
3409     PER_HEAP_ISOLATED
3410     uint32_t v_high_memory_load_th;
3411
3412     PER_HEAP_ISOLATED
3413     bool is_restricted_physical_mem;
3414
3415     PER_HEAP_ISOLATED
3416     uint64_t mem_one_percent;
3417
3418     PER_HEAP_ISOLATED
3419     uint64_t total_physical_mem;
3420
3421     PER_HEAP_ISOLATED
3422     uint64_t entry_available_physical_mem;
3423
3424     // Hard limit for the heap, only supported on 64-bit.
3425     //
3426     // Users can specify a hard limit for the GC heap via GCHeapHardLimit or
3427     // a percentage of the physical memory this process is allowed to use via
3428     // GCHeapHardLimitPercent. This is the maximum commit size the GC heap
3429     // can consume.
3430     //
3431     // The way the hard limit is decided is:
3432     //
3433     // If the GCHeapHardLimit config is specified that's the value we use;
3434     // else if the GCHeapHardLimitPercent config is specified we use that
3435     // value;
3436     // else if the process is running inside a container with a memory limit,
3437     // the hard limit is
3438     // max (20mb, 75% of the memory limit on the container).
3439     //
3440     // Due to the different perf characteristics of containers we make the
3441     // following policy changes:
3442     //
3443     // 1) No longer affinitize Server GC threads by default because we wouldn't
3444     // want all the containers on the machine to only affinitize to use the
3445     // first few CPUs (and we don't know which CPUs are already used). You
3446     // can however override this by specifying the GCHeapAffinitizeMask
3447     // config which will decide which CPUs the process will affinitize the
3448     // Server GC threads to.
3449     //
3450     // 2) Segment size is determined by limit / number of heaps but has a
3451     // minimum value of 16mb. This can be changed by specifying the number
3452     // of heaps via the GCHeapCount config. The minimum size is to avoid
3453     // the scenario where the hard limit is small but the process can use
3454     // many procs and we end up with tiny segments which doesn't make sense.
3455     //
3456     // 3) LOH compaction occurs automatically if needed.
3457     //
3458     // Since we do allow both gen0 and gen3 allocations, and we don't know
3459     // the distribution (and it's unrealistic to ask users to specify
3460     // it), we reserve memory this way -
3461     //
3462     // For SOH we reserve (limit / number of heaps) per heap.
3463     // For LOH we reserve (limit * 2 / number of heaps) per heap.
3464     //
3465     // This means the following -
3466     //
3467     // + we never need to acquire new segments. This simplifies the perf
3468     // calculations by a lot.
3469     //
3470     // + we now need a different definition of "end of seg" because we
3471     // need to make sure the total does not exceed the limit.
3472     //
3473     // + if we detect that we exceed the commit limit in the allocator we
3474     // wouldn't want to treat that as a normal commit failure because that
3475     // would mean we always do full compacting GCs.
3476     //
3477     // TODO: some of the logic here applies to the general case as well
3478     // such as LOH automatic compaction. However it will require more
3479     // testing to change the general case.
3480     PER_HEAP_ISOLATED
3481     size_t heap_hard_limit;
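    // A sketch of the decision above (the real computation lives in gc.cpp;
    // everything other than the two config names is exposition only):
    //
    //   if (GCHeapHardLimit != 0)
    //       heap_hard_limit = GCHeapHardLimit;
    //   else if (GCHeapHardLimitPercent != 0)
    //       heap_hard_limit = total_physical_mem * GCHeapHardLimitPercent / 100;
    //   else if (process_is_in_container_with_memory_limit)
    //       heap_hard_limit = max (20*1024*1024, (size_t)(0.75 * container_memory_limit));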
3482
3483     PER_HEAP_ISOLATED
3484     size_t heap_hard_limit_oh[total_oh_count - 1];
3485
3486     PER_HEAP_ISOLATED
3487     CLRCriticalSection check_commit_cs;
3488
3489     PER_HEAP_ISOLATED
3490     size_t current_total_committed;
3491
3492     PER_HEAP_ISOLATED
3493     size_t committed_by_oh[total_oh_count];
3494
3495     // This is what GC uses for its own bookkeeping.
3496     PER_HEAP_ISOLATED
3497     size_t current_total_committed_bookkeeping;
3498
3499     // This is what GC's own bookkeeping consumes.
3500     PER_HEAP_ISOLATED
3501     size_t current_total_committed_gc_own;
3502
3503     // Whether large pages should be used.
3504     PER_HEAP_ISOLATED
3505     bool use_large_pages_p;
3506
3507 #ifdef HEAP_BALANCE_INSTRUMENTATION
3508     PER_HEAP_ISOLATED
3509     size_t last_gc_end_time_us;
3510 #endif //HEAP_BALANCE_INSTRUMENTATION
3511
3512     PER_HEAP_ISOLATED
3513     size_t min_segment_size;
3514
3515     PER_HEAP_ISOLATED
3516     size_t min_segment_size_shr;
3517
3518     // For SOH we always allocate segments of the same
3519     // size unless no_gc_region requires larger ones.
3520     PER_HEAP_ISOLATED
3521     size_t soh_segment_size;
3522
3523     PER_HEAP_ISOLATED
3524     size_t min_uoh_segment_size;
3525
3526     PER_HEAP_ISOLATED
3527     size_t segment_info_size;
3528
3529     PER_HEAP
3530     uint8_t* lowest_address;
3531
3532     PER_HEAP
3533     uint8_t* highest_address;
3534
3535     PER_HEAP
3536     BOOL ephemeral_promotion;
3537     PER_HEAP
3538     uint8_t* saved_ephemeral_plan_start[ephemeral_generation_count];
3539     PER_HEAP
3540     size_t saved_ephemeral_plan_start_size[ephemeral_generation_count];
3541
3542 protected:
3543 #ifdef MULTIPLE_HEAPS
3544     PER_HEAP
3545     GCHeap* vm_heap;
3546     PER_HEAP
3547     int heap_number;
3548     PER_HEAP
3549     VOLATILE(int) alloc_context_count;
3550 #else //MULTIPLE_HEAPS
3551 #define vm_heap ((GCHeap*) g_theGCHeap)
3552 #define heap_number (0)
3553 #endif //MULTIPLE_HEAPS
3554
3555     PER_HEAP
3556     uint64_t time_bgc_last;
3557
3558     PER_HEAP
3559     uint8_t*       gc_low; // lowest address being condemned
3560
3561     PER_HEAP
3562     uint8_t*       gc_high; //highest address being condemned
3563
3564     PER_HEAP
3565     size_t      mark_stack_tos;
3566
3567     PER_HEAP
3568     size_t      mark_stack_bos;
3569
3570     PER_HEAP
3571     size_t      mark_stack_array_length;
3572
3573     PER_HEAP
3574     mark*       mark_stack_array;
3575
3576 #if defined (_DEBUG) && defined (VERIFY_HEAP)
3577     PER_HEAP
3578     BOOL       verify_pinned_queue_p;
3579 #endif // _DEBUG && VERIFY_HEAP
3580
3581     PER_HEAP
3582     uint8_t*    oldest_pinned_plug;
3583
3584     PER_HEAP
3585     size_t      num_pinned_objects;
3586
3587 #ifdef FEATURE_LOH_COMPACTION
3588     PER_HEAP
3589     size_t      loh_pinned_queue_tos;
3590
3591     PER_HEAP
3592     size_t      loh_pinned_queue_bos;
3593
3594     PER_HEAP
3595     size_t      loh_pinned_queue_length;
3596
3597     PER_HEAP_ISOLATED
3598     int         loh_pinned_queue_decay;
3599
3600     PER_HEAP
3601     mark*       loh_pinned_queue;
3602
3603     // This is for forced LOH compaction via the complus env var
3604     PER_HEAP_ISOLATED
3605     BOOL        loh_compaction_always_p;
3606
3607     // This is set by the user.
3608     PER_HEAP_ISOLATED
3609     gc_loh_compaction_mode loh_compaction_mode;
3610
3611     // We may not compact LOH on every heap if we can't
3612     // grow the pinned queue. This is to indicate whether
3613     // this heap's LOH is compacted or not. So even if
3614     // settings.loh_compaction is TRUE this may not be TRUE.
3615     PER_HEAP
3616     BOOL        loh_compacted_p;
3617 #endif //FEATURE_LOH_COMPACTION
3618
3619 #ifdef BACKGROUND_GC
3620
3621     PER_HEAP
3622     EEThreadId bgc_thread_id;
3623
3624 #ifdef WRITE_WATCH
3625     PER_HEAP
3626     uint8_t* background_written_addresses [array_size+2];
3627 #endif //WRITE_WATCH
3628
3629     PER_HEAP_ISOLATED
3630     VOLATILE(c_gc_state) current_c_gc_state;     //tells the large object allocator to
3631     //mark the object as new since the start of gc.
3632
3633     PER_HEAP_ISOLATED
3634     gc_mechanisms saved_bgc_settings;
3635
3636     PER_HEAP_ISOLATED
3637     gc_history_global bgc_data_global;
3638
3639     PER_HEAP_ISOLATED
3640     VOLATILE(BOOL) gc_background_running;
3641
3642     PER_HEAP
3643     gc_history_per_heap bgc_data_per_heap;
3644
3645     PER_HEAP
3646     BOOL bgc_thread_running; // gc thread is in its main loop
3647
3648     PER_HEAP_ISOLATED
3649     BOOL keep_bgc_threads_p;
3650
3651     // This event is used by BGC threads to do something on
3652     // one specific thread while other BGC threads have to
3653     // wait. This is different from a join because you can't
3654     // specify which thread should be doing some task
3655     // while other threads have to wait.
3656     // For example, to make the BGC threads managed threads
3657     // we need to create them on the thread that called
3658     // SuspendEE which is heap 0.
3659     PER_HEAP_ISOLATED
3660     GCEvent bgc_threads_sync_event;
3661
3662     PER_HEAP
3663     Thread* bgc_thread;
3664
3665     PER_HEAP
3666     CLRCriticalSection bgc_threads_timeout_cs;
3667
3668     PER_HEAP_ISOLATED
3669     GCEvent background_gc_done_event;
3670
3671     PER_HEAP_ISOLATED
3672     GCEvent ee_proceed_event;
3673
3674     PER_HEAP_ISOLATED
3675     bool gc_can_use_concurrent;
3676
3677     PER_HEAP_ISOLATED
3678     bool temp_disable_concurrent_p;
3679
3680     PER_HEAP_ISOLATED
3681     BOOL do_ephemeral_gc_p;
3682
3683     PER_HEAP_ISOLATED
3684     BOOL do_concurrent_p;
3685
3686     PER_HEAP
3687     VOLATILE(bgc_state) current_bgc_state;
3688
3689     struct gc_history
3690     {
3691         size_t gc_index;
3692         bgc_state current_bgc_state;
3693         uint32_t gc_time_ms;
3694         // This is in bytes per ms; consider breaking it
3695         // into the efficiency per phase.
3696         size_t gc_efficiency;
3697         uint8_t* eph_low;
3698         uint8_t* gen0_start;
3699         uint8_t* eph_high;
3700         uint8_t* bgc_highest;
3701         uint8_t* bgc_lowest;
3702         uint8_t* fgc_highest;
3703         uint8_t* fgc_lowest;
3704         uint8_t* g_highest;
3705         uint8_t* g_lowest;
3706     };
3707
3708 #define max_history_count 64
3709
3710     PER_HEAP
3711     int gchist_index_per_heap;
3712
3713     PER_HEAP
3714     gc_history gchist_per_heap[max_history_count];
3715
3716     PER_HEAP_ISOLATED
3717     int gchist_index;
3718
3719     PER_HEAP_ISOLATED
3720     gc_mechanisms_store gchist[max_history_count];
3721
3722     PER_HEAP
3723     size_t total_promoted_bytes;
3724
3725     PER_HEAP
3726     size_t     bgc_overflow_count;
3727
3728     PER_HEAP
3729     size_t     bgc_begin_loh_size;
3730     PER_HEAP
3731     size_t     bgc_begin_poh_size;
3732     PER_HEAP
3733     size_t     end_loh_size;
3734     PER_HEAP
3735     size_t     end_poh_size;
3736
3737 #ifdef BGC_SERVO_TUNING
3738     PER_HEAP
3739     uint64_t   loh_a_no_bgc;
3740
3741     PER_HEAP
3742     uint64_t   loh_a_bgc_marking;
3743
3744     PER_HEAP
3745     uint64_t   loh_a_bgc_planning;
3746
3747     // Total LOH allocated during the last BGC's plan phase + between the last
3748     // and this BGC + during this BGC's mark phase.
3749     PER_HEAP_ISOLATED
3750     uint64_t   total_loh_a_last_bgc;
3751
3752     PER_HEAP
3753     size_t     bgc_maxgen_end_fl_size;
3754 #endif //BGC_SERVO_TUNING
3755
3756     // We need to throttle the LOH allocations during BGC since we can't
3757     // collect LOH when BGC is in progress.
3758     // We allow the LOH heap size to double during a BGC. So for every
3759     // 10% increase we will have the LOH allocating thread sleep for one more
3760     // ms. So if we are already 30% over the original heap size the thread will
3761     // sleep for 3ms.
3762     PER_HEAP
3763     uint32_t   bgc_alloc_spin_uoh;
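    // For illustration, with the policy above the sleep works out to roughly
    // one ms per 10% of growth over the size recorded at BGC start:
    //
    //   // e.g. bgc_begin_loh_size = 100mb, bgc_loh_size_increased = 30mb
    //   // => 30% over the original size => sleep ~3 ms per allocation
    //   size_t pct_over = bgc_loh_size_increased * 100 / bgc_begin_loh_size;
    //   uint32_t spin_ms = (uint32_t)(pct_over / 10);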
3764
3765     // This includes what we allocate at the end of the segment - allocating
3766     // from the free list doesn't increase the heap size.
3767     PER_HEAP
3768     size_t     bgc_loh_size_increased;
3769     PER_HEAP
3770     size_t     bgc_poh_size_increased;
3771
3772     PER_HEAP
3773     size_t     background_soh_alloc_count;
3774
3775     PER_HEAP
3776     size_t     background_uoh_alloc_count;
3777
3778     PER_HEAP
3779     VOLATILE(int32_t) uoh_alloc_thread_count;
3780
3781     PER_HEAP
3782     uint8_t**  background_mark_stack_tos;
3783
3784     PER_HEAP
3785     uint8_t**  background_mark_stack_array;
3786
3787     PER_HEAP
3788     size_t    background_mark_stack_array_length;
3789
3790     PER_HEAP
3791     uint8_t*  background_min_overflow_address;
3792
3793     PER_HEAP
3794     uint8_t*  background_max_overflow_address;
3795
3796     // We can't process the soh range concurrently so we
3797     // wait till final mark to process it.
3798     PER_HEAP
3799     BOOL      processed_soh_overflow_p;
3800
3801     PER_HEAP
3802     uint8_t*  background_min_soh_overflow_address;
3803
3804     PER_HEAP
3805     uint8_t*  background_max_soh_overflow_address;
3806
3807     PER_HEAP
3808     heap_segment* saved_overflow_ephemeral_seg;
3809
3810     PER_HEAP
3811     heap_segment* saved_sweep_ephemeral_seg;
3812
3813     PER_HEAP
3814     uint8_t* saved_sweep_ephemeral_start;
3815
3816     PER_HEAP
3817     uint8_t* background_saved_lowest_address;
3818
3819     PER_HEAP
3820     uint8_t* background_saved_highest_address;
3821
3822     // This is used for synchronization between the bgc thread
3823     // for this heap and the user threads allocating on this
3824     // heap.
3825     PER_HEAP
3826     exclusive_sync* bgc_alloc_lock;
3827
3828 #ifdef SNOOP_STATS
3829     PER_HEAP
3830     snoop_stats_data snoop_stat;
3831 #endif //SNOOP_STATS
3832
3833
3834     PER_HEAP
3835     uint8_t**          c_mark_list;
3836
3837     PER_HEAP
3838     size_t          c_mark_list_length;
3839
3840     PER_HEAP
3841     size_t          c_mark_list_index;
3842 #endif //BACKGROUND_GC
3843
3844 #ifdef MARK_LIST
3845     PER_HEAP
3846     uint8_t** mark_list;
3847
3848     PER_HEAP_ISOLATED
3849     size_t mark_list_size;
3850
3851     PER_HEAP_ISOLATED
3852     bool mark_list_overflow;
3853
3854     PER_HEAP
3855     uint8_t** mark_list_end;
3856
3857     PER_HEAP
3858     uint8_t** mark_list_index;
3859
3860     PER_HEAP_ISOLATED
3861     uint8_t** g_mark_list;
3862 #ifdef PARALLEL_MARK_LIST_SORT
3863     PER_HEAP_ISOLATED
3864     uint8_t** g_mark_list_copy;
3865     PER_HEAP
3866     uint8_t*** mark_list_piece_start;
3867     uint8_t*** mark_list_piece_end;
3868 #endif //PARALLEL_MARK_LIST_SORT
3869 #endif //MARK_LIST
3870
3871     PER_HEAP
3872     uint8_t*  min_overflow_address;
3873
3874     PER_HEAP
3875     uint8_t*  max_overflow_address;
3876
3877 #ifndef MULTIPLE_HEAPS
3878     PER_HEAP
3879     uint8_t*  shigh; //keeps track of the highest marked object
3880
3881     PER_HEAP
3882     uint8_t*  slow; //keeps track of the lowest marked object
3883 #endif //MULTIPLE_HEAPS
3884
3885     PER_HEAP
3886     size_t allocation_quantum;
3887
3888     PER_HEAP
3889     size_t alloc_contexts_used;
3890
3891     PER_HEAP_ISOLATED
3892     no_gc_region_info current_no_gc_region_info;
3893
3894     PER_HEAP
3895     size_t soh_allocation_no_gc;
3896
3897     PER_HEAP
3898     size_t loh_allocation_no_gc;
3899
3900     PER_HEAP
3901     bool no_gc_oom_p;
3902
3903     PER_HEAP
3904     heap_segment* saved_loh_segment_no_gc;
3905
3906     PER_HEAP_ISOLATED
3907     BOOL proceed_with_gc_p;
3908
3909 #ifdef MULTIPLE_HEAPS
3910     PER_HEAP_ISOLATED
3911     BOOL gradual_decommit_in_progress_p;
3912
3913     PER_HEAP_ISOLATED
3914     size_t max_decommit_step_size;
3915 #endif //MULTIPLE_HEAPS
3916
3917 #define youngest_generation (generation_of (0))
3918 #define large_object_generation (generation_of (loh_generation))
3919 #define pinned_object_generation (generation_of (poh_generation))
3920
3921     // The more_space_lock and gc_lock are used for 3 purposes:
3922     //
3923     // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock_soh)
3924     // 2) to synchronize allocations of large objects (more_space_lock_uoh)
3925     // 3) to synchronize the GC itself (gc_lock)
3926     //
3927     PER_HEAP_ISOLATED
3928     GCSpinLock gc_lock; //lock while doing GC
3929
3930     PER_HEAP
3931     GCSpinLock more_space_lock_soh; //lock while allocating more space for soh
3932
3933     PER_HEAP
3934     GCSpinLock more_space_lock_uoh;
3935
3936 #ifdef SYNCHRONIZATION_STATS
3937
3938     PER_HEAP
3939     unsigned int good_suspension;
3940
3941     PER_HEAP
3942     unsigned int bad_suspension;
3943
3944     // Number of times when msl_acquire is > 200 cycles.
3945     PER_HEAP
3946     unsigned int num_high_msl_acquire;
3947
3948     // Number of times when msl_acquire is < 200 cycles.
3949     PER_HEAP
3950     unsigned int num_low_msl_acquire;
3951
3952     // Number of times the more_space_lock is acquired.
3953     PER_HEAP
3954     unsigned int num_msl_acquired;
3955
3956     // Total cycles it takes to acquire the more_space_lock.
3957     PER_HEAP
3958     uint64_t total_msl_acquire;
3959
3960     PER_HEAP
3961     void init_heap_sync_stats()
3962     {
3963         good_suspension = 0;
3964         bad_suspension = 0;
3965         num_msl_acquired = 0;
3966         total_msl_acquire = 0;
3967         num_high_msl_acquire = 0;
3968         num_low_msl_acquire = 0;
3969         more_space_lock_soh.init();
3970         gc_lock.init();
3971     }
3972
3973     PER_HEAP
3974     void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3975     {
3976         printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3977             heap_num,
3978             alloc_contexts_used,
3979             good_suspension,
3980             bad_suspension,
3981             (unsigned int)(total_msl_acquire / gc_count_during_log),
3982             num_high_msl_acquire / gc_count_during_log,
3983             num_low_msl_acquire / gc_count_during_log,
3984             num_msl_acquired / gc_count_during_log,
3985             more_space_lock_soh.num_switch_thread / gc_count_during_log,
3986             more_space_lock_soh.num_wait_longer / gc_count_during_log,
3987             more_space_lock_soh.num_switch_thread_w / gc_count_during_log,
3988             more_space_lock_soh.num_disable_preemptive_w / gc_count_during_log);
3989     }
3990
3991 #endif //SYNCHRONIZATION_STATS
3992
3993 #define NUM_LOH_ALIST (7)
3994     // bucket 0 contains sizes less than 64*1024
3995     // the "BITS" number here is the highest bit in 64*1024 - 1, zero-based as in BitScanReverse.
3996     // see first_suitable_bucket(size_t size) for details.
3997 #define BASE_LOH_ALIST_BITS (15)
3998     PER_HEAP
3999     alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
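    // For illustration, the bucket index works out to roughly
    //   max (0, highest_set_bit_index (size) - BASE_LOH_ALIST_BITS)
    // (see first_suitable_bucket for the real computation), e.g.:
    //   size = 50*1024  -> highest bit 15 -> bucket 0
    //   size = 200*1024 -> highest bit 17 -> bucket 2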
4000
4001 #define NUM_GEN2_ALIST (12)
4002 #ifdef HOST_64BIT
4003     // bucket 0 contains sizes less than 256
4004 #define BASE_GEN2_ALIST_BITS (7)
4005 #else
4006     // bucket 0 contains sizes less than 128
4007 #define BASE_GEN2_ALIST_BITS (6)
4008 #endif // HOST_64BIT
4009     PER_HEAP
4010     alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
4011
4012 #define NUM_POH_ALIST (19)
4013     // bucket 0 contains sizes less than 256
4014 #define BASE_POH_ALIST_BITS (7)
4015     PER_HEAP
4016     alloc_list poh_alloc_list[NUM_POH_ALIST-1];
4017
4018 //------------------------------------------    
4019
4020     PER_HEAP
4021     dynamic_data dynamic_data_table [total_generation_count];
4022
4023     PER_HEAP
4024     gc_history_per_heap gc_data_per_heap;
4025
4026     PER_HEAP
4027     size_t maxgen_pinned_compact_before_advance;
4028
4029     // dynamic tuning.
4030     PER_HEAP
4031     BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
4032     // if elevate_p is FALSE, it means we are determining fragmentation for a generation
4033     // to see if we should condemn this gen; otherwise it means we are determining if
4034     // we should elevate to doing max_gen from an ephemeral gen.
4035     PER_HEAP
4036     BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
4037     PER_HEAP
4038     BOOL
4039     dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
4040     PER_HEAP
4041     BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
4042     PER_HEAP
4043     BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
4044
4045     PER_HEAP
4046     int generation_skip_ratio;//in %
4047
4048     PER_HEAP
4049     BOOL gen0_bricks_cleared;
4050     PER_HEAP
4051     int gen0_must_clear_bricks;
4052
4053     PER_HEAP_ISOLATED
4054     bool maxgen_size_inc_p;
4055
4056     PER_HEAP_ISOLATED
4057     size_t full_gc_counts[gc_type_max];
4058
4059     // the # of bytes allocated since the last full compacting GC.
4060     PER_HEAP
4061     uint64_t loh_alloc_since_cg;
4062
4063     PER_HEAP
4064     BOOL elevation_requested;
4065
4066     // if this is TRUE, we should always guarantee that we do a
4067     // full compacting GC before we OOM.
4068     PER_HEAP
4069     BOOL last_gc_before_oom;
4070
4071     PER_HEAP_ISOLATED
4072     BOOL should_expand_in_full_gc;
4073
4074     // When we decide if we should expand the heap or not, we are
4075     // fine NOT to expand if we find enough free space in gen0's free
4076     // list or end of seg and we check this in decide_on_compacting.
4077     // This is an expensive check so we just record the fact and don't
4078     // need to check in the allocator again.
4079     PER_HEAP
4080     BOOL sufficient_gen0_space_p;
4081
4082 #ifdef MULTIPLE_HEAPS
4083     PER_HEAP
4084     bool gen0_allocated_after_gc_p;
4085 #endif //MULTIPLE_HEAPS
4086
4087     // A provisional mode means we could change our mind in the middle of a GC
4088     // and want to do a different GC instead.
4089     //
4090     // Right now there's only one such case which is in the middle of a gen1
4091     // GC we want to do a blocking gen2 instead. If/When we have more we should
4092     // have an enum that tells us which case in this provisional mode
4093     // we are in.
4094     //
4095     // When this mode is triggered, our current (only) condition says
4096     // we have high fragmentation in gen2 even after we do a compacting
4097     // full GC which is an indication of heavy pinning in gen2. In this
4098     // case we never do BGCs, we just do either gen0 or gen1's till a
4099     // gen1 needs to increase the gen2 size, in which case we finish up
4100     // the current gen1 as a sweeping GC and immediately do a compacting
4101     // full GC instead (without restarting EE).
4102     PER_HEAP_ISOLATED
4103     bool provisional_mode_triggered;
4104
4105     PER_HEAP_ISOLATED
4106     bool pm_trigger_full_gc;
4107
4108     // For testing only BEG
4109     // pm_stress_on currently means (since we just have one mode) we
4110     // randomly turn the mode on; and after a random # of NGC2s we
4111     // turn it off.
4112     // NOTE that this means concurrent will be disabled so we can
4113     // simulate what this mode is supposed to be used for.
4114     PER_HEAP_ISOLATED
4115     bool pm_stress_on;
4116
4117     PER_HEAP_ISOLATED
4118     size_t provisional_triggered_gc_count;
4119
4120     PER_HEAP_ISOLATED
4121     size_t provisional_off_gc_count;
4122     // For testing only END
4123
4124     PER_HEAP_ISOLATED
4125     size_t num_provisional_triggered;
4126
4127     PER_HEAP
4128     size_t allocated_since_last_gc;
4129
4130 #ifdef BACKGROUND_GC
4131     PER_HEAP_ISOLATED
4132     size_t ephemeral_fgc_counts[max_generation];
4133
4134     PER_HEAP_ISOLATED
4135     BOOL alloc_wait_event_p;
4136
4137     PER_HEAP
4138     uint8_t* next_sweep_obj;
4139
4140     PER_HEAP
4141     uint8_t* current_sweep_pos;
4142
4143 #endif //BACKGROUND_GC
4144
4145     PER_HEAP
4146     fgm_history fgm_result;
4147
4148     PER_HEAP_ISOLATED
4149     size_t eph_gen_starts_size;
4150
4151 #ifdef GC_CONFIG_DRIVEN
4152     // 0 stores compacting GCs;
4153     // 1 stores sweeping GCs;
4154     PER_HEAP_ISOLATED
4155     size_t compact_or_sweep_gcs[2];
4156
4157     PER_HEAP
4158     size_t interesting_data_per_gc[max_idp_count];
4159 #endif //GC_CONFIG_DRIVEN
4160
4161     PER_HEAP
4162     BOOL        ro_segments_in_range;
4163
4164 #ifdef BACKGROUND_GC
4165     PER_HEAP
4166     heap_segment* freeable_soh_segment;
4167 #endif //BACKGROUND_GC
4168
4169     PER_HEAP
4170     heap_segment* freeable_uoh_segment;
4171
4172     PER_HEAP_ISOLATED
4173     heap_segment* segment_standby_list;
4174
4175     PER_HEAP
4176     size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
4177
4178     PER_HEAP
4179     size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
4180
4181     PER_HEAP
4182     size_t ordered_plug_indices[MAX_NUM_BUCKETS];
4183
4184     PER_HEAP
4185     size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
4186
4187     PER_HEAP
4188     BOOL ordered_plug_indices_init;
4189
4190     PER_HEAP
4191     BOOL use_bestfit;
4192
4193     PER_HEAP
4194     uint8_t* bestfit_first_pin;
4195
4196     PER_HEAP
4197     BOOL commit_end_of_seg;
4198
4199     PER_HEAP
4200     size_t max_free_space_items; // dynamically adjusted.
4201
4202     PER_HEAP
4203     size_t free_space_buckets;
4204
4205     PER_HEAP
4206     size_t free_space_items;
4207
4208     // -1 means we are using all the free
4209     // spaces we have (not including
4210     // end of seg space).
4211     PER_HEAP
4212     int trimmed_free_space_index;
4213
4214     PER_HEAP
4215     size_t total_ephemeral_plugs;
4216
4217     PER_HEAP
4218     seg_free_spaces* bestfit_seg;
4219
4220     // Note: we know this from the plan phase.
4221     // total_ephemeral_plugs actually has the same value
4222     // but while we are calculating its value we also store
4223     // info on how big the plugs are for best fit which we
4224     // don't do in plan phase.
4225     // TODO: get rid of total_ephemeral_plugs.
4226     PER_HEAP
4227     size_t total_ephemeral_size;
4228
4229 public:
4230
4231 #ifdef HEAP_ANALYZE
4232
4233     PER_HEAP_ISOLATED
4234     BOOL heap_analyze_enabled;
4235
4236     PER_HEAP
4237     size_t internal_root_array_length;
4238
4239     // next two fields are used to optimize the search for the object
4240     // enclosing the current reference handled by ha_mark_object_simple.
4241     PER_HEAP
4242     uint8_t*  current_obj;
4243
4244     PER_HEAP
4245     size_t current_obj_size;
4246
4247 #endif //HEAP_ANALYZE
4248
4249 public:
4250
4251     PER_HEAP
4252     int         condemned_generation_num;
4253
4254     PER_HEAP
4255     BOOL        blocking_collection;
4256
4257 #ifdef MULTIPLE_HEAPS
4258     static
4259     int n_heaps;
4260
4261     static
4262     gc_heap** g_heaps;
4263
4264     static
4265     size_t*   g_promoted;
4266 #ifdef BACKGROUND_GC
4267     static
4268     size_t*   g_bpromoted;
4269 #endif //BACKGROUND_GC
4270 #ifdef MH_SC_MARK
4271     PER_HEAP_ISOLATED
4272     int*  g_mark_stack_busy;
4273 #endif //MH_SC_MARK
4274 #else
4275     static
4276     size_t    g_promoted;
4277 #ifdef BACKGROUND_GC
4278     static
4279     size_t    g_bpromoted;
4280 #endif //BACKGROUND_GC
4281 #endif //MULTIPLE_HEAPS
4282
4283     static
4284     size_t reserved_memory;
4285     static
4286     size_t reserved_memory_limit;
4287     static
4288     BOOL      g_low_memory_status;
4289
4290 #ifdef FEATURE_CARD_MARKING_STEALING
4291     PER_HEAP
4292     VOLATILE(uint32_t)    card_mark_chunk_index_soh;
4293
4294     PER_HEAP
4295     VOLATILE(bool)        card_mark_done_soh;
4296
4297     PER_HEAP
4298     VOLATILE(uint32_t)    card_mark_chunk_index_loh;
4299
4300     PER_HEAP
4301     VOLATILE(uint32_t)    card_mark_chunk_index_poh;
4302
4303     PER_HEAP
4304     VOLATILE(bool)        card_mark_done_uoh;
4305
4306     PER_HEAP
4307     void reset_card_marking_enumerators()
4308     {
4309         // set chunk index to all 1 bits so that incrementing it yields 0 as the first index
4310         card_mark_chunk_index_soh = ~0;
4311         card_mark_done_soh = false;
4312
4313         card_mark_chunk_index_loh = ~0;
4314         card_mark_chunk_index_poh = ~0;
4315         card_mark_done_uoh = false;
4316     }
4317
4318     PER_HEAP
4319     bool find_next_chunk(card_marking_enumerator& card_mark_enumerator, heap_segment* seg,
4320                          size_t& n_card_set, uint8_t*& start_address, uint8_t*& limit,
4321                          size_t& card, size_t& end_card, size_t& card_word_end);
4322 #endif //FEATURE_CARD_MARKING_STEALING
4323
4324 protected:
4325     PER_HEAP
4326     void update_collection_counts ();
4327 }; // class gc_heap
4328
4329 #define ASSERT_OFFSETS_MATCH(field) \
4330   static_assert(offsetof(dac_gc_heap, field) == offsetof(gc_heap, field), #field " offset mismatch")
4331
4332 #ifdef MULTIPLE_HEAPS
4333 ASSERT_OFFSETS_MATCH(alloc_allocated);
4334 ASSERT_OFFSETS_MATCH(ephemeral_heap_segment);
4335 ASSERT_OFFSETS_MATCH(finalize_queue);
4336 ASSERT_OFFSETS_MATCH(oom_info);
4337 ASSERT_OFFSETS_MATCH(interesting_data_per_heap);
4338 ASSERT_OFFSETS_MATCH(compact_reasons_per_heap);
4339 ASSERT_OFFSETS_MATCH(expand_mechanisms_per_heap);
4340 ASSERT_OFFSETS_MATCH(interesting_mechanism_bits_per_heap);
4341 ASSERT_OFFSETS_MATCH(internal_root_array);
4342 ASSERT_OFFSETS_MATCH(internal_root_array_index);
4343 ASSERT_OFFSETS_MATCH(heap_analyze_success);
4344 ASSERT_OFFSETS_MATCH(generation_table);
4345 #endif // MULTIPLE_HEAPS
4346
4347 #ifdef FEATURE_PREMORTEM_FINALIZATION
4348 class CFinalize
4349 {
4350 #ifdef DACCESS_COMPILE
4351     friend class ::ClrDataAccess;
4352 #endif // DACCESS_COMPILE
4353
4354     friend class CFinalizeStaticAsserts;
4355
4356 private:
4357
4358     //adjust the count and add a constant to add a segment
4359     static const int ExtraSegCount = 2;
4360     static const int FinalizerListSeg = total_generation_count + 1;
4361     static const int CriticalFinalizerListSeg = total_generation_count;
4362     //Does not correspond to a segment
4363     static const int FreeList = total_generation_count + ExtraSegCount;
4364
4365     PTR_PTR_Object m_FillPointers[total_generation_count + ExtraSegCount];
4366     PTR_PTR_Object m_Array;
4367     PTR_PTR_Object m_EndArray;
4368     size_t   m_PromotedCount;
4369
4370     VOLATILE(int32_t) lock;
4371 #ifdef _DEBUG
4372     EEThreadId lockowner_threadid;
4373 #endif // _DEBUG
4374
4375     BOOL GrowArray();
4376     void MoveItem (Object** fromIndex,
4377                    unsigned int fromSeg,
4378                    unsigned int toSeg);
4379
4380     inline PTR_PTR_Object& SegQueue (unsigned int Seg)
4381     {
4382         return (Seg ? m_FillPointers [Seg-1] : m_Array);
4383     }
4384     inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
4385     {
4386         return m_FillPointers [Seg];
4387     }
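    // Illustrative layout of m_Array, derived from the constants and the two
    // accessors above (segment i occupies [SegQueue (i), SegQueueLimit (i))):
    //
    //   m_Array                                                           m_EndArray
    //   | gen 0 | gen 1 | ... | CriticalFinalizerListSeg | FinalizerListSeg | free |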
4388
4389     BOOL IsSegEmpty (unsigned int i)
4390     {
4391         ASSERT ((int)i < FreeList);
4392         return (SegQueueLimit (i) == SegQueue (i));
4394     }
4395
4396 public:
4397     ~CFinalize();
4398     bool Initialize();
4399     void EnterFinalizeLock();
4400     void LeaveFinalizeLock();
4401     bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
4402     Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
4403     BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
4404     void RelocateFinalizationData (int gen, gc_heap* hp);
4405     void WalkFReachableObjects (fq_walk_fn fn);
4406     void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
4407     void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
4408     size_t GetPromotedCount();
4409
4410     //Methods used by the shutdown code to call every finalizer
4411     size_t GetNumberFinalizableObjects();
4412
4413     void CheckFinalizerObjects();
4414 };
4415
4416 class CFinalizeStaticAsserts {
4417     static_assert(dac_finalize_queue::ExtraSegCount == CFinalize::ExtraSegCount, "ExtraSegCount mismatch");
4418     static_assert(offsetof(dac_finalize_queue, m_FillPointers) == offsetof(CFinalize, m_FillPointers), "CFinalize layout mismatch");
4419 };
4420 #endif // FEATURE_PREMORTEM_FINALIZATION
4421
4422 inline
4423  size_t& dd_begin_data_size (dynamic_data* inst)
4424 {
4425   return inst->begin_data_size;
4426 }
4427 inline
4428  size_t& dd_survived_size (dynamic_data* inst)
4429 {
4430   return inst->survived_size;
4431 }
4432 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
4433 inline
4434  size_t& dd_num_npinned_plugs(dynamic_data* inst)
4435 {
4436   return inst->num_npinned_plugs;
4437 }
4438 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
4439 inline
4440 size_t& dd_pinned_survived_size (dynamic_data* inst)
4441 {
4442   return inst->pinned_survived_size;
4443 }
4444 inline
4445 size_t& dd_added_pinned_size (dynamic_data* inst)
4446 {
4447   return inst->added_pinned_size;
4448 }
4449 inline
4450 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
4451 {
4452   return inst->artificial_pinned_survived_size;
4453 }
4454 #ifdef SHORT_PLUGS
4455 inline
4456 size_t& dd_padding_size (dynamic_data* inst)
4457 {
4458   return inst->padding_size;
4459 }
4460 #endif //SHORT_PLUGS
4461 inline
4462  size_t& dd_current_size (dynamic_data* inst)
4463 {
4464   return inst->current_size;
4465 }
4466 inline
4467 float& dd_surv (dynamic_data* inst)
4468 {
4469   return inst->surv;
4470 }
4471 inline
4472 size_t& dd_freach_previous_promotion (dynamic_data* inst)
4473 {
4474   return inst->freach_previous_promotion;
4475 }
4476 inline
4477 size_t& dd_desired_allocation (dynamic_data* inst)
4478 {
4479   return inst->desired_allocation;
4480 }
4481 inline
4482 size_t& dd_collection_count (dynamic_data* inst)
4483 {
4484     return inst->collection_count;
4485 }
4486 inline
4487 size_t& dd_promoted_size (dynamic_data* inst)
4488 {
4489     return inst->promoted_size;
4490 }
4491 inline
4492 float& dd_limit (dynamic_data* inst)
4493 {
4494   return inst->sdata->limit;
4495 }
4496 inline
4497 float& dd_max_limit (dynamic_data* inst)
4498 {
4499   return inst->sdata->max_limit;
4500 }
4501 inline
4502 size_t& dd_max_size (dynamic_data* inst)
4503 {
4504   return inst->sdata->max_size;
4505 }
4506 inline
4507 size_t& dd_min_size (dynamic_data* inst)
4508 {
4509   return inst->min_size;
4510 }
4511 inline
4512 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
4513 {
4514   return inst->new_allocation;
4515 }
4516 inline
4517 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
4518 {
4519   return inst->gc_new_allocation;
4520 }
4521 inline
4522 size_t& dd_fragmentation_limit (dynamic_data* inst)
4523 {
4524   return inst->sdata->fragmentation_limit;
4525 }
4526 inline
4527 float& dd_fragmentation_burden_limit (dynamic_data* inst)
4528 {
4529   return inst->sdata->fragmentation_burden_limit;
4530 }
4531 inline
4532 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
4533 {
4534   return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
4535 }
4536 inline
4537 size_t& dd_fragmentation (dynamic_data* inst)
4538 {
4539   return inst->fragmentation;
4540 }
4541 inline
4542 size_t& dd_gc_clock (dynamic_data* inst)
4543 {
4544   return inst->gc_clock;
4545 }
4546 inline
4547 uint64_t& dd_time_clock (dynamic_data* inst)
4548 {
4549   return inst->time_clock;
4550 }
4551
4552 inline
4553 size_t& dd_gc_clock_interval (dynamic_data* inst)
4554 {
4555   return inst->sdata->gc_clock;
4556 }
4557 inline
4558 uint64_t& dd_time_clock_interval (dynamic_data* inst)
4559 {
4560   return inst->sdata->time_clock;
4561 }
4562
4563 inline
4564 size_t& dd_gc_elapsed_time (dynamic_data* inst)
4565 {
4566     return inst->gc_elapsed_time;
4567 }
4568
4569 inline
4570 float& dd_gc_speed (dynamic_data* inst)
4571 {
4572     return inst->gc_speed;
4573 }
4574
4575 inline
4576 alloc_context* generation_alloc_context (generation* inst)
4577 {
4578     return &(inst->allocation_context);
4579 }
4580
4581 inline
4582 uint8_t*& generation_allocation_start (generation* inst)
4583 {
4584   return inst->allocation_start;
4585 }
4586 inline
4587 uint8_t*& generation_allocation_pointer (generation* inst)
4588 {
4589   return inst->allocation_context.alloc_ptr;
4590 }
4591 inline
4592 uint8_t*& generation_allocation_limit (generation* inst)
4593 {
4594   return inst->allocation_context.alloc_limit;
4595 }
4596 inline
4597 allocator* generation_allocator (generation* inst)
4598 {
4599     return &inst->free_list_allocator;
4600 }
4601
4602 inline
4603 PTR_heap_segment& generation_start_segment (generation* inst)
4604 {
4605   return inst->start_segment;
4606 }
4607 inline
4608 heap_segment*& generation_allocation_segment (generation* inst)
4609 {
4610   return inst->allocation_segment;
4611 }
4612 inline
4613 uint8_t*& generation_plan_allocation_start (generation* inst)
4614 {
4615   return inst->plan_allocation_start;
4616 }
4617 inline
4618 size_t& generation_plan_allocation_start_size (generation* inst)
4619 {
4620   return inst->plan_allocation_start_size;
4621 }
4622 inline
4623 uint8_t*& generation_allocation_context_start_region (generation* inst)
4624 {
4625   return inst->allocation_context_start_region;
4626 }
4627 inline
4628 size_t& generation_free_list_space (generation* inst)
4629 {
4630   return inst->free_list_space;
4631 }
4632 inline
4633 size_t& generation_free_obj_space (generation* inst)
4634 {
4635   return inst->free_obj_space;
4636 }
4637 inline
4638 size_t& generation_allocation_size (generation* inst)
4639 {
4640   return inst->allocation_size;
4641 }
4642
4643 inline
4644 size_t& generation_pinned_allocated (generation* inst)
4645 {
4646     return inst->pinned_allocated;
4647 }
4648 inline
4649 size_t& generation_pinned_allocation_sweep_size (generation* inst)
4650 {
4651     return inst->pinned_allocation_sweep_size;
4652 }
4653 inline
4654 size_t& generation_pinned_allocation_compact_size (generation* inst)
4655 {
4656     return inst->pinned_allocation_compact_size;
4657 }
4658 inline
4659 size_t&  generation_free_list_allocated (generation* inst)
4660 {
4661     return inst->free_list_allocated;
4662 }
4663 inline
4664 size_t&  generation_end_seg_allocated (generation* inst)
4665 {
4666     return inst->end_seg_allocated;
4667 }
4668 inline
4669 BOOL&  generation_allocate_end_seg_p (generation* inst)
4670 {
4671     return inst->allocate_end_seg_p;
4672 }
4673 inline
4674 size_t& generation_condemned_allocated (generation* inst)
4675 {
4676     return inst->condemned_allocated;
4677 }
4678 inline
4679 size_t& generation_sweep_allocated (generation* inst)
4680 {
4681     return inst->sweep_allocated;
4682 }
4683 #ifdef FREE_USAGE_STATS
4684 inline
4685 size_t& generation_pinned_free_obj_space (generation* inst)
4686 {
4687     return inst->pinned_free_obj_space;
4688 }
4689 inline
4690 size_t& generation_allocated_in_pinned_free (generation* inst)
4691 {
4692     return inst->allocated_in_pinned_free;
4693 }
4694 inline
4695 size_t& generation_allocated_since_last_pin (generation* inst)
4696 {
4697     return inst->allocated_since_last_pin;
4698 }
4699 #endif //FREE_USAGE_STATS
4700 inline
4701 float generation_allocator_efficiency (generation* inst)
4702 {
4703     if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4704     {
4705         return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4706     }
4707     else
4708         return 0;
4709 }
4710 inline
4711 size_t generation_unusable_fragmentation (generation* inst)
4712 {
4713     return (size_t)(generation_free_obj_space (inst) +
4714                     (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
4715 }
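// A worked example of the two helpers above (illustrative numbers only):
//   free_list_allocated = 80k, free_obj_space = 20k, free_list_space = 200k
//   => allocator efficiency   = 80k / (80k + 20k)    = 0.8
//   => unusable fragmentation = 20k + (1 - 0.8)*200k = 60k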
4716
4717 #define plug_skew           sizeof(ObjHeader)
4718 // We always use USE_PADDING_TAIL when fitting so items on the free list should be
4719 // twice the min_obj_size.
4720 #define min_free_list       (2*min_obj_size)
4721 struct plug
4722 {
4723     uint8_t *  skew[plug_skew / sizeof(uint8_t *)];
4724 };
4725
4726 class pair
4727 {
4728 public:
4729     short left;
4730     short right;
4731 };
4732
4733 // Note that these encode the fact that plug_skew is a multiple of uint8_t*.
4734 // Each new field is prepended to the prior struct.
4735
4736 struct plug_and_pair
4737 {
4738     pair        m_pair;
4739     plug        m_plug;
4740 };
4741
4742 struct plug_and_reloc
4743 {
4744     ptrdiff_t   reloc;
4745     pair        m_pair;
4746     plug        m_plug;
4747 };
4748
4749 struct plug_and_gap
4750 {
4751     ptrdiff_t   gap;
4752     ptrdiff_t   reloc;
4753     union
4754     {
4755         pair    m_pair;
4756         int     lr;  //for clearing the entire pair in one instruction
4757     };
4758     plug        m_plug;
4759 };
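// For illustration - because each new field is prepended, the bookkeeping for a
// plug at address p sits just below it and is reached with a negative index,
// along the lines of (sketch only; the real accessors live elsewhere):
//
//   ptrdiff_t gap   = ((plug_and_gap*)p)[-1].gap;
//   ptrdiff_t reloc = ((plug_and_reloc*)p)[-1].reloc;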
4760
4761 struct gap_reloc_pair
4762 {
4763     size_t gap;
4764     size_t   reloc;
4765     pair        m_pair;
4766 };
4767
4768 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4769
4770 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4771 {
4772     plug_and_gap plugandgap;
4773 };
4774
4775 struct loh_obj_and_pad
4776 {
4777     ptrdiff_t   reloc;
4778     plug        m_plug;
4779 };
4780
4781 struct loh_padding_obj
4782 {
4783     uint8_t*    mt;
4784     size_t      len;
4785     ptrdiff_t   reloc;
4786     plug        m_plug;
4787 };
4788 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4789
4790 //flags description
4791 #define heap_segment_flags_readonly     1
4792 #define heap_segment_flags_inrange      2
4793 #define heap_segment_flags_unmappable   4
4794 #define heap_segment_flags_loh          8
4795 #ifdef BACKGROUND_GC
4796 #define heap_segment_flags_swept        16
4797 #define heap_segment_flags_decommitted  32
4798 #define heap_segment_flags_ma_committed 64
4799 // for segments whose mark array is only partially committed.
4800 #define heap_segment_flags_ma_pcommitted 128
4801 #define heap_segment_flags_uoh_delete   256
4802
4803 #define heap_segment_flags_poh          512
4804 #endif //BACKGROUND_GC
4805
4806 //need to be careful to keep enough pad items to fit a relocation node
4807 //padded to QuadWord before the plug_skew
4808
4809 class heap_segment
4810 {
4811 public:
4812     uint8_t*        allocated;
4813     uint8_t*        committed;
4814     uint8_t*        reserved;
4815     uint8_t*        used;
4816     uint8_t*        mem;
4817     size_t          flags;
4818     PTR_heap_segment next;
4819     uint8_t*        background_allocated;
4820 #ifdef MULTIPLE_HEAPS
4821     gc_heap*        heap;
4822 #ifdef _DEBUG
4823     uint8_t*        saved_committed;
4824     size_t          saved_desired_allocation;
4825 #endif // _DEBUG
4826 #endif //MULTIPLE_HEAPS
4827     uint8_t*        decommit_target;
4828     uint8_t*        plan_allocated;
4829     uint8_t*        saved_bg_allocated;
4830
4831 #ifdef _MSC_VER
4832 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4833 #pragma warning(disable:4324)  // structure was padded due to __declspec(align())
4834 #endif
4835     aligned_plug_and_gap padandplug;
4836 #ifdef _MSC_VER
4837 #pragma warning(default:4324)  // structure was padded due to __declspec(align())
4838 #endif
4839 };
4840
4841 static_assert(offsetof(dac_heap_segment, allocated) == offsetof(heap_segment, allocated), "DAC heap segment layout mismatch");
4842 static_assert(offsetof(dac_heap_segment, committed) == offsetof(heap_segment, committed), "DAC heap segment layout mismatch");
4843 static_assert(offsetof(dac_heap_segment, reserved) == offsetof(heap_segment, reserved), "DAC heap segment layout mismatch");
4844 static_assert(offsetof(dac_heap_segment, used) == offsetof(heap_segment, used), "DAC heap segment layout mismatch");
4845 static_assert(offsetof(dac_heap_segment, mem) == offsetof(heap_segment, mem), "DAC heap segment layout mismatch");
4846 static_assert(offsetof(dac_heap_segment, flags) == offsetof(heap_segment, flags), "DAC heap segment layout mismatch");
4847 static_assert(offsetof(dac_heap_segment, next) == offsetof(heap_segment, next), "DAC heap segment layout mismatch");
4848 static_assert(offsetof(dac_heap_segment, background_allocated) == offsetof(heap_segment, background_allocated), "DAC heap segment layout mismatch");
4849 #ifdef MULTIPLE_HEAPS
4850 static_assert(offsetof(dac_heap_segment, heap) == offsetof(heap_segment, heap), "DAC heap segment layout mismatch");
4851 #endif // MULTIPLE_HEAPS
4852
4853 inline
4854 uint8_t*& heap_segment_reserved (heap_segment* inst)
4855 {
4856   return inst->reserved;
4857 }
4858 inline
4859 uint8_t*& heap_segment_committed (heap_segment* inst)
4860 {
4861   return inst->committed;
4862 }
4863 inline
4864 uint8_t*& heap_segment_decommit_target (heap_segment* inst)
4865 {
4866     return inst->decommit_target;
4867 }
4868 inline
4869 uint8_t*& heap_segment_used (heap_segment* inst)
4870 {
4871   return inst->used;
4872 }
4873 inline
4874 uint8_t*& heap_segment_allocated (heap_segment* inst)
4875 {
4876   return inst->allocated;
4877 }
4878
4879 inline
4880 BOOL heap_segment_read_only_p (heap_segment* inst)
4881 {
4882     return ((inst->flags & heap_segment_flags_readonly) != 0);
4883 }
4884
4885 inline
4886 BOOL heap_segment_in_range_p (heap_segment* inst)
4887 {
4888     return (!(inst->flags & heap_segment_flags_readonly) ||
4889             ((inst->flags & heap_segment_flags_inrange) != 0));
4890 }
4891
4892 inline
4893 BOOL heap_segment_unmappable_p (heap_segment* inst)
4894 {
4895     return (!(inst->flags & heap_segment_flags_readonly) ||
4896             ((inst->flags & heap_segment_flags_unmappable) != 0));
4897 }
4898
4899 inline
4900 BOOL heap_segment_uoh_p (heap_segment * inst)
4901 {
4902     return !!(inst->flags & (heap_segment_flags_loh | heap_segment_flags_poh));
4903 }
4904
4905 inline gc_oh_num heap_segment_oh (heap_segment * inst)
4906 {
4907     if ((inst->flags & heap_segment_flags_loh) != 0)
4908     {
4909         return gc_oh_num::loh;
4910     }
4911     else if ((inst->flags & heap_segment_flags_poh) != 0)
4912     {
4913         return gc_oh_num::poh;
4914     }
4915     else
4916     {
4917         return gc_oh_num::soh;
4918     }
4919 }
4920
4921 #ifdef BACKGROUND_GC
4922 inline
4923 BOOL heap_segment_decommitted_p (heap_segment * inst)
4924 {
4925     return !!(inst->flags & heap_segment_flags_decommitted);
4926 }
4927 #endif //BACKGROUND_GC
4928
4929 inline
4930 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4931 {
4932   return inst->next;
4933 }
4934 inline
4935 uint8_t*& heap_segment_mem (heap_segment* inst)
4936 {
4937   return inst->mem;
4938 }
4939 inline
4940 uint8_t*& heap_segment_plan_allocated (heap_segment* inst)
4941 {
4942   return inst->plan_allocated;
4943 }
4944
4945 #ifdef BACKGROUND_GC
4946 inline
4947 uint8_t*& heap_segment_background_allocated (heap_segment* inst)
4948 {
4949   return inst->background_allocated;
4950 }
4951 inline
4952 uint8_t*& heap_segment_saved_bg_allocated (heap_segment* inst)
4953 {
4954   return inst->saved_bg_allocated;
4955 }
4956 #endif //BACKGROUND_GC
4957
4958 #ifdef MULTIPLE_HEAPS
4959 inline
4960 gc_heap*& heap_segment_heap (heap_segment* inst)
4961 {
4962     return inst->heap;
4963 }
4964 #endif //MULTIPLE_HEAPS
4965
4966 inline
4967 generation* gc_heap::generation_of (int  n)
4968 {
4969     assert (((n < total_generation_count) && (n >= 0)));
4970     return &generation_table [ n ];
4971 }
4972
4973 inline
4974 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4975 {
4976     return &dynamic_data_table [ gen_number ];
4977 }
4978
4979 #define GC_PAGE_SIZE 0x1000
4980
4981 #define card_word_width ((size_t)32)
4982
4983 //
4984 // The value of card_size is determined empirically according to the average size of an object.
4985 // In the code we also rely on the assumption that one card_table entry (uint32_t) covers an entire os page
4986 //
4987 #if defined (HOST_64BIT)
4988 #define card_size ((size_t)(2*GC_PAGE_SIZE/card_word_width))
4989 #else
4990 #define card_size ((size_t)(GC_PAGE_SIZE/card_word_width))
4991 #endif // HOST_64BIT
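// Worked out for illustration: with GC_PAGE_SIZE = 0x1000 and card_word_width = 32,
// card_size is 256 bytes on 64-bit (128 on 32-bit), so one uint32_t card_table
// entry maps 32 * card_size = 0x2000 bytes on 64-bit (0x1000 on 32-bit) - at
// least a whole OS page, which is what the comment above relies on.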
4992
4993 inline
4994 size_t card_word (size_t card)
4995 {
4996     return card / card_word_width;
4997 }
4998
4999 inline
5000 unsigned card_bit (size_t card)
5001 {
5002     return (unsigned)(card % card_word_width);
5003 }
5004
5005 inline
5006 size_t gcard_of (uint8_t* object)
5007 {
5008     return (size_t)(object) / card_size;
5009 }
5010 #ifdef FEATURE_CARD_MARKING_STEALING
5011 // make this 8 card bundle bits (2 MB in 64-bit architectures, 1 MB in 32-bit) - should be at least 1 card bundle bit
5012 #define CARD_MARKING_STEALING_GRANULARITY (card_size*card_word_width*card_bundle_size*8)
5013
5014 #define THIS_ARG    , __this
5015 class card_marking_enumerator
5016 {
5017 private:
5018     heap_segment*       segment;
5019     uint8_t*            gc_low;
5020     uint32_t            segment_start_chunk_index;
5021     VOLATILE(uint32_t)* chunk_index_counter;
5022     uint8_t*            chunk_high;
5023     uint32_t            old_chunk_index;
5024     static const uint32_t INVALID_CHUNK_INDEX = ~0u;
5025
5026 public:
5027     card_marking_enumerator(heap_segment* seg, uint8_t* low, VOLATILE(uint32_t)* counter) :
5028         segment(seg), gc_low(low), segment_start_chunk_index(0), chunk_index_counter(counter), chunk_high(nullptr), old_chunk_index(INVALID_CHUNK_INDEX)
5029     {
5030     }
5031
5032     // move to the next chunk in this segment - return false if no more chunks in this segment
5033     bool move_next(heap_segment* seg, uint8_t*& low, uint8_t*& high);
5034
5035     void exhaust_segment(heap_segment* seg)
5036     {
5037         uint8_t* low;
5038         uint8_t* high;
5039         // make sure no more chunks in this segment - do this via move_next because we want to keep
5040         // incrementing the chunk_index_counter rather than updating it via interlocked compare exchange
5041         while (move_next(seg, low, high))
5042             ;
5043     }
5044
5045     uint8_t* get_chunk_high()
5046     {
5047         return chunk_high;
5048     }
5049 };
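// Illustrative calling pattern for the enumerator above (the real card marking
// loop is in gc.cpp; this only shows the intended shape):
//
//   card_marking_enumerator card_mark_enumerator (seg, gc_low, &card_mark_chunk_index_soh);
//   uint8_t* chunk_low;
//   uint8_t* chunk_high;
//   while (card_mark_enumerator.move_next (seg, chunk_low, chunk_high))
//   {
//       // mark the set cards that fall inside [chunk_low, chunk_high)
//   }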
5050 #else
5051 #define THIS_ARG
5052 #endif // FEATURE_CARD_MARKING_STEALING