// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.


//
// #Overview
//
// GC automatically manages memory allocated by managed code.
// The design doc for GC can be found at Documentation/botr/garbage-collection.md
//
// This file includes both the code for GC and the allocator. The most common
// case for a GC to be triggered is from the allocator code. See
// code:#try_allocate_more_space where it calls GarbageCollectGeneration.
//
// Entry points for the allocator are GCHeap::Alloc* which are called by the
// allocation helpers in gcscan.cpp
//

#include "gcpriv.h"

#define USE_INTROSORT

// We just needed a simple random number generator for testing.
class gc_rand
{
public:
    static uint64_t x;

    static uint64_t get_rand()
    {
        x = (314159269*x+278281) & 0x7FFFFFFF;
        return x;
    }

    // obtain a random number in the range 0 .. r-1
    static uint64_t get_rand(uint64_t r) {
        // r must be small enough that the multiplication below cannot
        // overflow a uint64_t (get_rand() returns a 31-bit value)
        uint64_t x = (uint64_t)((get_rand() * r) >> 31);
        return x;
    }
};

uint64_t gc_rand::x = 0;
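
// Illustrative sketch (not part of the original file): gc_rand::get_rand(r)
// maps a 31-bit random value into [0, r) by fixed-point scaling,
// (x * r) >> 31, rather than the more common (x % r). A minimal standalone
// demonstration of the same technique, assuming only standard C++:
#if 0
#include <cstdint>
#include <cstdio>

static uint64_t lcg_state = 1;

// Same 31-bit linear congruential step as gc_rand::get_rand().
static uint64_t lcg_next()
{
    lcg_state = (314159269 * lcg_state + 278281) & 0x7FFFFFFF;
    return lcg_state;
}

// Scale into [0, r): since lcg_next() < 2^31, (lcg_next() * r) >> 31 < r.
static uint64_t lcg_bounded(uint64_t r)
{
    return (lcg_next() * r) >> 31;
}

int main()
{
    for (int i = 0; i < 5; i++)
        printf("%llu\n", (unsigned long long)lcg_bounded(10)); // values in 0..9
    return 0;
}
#endif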

#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
BOOL bgc_heap_walk_for_etw_p = FALSE;
#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE

#if defined(FEATURE_REDHAWK)
#define MAYBE_UNUSED_VAR(v) v = v
#else
#define MAYBE_UNUSED_VAR(v)
#endif // FEATURE_REDHAWK

#define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))

#ifdef SERVER_GC
#define partial_size_th 100
#define num_partial_refs 64
#else //SERVER_GC
#define partial_size_th 100
#define num_partial_refs 32
#endif //SERVER_GC

#define demotion_plug_len_th (6*1024*1024)

#ifdef BIT64
#define MARK_STACK_INITIAL_LENGTH 1024
#else
#define MARK_STACK_INITIAL_LENGTH 128
#endif // BIT64

#define LOH_PIN_QUEUE_LENGTH 100
#define LOH_PIN_DECAY 10

#ifdef BIT64
// Right now we support a maximum of 1024 procs - meaning that we will create
// at most that many GC threads and GC heaps.
#define MAX_SUPPORTED_CPUS 1024
#else
#define MAX_SUPPORTED_CPUS 64
#endif // BIT64

uint32_t yp_spin_count_unit = 0;
size_t loh_size_threshold = LARGE_OBJECT_SIZE;

#ifdef GC_CONFIG_DRIVEN
int compact_ratio = 0;
#endif //GC_CONFIG_DRIVEN

// See comments in reset_memory.
BOOL reset_mm_p = TRUE;

bool g_fFinalizerRunOnShutDown = false;

#ifdef FEATURE_SVR_GC
bool g_built_with_svr_gc = true;
#else
bool g_built_with_svr_gc = false;
#endif // FEATURE_SVR_GC

#if defined(BUILDENV_DEBUG)
uint8_t g_build_variant = 0;
#elif defined(BUILDENV_CHECKED)
uint8_t g_build_variant = 1;
#else
uint8_t g_build_variant = 2;
#endif // defined(BUILDENV_DEBUG)

VOLATILE(int32_t) g_no_gc_lock = -1;

#if defined (TRACE_GC) && !defined (DACCESS_COMPILE)
const char * const allocation_state_str[] = {
    "start",
    "can_allocate",
    "cant_allocate",
    "try_fit",
    "try_fit_new_seg",
    "try_fit_new_seg_after_cg",
    "try_fit_no_seg",
    "try_fit_after_cg",
    "try_fit_after_bgc",
    "try_free_full_seg_in_bgc",
    "try_free_after_bgc",
    "try_seg_end",
    "acquire_seg",
    "acquire_seg_after_cg",
    "acquire_seg_after_bgc",
    "check_and_wait_for_bgc",
    "trigger_full_compact_gc",
    "trigger_ephemeral_gc",
    "trigger_2nd_ephemeral_gc",
    "check_retry_seg"
};

const char * const msl_take_state_str[] = {
    "get_large_seg",
    "bgc_loh_sweep",
    "wait_bgc",
    "block_gc",
    "clr_mem",
    "clr_large_mem",
    "t_eph_gc",
    "t_full_gc",
    "alloc_small",
    "alloc_large",
    "alloc_small_cant",
    "alloc_large_cant",
    "try_alloc",
    "try_budget"
};
#endif //TRACE_GC && !DACCESS_COMPILE


// Keep this in sync with the definition of gc_reason
#if (defined(DT_LOG) || defined(TRACE_GC)) && !defined (DACCESS_COMPILE)
static const char* const str_gc_reasons[] =
{
    "alloc_soh",
    "induced",
    "lowmem",
    "empty",
    "alloc_loh",
    "oos_soh",
    "oos_loh",
    "induced_noforce",
    "gcstress",
    "induced_lowmem",
    "induced_compacting",
    "lowmemory_host",
    "pm_full_gc",
    "lowmemory_host_blocking"
};

static const char* const str_gc_pause_modes[] =
{
    "batch",
    "interactive",
    "low_latency",
    "sustained_low_latency",
    "no_gc"
};
#endif // (defined(DT_LOG) || defined(TRACE_GC)) && !DACCESS_COMPILE

inline
BOOL is_induced (gc_reason reason)
{
    return ((reason == reason_induced) ||
            (reason == reason_induced_noforce) ||
            (reason == reason_lowmemory) ||
            (reason == reason_lowmemory_blocking) ||
            (reason == reason_induced_compacting) ||
            (reason == reason_lowmemory_host) ||
            (reason == reason_lowmemory_host_blocking));
}

inline
BOOL is_induced_blocking (gc_reason reason)
{
    return ((reason == reason_induced) ||
            (reason == reason_lowmemory_blocking) ||
            (reason == reason_induced_compacting) ||
            (reason == reason_lowmemory_host_blocking));
}

#ifndef DACCESS_COMPILE
int64_t qpf;

size_t GetHighPrecisionTimeStamp()
{
    int64_t ts = GCToOSInterface::QueryPerformanceCounter();

    return (size_t)(ts / (qpf / 1000));
}
#endif // !DACCESS_COMPILE
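
// Illustrative sketch (not part of the original file): qpf holds the counter
// frequency in ticks per second, so qpf / 1000 is ticks per millisecond and
// the division above yields a millisecond timestamp. A standalone rendition
// of the same conversion (ticks_to_ms is a hypothetical helper):
#if 0
#include <cstdint>
#include <cstdio>

static uint64_t ticks_to_ms(uint64_t ticks, uint64_t ticks_per_second)
{
    // Dividing the frequency first (ticks_per_second / 1000 = ticks per ms)
    // keeps the intermediate value small, at the cost of a little precision
    // when the frequency is not a multiple of 1000.
    return ticks / (ticks_per_second / 1000);
}

int main()
{
    // e.g. a 10 MHz counter: 50,000,000 ticks -> 5000 ms
    printf("%llu ms\n", (unsigned long long)ticks_to_ms(50000000ull, 10000000ull));
    return 0;
}
#endif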

#ifdef GC_STATS
// There is a current and a prior copy of the statistics. This allows us to display deltas per reporting
// interval, as well as running totals. The 'min' and 'max' values require special treatment. They are
// reset (zeroed) in the current statistics when we begin a new interval and they are updated via a
// comparison with the global min/max.
GCStatistics g_GCStatistics;
GCStatistics g_LastGCStatistics;

char* GCStatistics::logFileName = NULL;
FILE*  GCStatistics::logFile = NULL;

void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
{
#ifdef BACKGROUND_GC
    if (settings.concurrent)
    {
        bgc.Accumulate((uint32_t)timeInMSec*1000);
        cntBGC++;
    }
    else if (settings.background_p)
    {
        fgc.Accumulate((uint32_t)timeInMSec*1000);
        cntFGC++;
        if (settings.compaction)
            cntCompactFGC++;
        assert(settings.condemned_generation < max_generation);
        cntFGCGen[settings.condemned_generation]++;
    }
    else
#endif // BACKGROUND_GC
    {
        ngc.Accumulate((uint32_t)timeInMSec*1000);
        cntNGC++;
        if (settings.compaction)
            cntCompactNGC++;
        cntNGCGen[settings.condemned_generation]++;
    }

    if (is_induced (settings.reason))
        cntReasons[(int)reason_induced]++;
    else if (settings.stress_induced)
        cntReasons[(int)reason_gcstress]++;
    else
        cntReasons[(int)settings.reason]++;

#ifdef BACKGROUND_GC
    if (settings.concurrent || !settings.background_p)
    {
#endif // BACKGROUND_GC
        RollOverIfNeeded();
#ifdef BACKGROUND_GC
    }
#endif // BACKGROUND_GC
}

void GCStatistics::Initialize()
{
    LIMITED_METHOD_CONTRACT;
    // For efficiency's sake we're taking a dependency on the layout of a C++ object
    // with a vtable. Protect against violations of our premise:
    static_assert(offsetof(GCStatistics, cntDisplay) == sizeof(void*),
            "The first field of GCStatistics follows the pointer sized vtable");

    int podOffs = offsetof(GCStatistics, cntDisplay);       // offset of the first POD field
    memset((uint8_t*)(&g_GCStatistics)+podOffs, 0, sizeof(g_GCStatistics)-podOffs);
    memset((uint8_t*)(&g_LastGCStatistics)+podOffs, 0, sizeof(g_LastGCStatistics)-podOffs);
}
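
// Illustrative sketch (not part of the original file): Initialize() zeroes
// everything after the vtable pointer by starting the memset at the offset of
// the first data member. The same pattern in standalone form, with a
// hypothetical Stats class:
#if 0
#include <cstddef>
#include <cstdint>
#include <cstring>

struct Stats
{
    virtual void Display() {}   // the vtable pointer occupies the first word
    int cntDisplay;             // first POD field
    int counters[8];
};

static Stats g_stats;

static void ResetStats()
{
    // Guard the layout assumption the memset below depends on.
    static_assert(offsetof(Stats, cntDisplay) == sizeof(void*),
                  "first field must immediately follow the vtable pointer");
    size_t podOffs = offsetof(Stats, cntDisplay);
    memset((uint8_t*)&g_stats + podOffs, 0, sizeof(g_stats) - podOffs);
}
#endif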

void GCStatistics::DisplayAndUpdate()
{
    LIMITED_METHOD_CONTRACT;

    if (logFileName == NULL || logFile == NULL)
        return;

    {
        if (cntDisplay == 0)
            fprintf(logFile, "\nGCMix **** Initialize *****\n\n");

        fprintf(logFile, "GCMix **** Summary ***** %d\n", cntDisplay);

        // NGC summary (total, timing info)
        ngc.DisplayAndUpdate(logFile, "NGC ", &g_LastGCStatistics.ngc, cntNGC, g_LastGCStatistics.cntNGC, msec);

        // FGC summary (total, timing info)
        fgc.DisplayAndUpdate(logFile, "FGC ", &g_LastGCStatistics.fgc, cntFGC, g_LastGCStatistics.cntFGC, msec);

        // BGC summary
        bgc.DisplayAndUpdate(logFile, "BGC ", &g_LastGCStatistics.bgc, cntBGC, g_LastGCStatistics.cntBGC, msec);

        // NGC/FGC break out by generation & compacting vs. sweeping
        fprintf(logFile, "NGC   ");
        for (int i = max_generation; i >= 0; --i)
            fprintf(logFile, "gen%d %d (%d). ", i, cntNGCGen[i]-g_LastGCStatistics.cntNGCGen[i], cntNGCGen[i]);
        fprintf(logFile, "\n");

        fprintf(logFile, "FGC   ");
        for (int i = max_generation-1; i >= 0; --i)
            fprintf(logFile, "gen%d %d (%d). ", i, cntFGCGen[i]-g_LastGCStatistics.cntFGCGen[i], cntFGCGen[i]);
        fprintf(logFile, "\n");

        // Compacting vs. Sweeping break out
        int _cntSweep = cntNGC-cntCompactNGC;
        int _cntLastSweep = g_LastGCStatistics.cntNGC-g_LastGCStatistics.cntCompactNGC;
        fprintf(logFile, "NGC   Sweeping %d (%d) Compacting %d (%d)\n",
               _cntSweep - _cntLastSweep, _cntSweep,
               cntCompactNGC - g_LastGCStatistics.cntCompactNGC, cntCompactNGC);

        _cntSweep = cntFGC-cntCompactFGC;
        _cntLastSweep = g_LastGCStatistics.cntFGC-g_LastGCStatistics.cntCompactFGC;
        fprintf(logFile, "FGC   Sweeping %d (%d) Compacting %d (%d)\n",
               _cntSweep - _cntLastSweep, _cntSweep,
               cntCompactFGC - g_LastGCStatistics.cntCompactFGC, cntCompactFGC);

#ifdef TRACE_GC
        // GC reasons...
        for (int reason=(int)reason_alloc_soh; reason <= (int)reason_gcstress; ++reason)
        {
            if (cntReasons[reason] != 0)
                fprintf(logFile, "%s %d (%d). ", str_gc_reasons[reason],
                    cntReasons[reason]-g_LastGCStatistics.cntReasons[reason], cntReasons[reason]);
        }
#endif // TRACE_GC
        fprintf(logFile, "\n\n");

        // flush the log file...
        fflush(logFile);
    }

    g_LastGCStatistics = *this;

    ngc.Reset();
    fgc.Reset();
    bgc.Reset();
}

#endif // GC_STATS

inline
size_t round_up_power2 (size_t size)
{
    // Get the 0-based index of the most-significant bit in size-1.
    // If the call failed (because size-1 is zero), size must be 1,
    // so return 1 (because 1 rounds up to itself).
    DWORD highest_set_bit_index;
    if (0 ==
#ifdef BIT64
        BitScanReverse64(
#else
        BitScanReverse(
#endif
            &highest_set_bit_index, size - 1)) { return 1; }

    // The size == 0 case (which would have overflowed to SIZE_MAX when decremented)
    // is handled below by relying on the fact that highest_set_bit_index is the maximum value
    // (31 or 63, depending on sizeof(size_t)) and left-shifting a value >= 2 by that
    // number of bits shifts in zeros from the right, resulting in an output of zero.
    return static_cast<size_t>(2) << highest_set_bit_index;
}

inline
size_t round_down_power2 (size_t size)
{
    // Get the 0-based index of the most-significant bit in size.
    // If the call failed, size must be zero so return zero.
    DWORD highest_set_bit_index;
    if (0 ==
#ifdef BIT64
        BitScanReverse64(
#else
        BitScanReverse(
#endif
            &highest_set_bit_index, size)) { return 0; }

    // Left-shift 1 by highest_set_bit_index to get back a value containing only
    // the most-significant set bit of size, i.e. size rounded down
    // to the next power-of-two value.
    return static_cast<size_t>(1) << highest_set_bit_index;
}

// Get the 0-based index of the most-significant bit in the value.
// Returns -1 if the input value is zero (i.e. has no set bits).
inline
int index_of_highest_set_bit (size_t value)
{
    // Get the 0-based index of the most-significant bit in the value.
    // If the call failed (because value is zero), return -1.
    DWORD highest_set_bit_index;
    return (0 ==
#ifdef BIT64
        BitScanReverse64(
#else
        BitScanReverse(
#endif
            &highest_set_bit_index, value)) ? -1 : static_cast<int>(highest_set_bit_index);
}
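
// Illustrative sketch (not part of the original file): expected behavior of
// the three bit-scan helpers above, checked with GCC/Clang builtins standing
// in for BitScanReverse in a standalone program:
#if 0
#include <cassert>
#include <cstddef>

static int msb_index(size_t v)          // index_of_highest_set_bit analogue
{
    return (v == 0) ? -1 : (63 - __builtin_clzll(v));
}

static size_t round_up_pow2(size_t v)   // round_up_power2 analogue
{
    return (v <= 1) ? 1 : ((size_t)2 << msb_index(v - 1));
}

static size_t round_down_pow2(size_t v) // round_down_power2 analogue
{
    return (v == 0) ? 0 : ((size_t)1 << msb_index(v));
}

int main()
{
    assert(round_up_pow2(1) == 1 && round_up_pow2(5) == 8);
    assert(round_up_pow2(8) == 8 && round_up_pow2(9) == 16);
    assert(round_down_pow2(5) == 4 && round_down_pow2(8) == 8);
    assert(msb_index(0) == -1 && msb_index(1) == 0 && msb_index(12) == 3);
    return 0;
}
#endif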

inline
int relative_index_power2_plug (size_t power2)
{
    int index = index_of_highest_set_bit (power2);
    assert (index <= MAX_INDEX_POWER2);

    return ((index < MIN_INDEX_POWER2) ? 0 : (index - MIN_INDEX_POWER2));
}

inline
int relative_index_power2_free_space (size_t power2)
{
    int index = index_of_highest_set_bit (power2);
    assert (index <= MAX_INDEX_POWER2);

    return ((index < MIN_INDEX_POWER2) ? -1 : (index - MIN_INDEX_POWER2));
}

#ifdef BACKGROUND_GC
uint32_t bgc_alloc_spin_count = 140;
uint32_t bgc_alloc_spin_count_loh = 16;
uint32_t bgc_alloc_spin = 2;


inline
void c_write (uint32_t& place, uint32_t value)
{
    Interlocked::Exchange (&place, value);
    //place = value;
}

#ifndef DACCESS_COMPILE
// If every heap's gen2 or gen3 size is less than this threshold we will do a blocking GC.
const size_t bgc_min_per_heap = 4*1024*1024;

int gc_heap::gchist_index = 0;
gc_mechanisms_store gc_heap::gchist[max_history_count];

#ifndef MULTIPLE_HEAPS
size_t gc_heap::total_promoted_bytes = 0;
VOLATILE(bgc_state) gc_heap::current_bgc_state = bgc_not_in_process;
int gc_heap::gchist_index_per_heap = 0;
gc_heap::gc_history gc_heap::gchist_per_heap[max_history_count];
#endif //MULTIPLE_HEAPS

void gc_heap::add_to_history_per_heap()
{
#ifdef GC_HISTORY
    gc_history* current_hist = &gchist_per_heap[gchist_index_per_heap];
    current_hist->gc_index = settings.gc_index;
    current_hist->current_bgc_state = current_bgc_state;
    size_t elapsed = dd_gc_elapsed_time (dynamic_data_of (0));
    current_hist->gc_time_ms = (uint32_t)elapsed;
    current_hist->gc_efficiency = (elapsed ? (total_promoted_bytes / elapsed) : total_promoted_bytes);
    current_hist->eph_low = generation_allocation_start (generation_of (max_generation-1));
    current_hist->gen0_start = generation_allocation_start (generation_of (0));
    current_hist->eph_high = heap_segment_allocated (ephemeral_heap_segment);
#ifdef BACKGROUND_GC
    current_hist->bgc_lowest = background_saved_lowest_address;
    current_hist->bgc_highest = background_saved_highest_address;
#endif //BACKGROUND_GC
    current_hist->fgc_lowest = lowest_address;
    current_hist->fgc_highest = highest_address;
    current_hist->g_lowest = g_gc_lowest_address;
    current_hist->g_highest = g_gc_highest_address;

    gchist_index_per_heap++;
    if (gchist_index_per_heap == max_history_count)
    {
        gchist_index_per_heap = 0;
    }
#endif //GC_HISTORY
}

void gc_heap::add_to_history()
{
#ifdef GC_HISTORY
    gc_mechanisms_store* current_settings = &gchist[gchist_index];
    current_settings->store (&settings);

    gchist_index++;
    if (gchist_index == max_history_count)
    {
        gchist_index = 0;
    }
#endif //GC_HISTORY
}
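
// Illustrative sketch (not part of the original file): both history methods
// above use the same fixed-size circular buffer idiom - write at the current
// index, then wrap the index to zero when it reaches the capacity, silently
// overwriting the oldest entry. The idiom in isolation:
#if 0
#include <cstddef>

template <typename T, size_t N>
struct history_ring
{
    T items[N];
    size_t next = 0;

    void add(const T& item)
    {
        items[next] = item;
        next++;
        if (next == N)      // wrap instead of growing; the oldest entry is lost
            next = 0;
    }
};
#endif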

#endif //DACCESS_COMPILE
#endif //BACKGROUND_GC

#if defined(TRACE_GC) && !defined(DACCESS_COMPILE)
BOOL   gc_log_on = TRUE;
FILE* gc_log = NULL;
size_t gc_log_file_size = 0;

size_t gc_buffer_index = 0;
size_t max_gc_buffers = 0;

static CLRCriticalSection gc_log_lock;

// we keep this much in a buffer and only flush when the buffer is full
#define gc_log_buffer_size (1024*1024)
uint8_t* gc_log_buffer = 0;
size_t gc_log_buffer_offset = 0;

void log_va_msg(const char *fmt, va_list args)
{
    gc_log_lock.Enter();

    const int BUFFERSIZE = 512;
    static char rgchBuffer[BUFFERSIZE];
    char *  pBuffer  = &rgchBuffer[0];

    pBuffer[0] = '\n';
    int buffer_start = 1;
    int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging());
    buffer_start += pid_len;
    memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
    int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args);
    if (msg_len == -1)
    {
        msg_len = BUFFERSIZE - buffer_start;
    }

    msg_len += buffer_start;

    if ((gc_log_buffer_offset + msg_len) > (gc_log_buffer_size - 12))
    {
        char index_str[8];
        memset (index_str, '-', 8);
        sprintf_s (index_str, _countof(index_str), "%d", (int)gc_buffer_index);
        gc_log_buffer[gc_log_buffer_offset] = '\n';
        memcpy (gc_log_buffer + (gc_log_buffer_offset + 1), index_str, 8);

        gc_buffer_index++;
        if (gc_buffer_index > max_gc_buffers)
        {
            fseek (gc_log, 0, SEEK_SET);
            gc_buffer_index = 0;
        }
        fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log);
        fflush(gc_log);
        memset (gc_log_buffer, '*', gc_log_buffer_size);
        gc_log_buffer_offset = 0;
    }

    memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len);
    gc_log_buffer_offset += msg_len;

    gc_log_lock.Leave();
}

void GCLog (const char *fmt, ... )
{
    if (gc_log_on && (gc_log != NULL))
    {
        va_list     args;
        va_start(args, fmt);
        log_va_msg (fmt, args);
        va_end(args);
    }
}
#endif // TRACE_GC && !DACCESS_COMPILE
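
// Illustrative sketch (not part of the original file): the logger above
// accumulates formatted messages in a large buffer, flushes a whole buffer to
// the file at a time, and rewinds the file once max_gc_buffers buffers have
// been written, giving a bounded, self-wrapping log. The buffer-then-flush
// skeleton in standalone form:
#if 0
#include <cstdio>
#include <cstring>

const size_t kBufSize = 1024 * 1024;
static char   buf[kBufSize];
static size_t buf_offset = 0;

static void buffered_write(FILE* f, const char* msg, size_t len)
{
    if (buf_offset + len > kBufSize)    // no room: flush, then reuse the buffer
    {
        fwrite(buf, buf_offset, 1, f);
        fflush(f);
        buf_offset = 0;
    }
    memcpy(buf + buf_offset, msg, len);
    buf_offset += len;
}
#endif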

#if defined(GC_CONFIG_DRIVEN) && !defined(DACCESS_COMPILE)

BOOL   gc_config_log_on = FALSE;
FILE* gc_config_log = NULL;

// we keep this much in a buffer and only flush when the buffer is full
#define gc_config_log_buffer_size (1*1024) // TEMP
uint8_t* gc_config_log_buffer = 0;
size_t gc_config_log_buffer_offset = 0;

// For config logging, since we log so little, we keep the whole history. Also
// it's only ever logged by one thread so there is no need to synchronize.
void log_va_msg_config(const char *fmt, va_list args)
{
    const int BUFFERSIZE = 256;
    static char rgchBuffer[BUFFERSIZE];
    char *  pBuffer  = &rgchBuffer[0];

    pBuffer[0] = '\n';
    int buffer_start = 1;
    int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args );
    assert (msg_len != -1);
    msg_len += buffer_start;

    if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size)
    {
        fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log);
        fflush(gc_config_log);
        gc_config_log_buffer_offset = 0;
    }

    memcpy (gc_config_log_buffer + gc_config_log_buffer_offset, pBuffer, msg_len);
    gc_config_log_buffer_offset += msg_len;
}

void GCLogConfig (const char *fmt, ... )
{
    if (gc_config_log_on && (gc_config_log != NULL))
    {
        va_list     args;
        va_start( args, fmt );
        log_va_msg_config (fmt, args);
        va_end( args );
    }
}
#endif // GC_CONFIG_DRIVEN && !DACCESS_COMPILE

#ifdef SYNCHRONIZATION_STATS

// Number of GCs we have done since we last logged.
static unsigned int         gc_count_during_log;
// In ms. This is how often we print out stats.
static const unsigned int   log_interval = 5000;
// Time (in ms) when we start a new log interval.
static unsigned int         log_start_tick;
static unsigned int         gc_lock_contended;
static int64_t              log_start_hires;
// Cycles accumulated in SuspendEE during log_interval.
static uint64_t             suspend_ee_during_log;
// Cycles accumulated in RestartEE during log_interval.
static uint64_t             restart_ee_during_log;
static uint64_t             gc_during_log;

#endif //SYNCHRONIZATION_STATS

void
init_sync_log_stats()
{
#ifdef SYNCHRONIZATION_STATS
    if (gc_count_during_log == 0)
    {
        gc_heap::init_sync_stats();
        suspend_ee_during_log = 0;
        restart_ee_during_log = 0;
        gc_during_log = 0;
        gc_lock_contended = 0;

        log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
        log_start_hires = GCToOSInterface::QueryPerformanceCounter();
    }
    gc_count_during_log++;
#endif //SYNCHRONIZATION_STATS
}

void
process_sync_log_stats()
{
#ifdef SYNCHRONIZATION_STATS

    unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick;

    if (log_elapsed > log_interval)
    {
        uint64_t total = GCToOSInterface::QueryPerformanceCounter() - log_start_hires;
        // Print out the cycles we spent on average in each suspend and restart.
        printf("\n_________________________________________________________________________________\n"
            "Past %d(s): #%3d GCs; Total gc_lock contended: %8u; GC: %12u\n"
            "SuspendEE: %8u; RestartEE: %8u GC %.3f%%\n",
            log_interval / 1000,
            gc_count_during_log,
            gc_lock_contended,
            (unsigned int)(gc_during_log / gc_count_during_log),
            (unsigned int)(suspend_ee_during_log / gc_count_during_log),
            (unsigned int)(restart_ee_during_log / gc_count_during_log),
            (double)(100.0f * gc_during_log / total));
        gc_heap::print_sync_stats(gc_count_during_log);

        gc_count_during_log = 0;
    }
#endif //SYNCHRONIZATION_STATS
}

#ifdef MULTIPLE_HEAPS

enum gc_join_stage
{
    gc_join_init_cpu_mapping = 0,
    gc_join_done = 1,
    gc_join_generation_determined = 2,
    gc_join_begin_mark_phase = 3,
    gc_join_scan_dependent_handles = 4,
    gc_join_rescan_dependent_handles = 5,
    gc_join_scan_sizedref_done = 6,
    gc_join_null_dead_short_weak = 7,
    gc_join_scan_finalization = 8,
    gc_join_null_dead_long_weak = 9,
    gc_join_null_dead_syncblk = 10,
    gc_join_decide_on_compaction = 11,
    gc_join_rearrange_segs_compaction = 12,
    gc_join_adjust_handle_age_compact = 13,
    gc_join_adjust_handle_age_sweep = 14,
    gc_join_begin_relocate_phase = 15,
    gc_join_relocate_phase_done = 16,
    gc_join_verify_objects_done = 17,
    gc_join_start_bgc = 18,
    gc_join_restart_ee = 19,
    gc_join_concurrent_overflow = 20,
    gc_join_suspend_ee = 21,
    gc_join_bgc_after_ephemeral = 22,
    gc_join_allow_fgc = 23,
    gc_join_bgc_sweep = 24,
    gc_join_suspend_ee_verify = 25,
    gc_join_restart_ee_verify = 26,
    gc_join_set_state_free = 27,
    gc_r_join_update_card_bundle = 28,
    gc_join_after_absorb = 29,
    gc_join_verify_copy_table = 30,
    gc_join_after_reset = 31,
    gc_join_after_ephemeral_sweep = 32,
    gc_join_after_profiler_heap_walk = 33,
    gc_join_minimal_gc = 34,
    gc_join_after_commit_soh_no_gc = 35,
    gc_join_expand_loh_no_gc = 36,
    gc_join_final_no_gc = 37,
    gc_join_disable_software_write_watch = 38,
    gc_join_max = 39
};

enum gc_join_flavor
{
    join_flavor_server_gc = 0,
    join_flavor_bgc = 1
};

#define first_thread_arrived 2
#pragma warning(push)
#pragma warning(disable:4324) // don't complain if DECLSPEC_ALIGN actually pads
struct DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) join_structure
{
    // Shared, non-volatile; kept on a separate cache line to prevent eviction.
    int n_threads;

    // Keep the polling/wait structures on a separate line; written once per join.
    DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
    GCEvent joined_event[3]; // the last event in the array is only used for first_thread_arrived.
    Volatile<int> lock_color;
    VOLATILE(BOOL) wait_done;
    VOLATILE(BOOL) joined_p;

    // Keep the volatile counted locks on a separate cache line; written many times per join.
    DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
    VOLATILE(int32_t) join_lock;
    VOLATILE(int32_t) r_join_lock;

};
#pragma warning(pop)
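
// Illustrative sketch (not part of the original file): join_structure keeps
// the heavily written interlocked counters on their own cache line so that
// every Decrement of join_lock does not invalidate the line holding the
// read-mostly fields (false sharing). In standard C++ the same layout is
// spelled with alignas; 64 stands in for HS_CACHE_LINE_SIZE here:
#if 0
#include <atomic>

struct alignas(64) padded_counter
{
    std::atomic<int> value;
    // alignas(64) padding keeps neighbouring members on distinct cache lines.
};

struct join_state_sketch
{
    int n_threads;              // read-mostly; on its own line
    padded_counter join_lock;   // written by every joining thread
    padded_counter r_join_lock;
};
#endif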

enum join_type
{
    type_last_join = 0,
    type_join = 1,
    type_restart = 2,
    type_first_r_join = 3,
    type_r_join = 4
};

enum join_time
{
    time_start = 0,
    time_end = 1
};

enum join_heap_index
{
    join_heap_restart = 100,
    join_heap_r_restart = 200
};

struct join_event
{
    uint32_t heap;
    join_time time;
    join_type type;
};

class t_join
{
    join_structure join_struct;

    int id;
    gc_join_flavor flavor;

#ifdef JOIN_STATS
    uint64_t start[MAX_SUPPORTED_CPUS], end[MAX_SUPPORTED_CPUS], start_seq;
    // remember the join id and the last thread to arrive so restart can use these
    int thd;
    // we want to print statistics every 10 seconds - this is to remember the start of the 10 sec interval
    uint32_t start_tick;
    // counters for joins, in 1000's of clock cycles
    uint64_t elapsed_total[gc_join_max], wake_total[gc_join_max], seq_loss_total[gc_join_max], par_loss_total[gc_join_max], in_join_total[gc_join_max];
#endif //JOIN_STATS

public:
    BOOL init (int n_th, gc_join_flavor f)
    {
        dprintf (JOIN_LOG, ("Initializing join structure"));
        join_struct.n_threads = n_th;
        join_struct.lock_color = 0;
        for (int i = 0; i < 3; i++)
        {
            if (!join_struct.joined_event[i].IsValid())
            {
                join_struct.joined_p = FALSE;
                dprintf (JOIN_LOG, ("Creating join event %d", i));
                // TODO - changing this to a non OS event
                // because this is also used by BGC threads which are
                // managed threads and WaitEx does not allow you to wait
                // for an OS event on a managed thread.
                // But we are not sure if this plays well in the hosting
                // environment.
                //join_struct.joined_event[i].CreateOSManualEventNoThrow(FALSE);
                if (!join_struct.joined_event[i].CreateManualEventNoThrow(FALSE))
                    return FALSE;
            }
        }
        join_struct.join_lock = join_struct.n_threads;
        join_struct.r_join_lock = join_struct.n_threads;
        join_struct.wait_done = FALSE;
        flavor = f;

#ifdef JOIN_STATS
        start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //JOIN_STATS

        return TRUE;
    }

    void destroy ()
    {
        dprintf (JOIN_LOG, ("Destroying join structure"));
        for (int i = 0; i < 3; i++)
        {
            if (join_struct.joined_event[i].IsValid())
                join_struct.joined_event[i].CloseEvent();
        }
    }

    inline void fire_event (int heap, join_time time, join_type type, int join_id)
    {
        FIRE_EVENT(GCJoin_V2, heap, time, type, join_id);
    }

    void join (gc_heap* gch, int join_id)
    {
#ifdef JOIN_STATS
        // parallel execution ends here
        end[gch->heap_number] = get_ts();
#endif //JOIN_STATS

        assert (!join_struct.joined_p);
        int color = join_struct.lock_color.LoadWithoutBarrier();

        if (Interlocked::Decrement(&join_struct.join_lock) != 0)
        {
            dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d",
                flavor, join_id, (int32_t)(join_struct.join_lock)));

            fire_event (gch->heap_number, time_start, type_join, join_id);

            //busy wait around the color
            if (color == join_struct.lock_color.LoadWithoutBarrier())
            {
respin:
                int spin_count = 128 * yp_spin_count_unit;
                for (int j = 0; j < spin_count; j++)
                {
                    if (color != join_struct.lock_color.LoadWithoutBarrier())
                    {
                        break;
                    }
                    YieldProcessor();           // indicate to the processor that we are spinning
                }

                // we've spun, and if the color still hasn't changed, fall into hard wait
                if (color == join_struct.lock_color.LoadWithoutBarrier())
                {
                    dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d",
                        flavor, join_id, color, (int32_t)(join_struct.join_lock)));

                    //Thread* current_thread = GCToEEInterface::GetThread();
                    //BOOL cooperative_mode = gc_heap::enable_preemptive ();
                    uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE);
                    //gc_heap::disable_preemptive (cooperative_mode);

                    if (dwJoinWait != WAIT_OBJECT_0)
                    {
                        STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
                        FATAL_GC_ERROR ();
                    }
                }

                // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
                if (color == join_struct.lock_color.LoadWithoutBarrier())
                {
                    goto respin;
                }

                dprintf (JOIN_LOG, ("join%d(%d): Join() done, join_lock is %d",
                    flavor, join_id, (int32_t)(join_struct.join_lock)));
            }

            fire_event (gch->heap_number, time_end, type_join, join_id);

#ifdef JOIN_STATS
            // parallel execution starts here
            start[gch->heap_number] = get_ts();
            Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number]));
#endif //JOIN_STATS
        }
        else
        {
            fire_event (gch->heap_number, time_start, type_last_join, join_id);

            join_struct.joined_p = TRUE;
            dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id));
            join_struct.joined_event[!color].Reset();
            id = join_id;
            // this one is alone so it can proceed
#ifdef JOIN_STATS
            // remember the join id, the last thread arriving, the start of the sequential phase,
            // and keep track of the cycles spent waiting in the join
            thd = gch->heap_number;
            start_seq = get_ts();
            Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number]));
#endif //JOIN_STATS
        }
    }

    // Reverse join - the first thread that gets here does the work; other threads
    // will only proceed after the work is done.
    // Note that you cannot call this twice in a row on the same thread. Plus there's no
    // need to call it twice in a row - you should just merge the work.
    BOOL r_join (gc_heap* gch, int join_id)
    {

        if (join_struct.n_threads == 1)
        {
            return TRUE;
        }

        if (Interlocked::CompareExchange(&join_struct.r_join_lock, 0, join_struct.n_threads) == 0)
        {
            if (!join_struct.wait_done)
            {
                dprintf (JOIN_LOG, ("r_join() Waiting..."));

                fire_event (gch->heap_number, time_start, type_join, join_id);

                //busy wait around wait_done
                if (!join_struct.wait_done)
                {
        respin:
                    int spin_count = 256 * yp_spin_count_unit;
                    for (int j = 0; j < spin_count; j++)
                    {
                        if (join_struct.wait_done)
                        {
                            break;
                        }
                        YieldProcessor();           // indicate to the processor that we are spinning
                    }

                    // we've spun, and if wait_done still hasn't been set, fall into hard wait
                    if (!join_struct.wait_done)
                    {
                        dprintf (JOIN_LOG, ("Join() hard wait on reset event %d", first_thread_arrived));
                        uint32_t dwJoinWait = join_struct.joined_event[first_thread_arrived].Wait(INFINITE, FALSE);
                        if (dwJoinWait != WAIT_OBJECT_0)
                        {
                            STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
                            FATAL_GC_ERROR ();
                        }
                    }

                    // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
                    if (!join_struct.wait_done)
                    {
                        goto respin;
                    }

                    dprintf (JOIN_LOG, ("r_join() done"));
                }

                fire_event (gch->heap_number, time_end, type_join, join_id);
            }

            return FALSE;
        }
        else
        {
            fire_event (gch->heap_number, time_start, type_first_r_join, join_id);
            return TRUE;
        }
    }

#ifdef JOIN_STATS
    uint64_t get_ts()
    {
        return GCToOSInterface::QueryPerformanceCounter();
    }

    void start_ts (gc_heap* gch)
    {
        // parallel execution ends here
        start[gch->heap_number] = get_ts();
    }
#endif //JOIN_STATS

    void restart()
    {
#ifdef JOIN_STATS
        uint64_t elapsed_seq = get_ts() - start_seq;
        uint64_t max = 0, sum = 0, wake = 0;
        uint64_t min_ts = start[0];
        for (int i = 1; i < join_struct.n_threads; i++)
        {
            if(min_ts > start[i]) min_ts = start[i];
        }

        for (int i = 0; i < join_struct.n_threads; i++)
        {
            uint64_t wake_delay = start[i] - min_ts;
            uint64_t elapsed = end[i] - start[i];
            if (max < elapsed)
                max = elapsed;
            sum += elapsed;
            wake += wake_delay;
        }
        uint64_t seq_loss = (join_struct.n_threads - 1)*elapsed_seq;
        uint64_t par_loss = join_struct.n_threads*max - sum;
        double efficiency = 0.0;
        if (max > 0)
            efficiency = sum*100.0/(join_struct.n_threads*max);

        const double ts_scale = 1e-6;

        // enable this printf to get statistics on each individual join as it occurs
//      printf("join #%3d  seq_loss = %5g   par_loss = %5g  efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);

        elapsed_total[id] += sum;
        wake_total[id] += wake;
        seq_loss_total[id] += seq_loss;
        par_loss_total[id] += par_loss;

        // every 10 seconds, print a summary of the time spent in each type of join
        if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000)
        {
            printf("**** summary *****\n");
            for (int i = 0; i < 16; i++)
            {
                printf("join #%3d  elapsed_total = %8g wake_loss = %8g seq_loss = %8g  par_loss = %8g  in_join_total = %8g\n",
                   i,
                   ts_scale*elapsed_total[i],
                   ts_scale*wake_total[i],
                   ts_scale*seq_loss_total[i],
                   ts_scale*par_loss_total[i],
                   ts_scale*in_join_total[i]);
                elapsed_total[i] = wake_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0;
            }
            start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
        }
#endif //JOIN_STATS

        fire_event (join_heap_restart, time_start, type_restart, -1);
        assert (join_struct.joined_p);
        join_struct.joined_p = FALSE;
        join_struct.join_lock = join_struct.n_threads;
        dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
//        printf("restart from join #%d at cycle %u from start of gc\n", join_id, GetCycleCount32() - gc_start);
        int color = join_struct.lock_color.LoadWithoutBarrier();
        join_struct.lock_color = !color;
        join_struct.joined_event[color].Set();

//        printf("Set joined_event %d\n", !join_struct.lock_color);

        fire_event (join_heap_restart, time_end, type_restart, -1);

#ifdef JOIN_STATS
        start[thd] = get_ts();
#endif //JOIN_STATS
    }

    BOOL joined()
    {
        dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
        return join_struct.joined_p;
    }

    void r_restart()
    {
        if (join_struct.n_threads != 1)
        {
            fire_event (join_heap_r_restart, time_start, type_restart, -1);
            join_struct.wait_done = TRUE;
            join_struct.joined_event[first_thread_arrived].Set();
            fire_event (join_heap_r_restart, time_end, type_restart, -1);
        }
    }

    void r_init()
    {
        if (join_struct.n_threads != 1)
        {
            join_struct.r_join_lock = join_struct.n_threads;
            join_struct.wait_done = FALSE;
            join_struct.joined_event[first_thread_arrived].Reset();
        }
    }
};
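
// Illustrative sketch (not part of the original file): the join above is, at
// its core, a sense-reversing barrier. Each thread records the current
// "color", decrements the shared count, and the last thread to arrive resets
// the count and flips the color to release the others; waiters re-check the
// color after waking to guard against stale wakeups, just as join() re-checks
// lock_color after its event wait. A compact standalone rendition using only
// busy-waiting (the real code adds the event-based hard wait and ETW events):
#if 0
#include <atomic>
#include <thread>

struct sense_barrier
{
    int n_threads;
    std::atomic<int>  remaining;
    std::atomic<bool> color{false};

    explicit sense_barrier(int n) : n_threads(n), remaining(n) {}

    void arrive_and_wait()
    {
        bool my_color = color.load();
        if (remaining.fetch_sub(1) == 1)
        {
            // Last thread: play the role of restart() - reset the count
            // first, then flip the color to release everyone.
            remaining.store(n_threads);
            color.store(!my_color);
        }
        else
        {
            while (color.load() == my_color)
                std::this_thread::yield();  // spin until the color flips
        }
    }
};
#endif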

t_join gc_t_join;

#ifdef BACKGROUND_GC
t_join bgc_t_join;
#endif //BACKGROUND_GC

#endif //MULTIPLE_HEAPS

#define spin_and_switch(count_to_spin, expr) \
{ \
    for (int j = 0; j < count_to_spin; j++) \
    { \
        if (expr) \
        { \
            break;\
        } \
        YieldProcessor(); \
    } \
    if (!(expr)) \
    { \
        GCToOSInterface::YieldThread(0); \
    } \
}
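
// Illustrative sketch (not part of the original file): spin_and_switch is a
// bounded spin - poll the condition up to count_to_spin times with
// YieldProcessor, and if it still does not hold, give up the rest of the
// quantum with YieldThread. Hypothetical usage against a flag that another
// thread will eventually set:
#if 0
VOLATILE(int32_t) some_flag = 0;    // hypothetical flag, set by another thread

void wait_for_flag_sketch()
{
    while (some_flag == 0)
    {
        // spin briefly; fall back to yielding the thread if still not set
        spin_and_switch (128, (some_flag != 0));
    }
}
#endif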

#ifndef DACCESS_COMPILE
#ifdef BACKGROUND_GC

#define max_pending_allocs 64

class exclusive_sync
{
    // TODO - verify that this is the right syntax for Volatile.
    VOLATILE(uint8_t*) rwp_object;
    VOLATILE(int32_t) needs_checking;

    int spin_count;

    uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (int32_t)];

    // TODO - perhaps each object should be on its own cache line...
    VOLATILE(uint8_t*) alloc_objects[max_pending_allocs];

    int find_free_index ()
    {
        for (int i = 0; i < max_pending_allocs; i++)
        {
            if (alloc_objects [i] == (uint8_t*)0)
            {
                return i;
            }
        }

        return -1;
    }

public:
    void init()
    {
        spin_count = 32 * (g_num_processors - 1);
        rwp_object = 0;
        needs_checking = 0;
        for (int i = 0; i < max_pending_allocs; i++)
        {
            alloc_objects [i] = (uint8_t*)0;
        }
    }

    void check()
    {
        for (int i = 0; i < max_pending_allocs; i++)
        {
            if (alloc_objects [i] != (uint8_t*)0)
            {
                GCToOSInterface::DebugBreak();
            }
        }
    }

    void bgc_mark_set (uint8_t* obj)
    {
        dprintf (3, ("cm: probing %Ix", obj));
retry:
        if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
        {
            // If we spend too much time scanning all the allocs,
            // consider adding a high water mark and scan up
            // to that; we'll need to interlock in done when
            // we update the high watermark.
            for (int i = 0; i < max_pending_allocs; i++)
            {
                if (obj == alloc_objects[i])
                {
                    needs_checking = 0;
                    dprintf (3, ("cm: will spin"));
                    spin_and_switch (spin_count, (obj != alloc_objects[i]));
                    goto retry;
                }
            }

            rwp_object = obj;
            needs_checking = 0;
            dprintf (3, ("cm: set %Ix", obj));
            return;
        }
        else
        {
            spin_and_switch (spin_count, (needs_checking == 0));
            goto retry;
        }
    }

    int loh_alloc_set (uint8_t* obj)
    {
        if (!gc_heap::cm_in_progress)
        {
            return -1;
        }

retry:
        dprintf (3, ("loh alloc: probing %Ix", obj));

        if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
        {
            if (obj == rwp_object)
            {
                needs_checking = 0;
                spin_and_switch (spin_count, (obj != rwp_object));
                goto retry;
            }
            else
            {
                int cookie = find_free_index();

                if (cookie != -1)
                {
                    alloc_objects[cookie] = obj;
                    needs_checking = 0;
                    //if (cookie >= 4)
                    //{
                    //    GCToOSInterface::DebugBreak();
                    //}

                    dprintf (3, ("loh alloc: set %Ix at %d", obj, cookie));
                    return cookie;
                }
                else
                {
                    needs_checking = 0;
                    dprintf (3, ("loh alloc: setting %Ix will spin to acquire a free index", obj));
                    spin_and_switch (spin_count, (find_free_index () != -1));
                    goto retry;
                }
            }
        }
        else
        {
            dprintf (3, ("loh alloc: will spin on checking %Ix", obj));
            spin_and_switch (spin_count, (needs_checking == 0));
            goto retry;
        }
    }

    void bgc_mark_done ()
    {
        dprintf (3, ("cm: release lock on %Ix", (uint8_t *)rwp_object));
        rwp_object = 0;
    }

    void loh_alloc_done_with_index (int index)
    {
        dprintf (3, ("loh alloc: release lock on %Ix based on %d", (uint8_t *)alloc_objects[index], index));
        assert ((index >= 0) && (index < max_pending_allocs));
        alloc_objects[index] = (uint8_t*)0;
    }

    void loh_alloc_done (uint8_t* obj)
    {
#ifdef BACKGROUND_GC
        if (!gc_heap::cm_in_progress)
        {
            return;
        }

        for (int i = 0; i < max_pending_allocs; i++)
        {
            if (alloc_objects [i] == obj)
            {
                dprintf (3, ("loh alloc: release lock on %Ix at %d", (uint8_t *)alloc_objects[i], i));
                alloc_objects[i] = (uint8_t*)0;
                return;
            }
        }
#endif //BACKGROUND_GC
    }
};
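
// Illustrative sketch (not part of the original file): the intended call
// pattern for the class above. The allocator brackets each LOH allocation
// with loh_alloc_set / loh_alloc_done_with_index while the background marker
// brackets its access with bgc_mark_set / bgc_mark_done; the spinning in both
// setters keeps the two from ever operating on the same object at once.
#if 0
void loh_allocate_sketch (exclusive_sync* bgc_alloc_lock, uint8_t* obj)
{
    // Returns -1 when no concurrent mark is in progress; nothing to release.
    int cookie = bgc_alloc_lock->loh_alloc_set (obj);

    // ... initialize the object while the marker is held off ...

    if (cookie != -1)
    {
        bgc_alloc_lock->loh_alloc_done_with_index (cookie);
    }
}
#endif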

// Note that this class was written assuming just synchronization between
// one background GC thread and multiple user threads that might request
// an FGC - it does not take into account what kind of locks the multiple
// user threads might be holding at the time (e.g., there could only be one
// user thread requesting an FGC because it needs to take gc_lock first)
// so you'll see checks that may not be necessary if you take those conditions
// into consideration.
//
// With the introduction of Server Background GC we no longer use this
// class to do synchronization between FGCs and BGC.
class recursive_gc_sync
{
    static VOLATILE(int32_t) foreground_request_count;//initial state 0
    static VOLATILE(BOOL) gc_background_running; //initial state FALSE
    static VOLATILE(int32_t) foreground_count; // initial state 0;
    static VOLATILE(uint32_t) foreground_gate; // initial state FALSE;
    static GCEvent foreground_complete;//Auto Reset
    static GCEvent foreground_allowed;//Manual Reset
public:
    static void begin_background();
    static void end_background();
    static void begin_foreground();
    static void end_foreground();
    BOOL allow_foreground ();
    static BOOL init();
    static void shutdown();
    static BOOL background_running_p() {return gc_background_running;}
};

VOLATILE(int32_t) recursive_gc_sync::foreground_request_count = 0;//initial state 0
VOLATILE(int32_t) recursive_gc_sync::foreground_count = 0; // initial state 0;
VOLATILE(BOOL) recursive_gc_sync::gc_background_running = FALSE; //initial state FALSE
VOLATILE(uint32_t) recursive_gc_sync::foreground_gate = 0;
GCEvent recursive_gc_sync::foreground_complete;//Auto Reset
GCEvent recursive_gc_sync::foreground_allowed;//Manual Reset

BOOL recursive_gc_sync::init ()
{
    foreground_request_count = 0;
    foreground_count = 0;
    gc_background_running = FALSE;
    foreground_gate = 0;

    if (!foreground_complete.CreateOSAutoEventNoThrow(FALSE))
    {
        goto error;
    }
    if (!foreground_allowed.CreateManualEventNoThrow(FALSE))
    {
        goto error;
    }
    return TRUE;

error:
    shutdown();
    return FALSE;
}

void recursive_gc_sync::shutdown()
{
    if (foreground_complete.IsValid())
        foreground_complete.CloseEvent();
    if (foreground_allowed.IsValid())
        foreground_allowed.CloseEvent();
}

void recursive_gc_sync::begin_background()
{
    dprintf (2, ("begin background"));
    foreground_request_count = 1;
    foreground_count = 1;
    foreground_allowed.Reset();
    gc_background_running = TRUE;
}

void recursive_gc_sync::end_background()
{
    dprintf (2, ("end background"));
    gc_background_running = FALSE;
    foreground_gate = 1;
    foreground_allowed.Set();
}

void recursive_gc_sync::begin_foreground()
{
    dprintf (2, ("begin_foreground"));

    bool cooperative_mode = false;
    if (gc_background_running)
    {
        gc_heap::fire_alloc_wait_event_begin (awr_fgc_wait_for_bgc);
        gc_heap::alloc_wait_event_p = TRUE;

try_again_top:

        Interlocked::Increment (&foreground_request_count);

try_again_no_inc:
        dprintf(2, ("Waiting sync gc point"));
        assert (foreground_allowed.IsValid());
        assert (foreground_complete.IsValid());

        cooperative_mode = gc_heap::enable_preemptive ();

        foreground_allowed.Wait(INFINITE, FALSE);

        dprintf(2, ("Waiting sync gc point is done"));

        gc_heap::disable_preemptive (cooperative_mode);

        if (foreground_gate)
        {
            Interlocked::Increment (&foreground_count);
            dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
            if (foreground_gate)
            {
                gc_heap::settings.concurrent = FALSE;
                return;
            }
            else
            {
                end_foreground();
                goto try_again_top;
            }
        }
        else
        {
            goto try_again_no_inc;
        }
    }
}

void recursive_gc_sync::end_foreground()
{
    dprintf (2, ("end_foreground"));
    if (gc_background_running)
    {
        Interlocked::Decrement (&foreground_request_count);
        dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
        if (Interlocked::Decrement (&foreground_count) == 0)
        {
            //c_write ((BOOL*)&foreground_gate, 0);
            // TODO - couldn't make the syntax work with Volatile<T>
            foreground_gate = 0;
            if (foreground_count == 0)
            {
                foreground_allowed.Reset ();
                dprintf(2, ("setting foreground complete event"));
                foreground_complete.Set();
            }
        }
    }
}

inline
BOOL recursive_gc_sync::allow_foreground()
{
    assert (gc_heap::settings.concurrent);
    dprintf (100, ("enter allow_foreground, f_req_count: %d, f_count: %d",
                   (int32_t)foreground_request_count, (int32_t)foreground_count));

    BOOL did_fgc = FALSE;

    //if we have suspended the EE, just return because
    //some thread could be waiting on this to proceed.
    if (!GCHeap::GcInProgress)
    {
        //TODO BACKGROUND_GC This is to stress the concurrency between
        //background and foreground
//        gc_heap::disallow_new_allocation (0);

        //GCToOSInterface::YieldThread(0);

        //END of TODO
        if (foreground_request_count != 0)
        {
            //foreground wants to run
            //save the important settings
            //TODO BACKGROUND_GC be more selective about the important settings.
            gc_mechanisms saved_settings = gc_heap::settings;
            do
            {
                did_fgc = TRUE;
                //c_write ((BOOL*)&foreground_gate, 1);
                // TODO - couldn't make the syntax work with Volatile<T>
                foreground_gate = 1;
                foreground_allowed.Set ();
                foreground_complete.Wait (INFINITE, FALSE);
            }while (/*foreground_request_count ||*/ foreground_gate);

            assert (!foreground_gate);

            //restore the important settings
            gc_heap::settings = saved_settings;
            GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
            //the background GC shouldn't be using gc_high and gc_low
            //gc_low = lowest_address;
            //gc_high = highest_address;
        }

        //TODO BACKGROUND_GC This is to stress the concurrency between
        //background and foreground
//        gc_heap::allow_new_allocation (0);
        //END of TODO
    }

    dprintf (100, ("leave allow_foreground"));
    assert (gc_heap::settings.concurrent);
    return did_fgc;
}
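
// Illustrative sketch (not part of the original file): the gate protocol
// above, condensed. A foreground requester bumps foreground_request_count and
// blocks on foreground_allowed; the background thread, at a safe point in
// allow_foreground(), opens foreground_gate and waits on foreground_complete;
// the last foreground thread out closes the gate and signals completion. A
// hypothetical standalone rendition with the two GCEvents replaced by a
// mutex/condition_variable pair:
#if 0
#include <condition_variable>
#include <mutex>

struct fgc_gate_sketch
{
    std::mutex              m;
    std::condition_variable cv;
    int  request_count = 0;
    int  active_count  = 0;
    bool gate_open     = false;

    void begin_foreground()             // requester side
    {
        std::unique_lock<std::mutex> l(m);
        request_count++;
        cv.wait(l, [&]{ return gate_open; });
        active_count++;
    }

    void end_foreground()               // requester side
    {
        std::unique_lock<std::mutex> l(m);
        request_count--;
        if (--active_count == 0)
        {
            gate_open = false;
            cv.notify_all();            // wake the background thread
        }
    }

    void allow_foreground()             // background thread, at a safe point
    {
        std::unique_lock<std::mutex> l(m);
        if (request_count != 0)
        {
            gate_open = true;
            cv.notify_all();                        // let foreground GCs run
            cv.wait(l, [&]{ return !gate_open; });  // wait for the last one out
        }
    }
};
#endif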
1535
1536 #endif //BACKGROUND_GC
1537 #endif //DACCESS_COMPILE
1538
1539
1540 #if  defined(COUNT_CYCLES)
1541 #ifdef _MSC_VER
1542 #pragma warning(disable:4035)
1543 #endif //_MSC_VER
1544
1545 static
1546 unsigned        GetCycleCount32()        // enough for about 40 seconds
1547 {
1548 __asm   push    EDX
1549 __asm   _emit   0x0F
1550 __asm   _emit   0x31
1551 __asm   pop     EDX
1552 };
1553
1554 #pragma warning(default:4035)
1555
1556 #endif //COUNT_CYCLES
1557
1558 #ifdef TIME_GC
1559 int mark_time, plan_time, sweep_time, reloc_time, compact_time;
1560 #endif //TIME_GC
1561
1562 #ifndef MULTIPLE_HEAPS
1563
1564 #endif // MULTIPLE_HEAPS
1565
1566 void reset_memory (uint8_t* o, size_t sizeo);
1567
1568 #ifdef WRITE_WATCH
1569
1570 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1571 static bool virtual_alloc_hardware_write_watch = false;
1572 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1573
1574 static bool hardware_write_watch_capability = false;
1575
1576 #ifndef DACCESS_COMPILE
1577
1578 //check if the write watch APIs are supported.
1579
1580 void hardware_write_watch_api_supported()
1581 {
1582     if (GCToOSInterface::SupportsWriteWatch())
1583     {
1584         hardware_write_watch_capability = true;
1585         dprintf (2, ("WriteWatch supported"));
1586     }
1587     else
1588     {
1589         dprintf (2,("WriteWatch not supported"));
1590     }
1591 }
1592
1593 #endif //!DACCESS_COMPILE
1594
1595 inline bool can_use_hardware_write_watch()
1596 {
1597     return hardware_write_watch_capability;
1598 }
1599
1600 inline bool can_use_write_watch_for_gc_heap()
1601 {
1602 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1603     return true;
1604 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1605     return can_use_hardware_write_watch();
1606 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1607 }
1608
1609 inline bool can_use_write_watch_for_card_table()
1610 {
1611 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
1612     return true;
1613 #else
1614     return can_use_hardware_write_watch();
1615 #endif
1616 }
1617
1618 #else
1619 #define mem_reserve (MEM_RESERVE)
1620 #endif //WRITE_WATCH
1621
1622 //check if the low memory notification is supported
1623
1624 #ifndef DACCESS_COMPILE
1625
1626 void WaitLongerNoInstru (int i)
1627 {
1628     // every 8th attempt:
1629     bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();
1630
1631     // if we're waiting for gc to finish, we should block immediately
1632     if (g_fSuspensionPending == 0)
1633     {
1634         if  (g_num_processors > 1)
1635         {
1636             YieldProcessor();           // indicate to the processor that we are spinning
1637             if  (i & 0x01f)
1638                 GCToOSInterface::YieldThread (0);
1639             else
1640                 GCToOSInterface::Sleep (5);
1641         }
1642         else
1643             GCToOSInterface::Sleep (5);
1644     }
1645
1646     // If the CLR is hosted, a thread may reach here while it is in preemptive GC mode,
1647     // or while it has no Thread object, in order to force a task to yield or to trigger a GC.
1648     // It is important that the thread actually waits for the GC to finish; otherwise it
1649     // spins in a tight loop, and if it has high priority the performance is going to be very bad.
1650     if (bToggleGC)
1651     {
1652 #ifdef _DEBUG
1653         // In debug builds, all enter_spin_lock operations go through this code.  If a GC has
1654         // started, it is important to block until the GC thread calls set_gc_done (since it is
1655         // guaranteed to have cleared g_TrapReturningThreads by this point).  This avoids livelock
1656         // conditions which can otherwise occur if threads are allowed to spin in this function
1657         // (and therefore starve the GC thread) between the point when the GC thread sets the
1658         // WaitForGC event and the point when the GC thread clears g_TrapReturningThreads.
1659         if (gc_heap::gc_started)
1660         {
1661             gc_heap::wait_for_gc_done();
1662         }
1663 #endif // _DEBUG
1664         GCToEEInterface::DisablePreemptiveGC();
1665     }
1666     else if (g_fSuspensionPending > 0)
1667     {
1668         g_theGCHeap->WaitUntilGCComplete();
1669     }
1670 }
1671
1672 inline
1673 static void safe_switch_to_thread()
1674 {
1675     bool cooperative_mode = gc_heap::enable_preemptive();
1676
1677     GCToOSInterface::YieldThread(0);
1678
1679     gc_heap::disable_preemptive(cooperative_mode);
1680 }
1681
1682 //
1683 // We need the following methods to have volatile arguments, so that they can accept
1684 // raw pointers in addition to the results of the & operator on Volatile<T>.
1685 //
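// Lock protocol: the lock word holds -1 when the lock is free. CompareExchange
// tries to replace -1 with 0 and returns the previous value, so a result >= 0
// means another thread already holds the lock and we have to spin until the
// holder stores -1 back in leave_spin_lock_noinstru.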
1686 inline
1687 static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
1688 {
1689 retry:
1690
1691     if (Interlocked::CompareExchange(lock, 0, -1) >= 0)
1692     {
1693         unsigned int i = 0;
1694         while (VolatileLoad(lock) >= 0)
1695         {
1696             if ((++i & 7) && !IsGCInProgress())
1697             {
1698                 if  (g_num_processors > 1)
1699                 {
1700 #ifndef MULTIPLE_HEAPS
1701                     int spin_count = 32 * yp_spin_count_unit;
1702 #else //!MULTIPLE_HEAPS
1703                     int spin_count = yp_spin_count_unit;
1704 #endif //!MULTIPLE_HEAPS
1705                     for (int j = 0; j < spin_count; j++)
1706                     {
1707                         if  (VolatileLoad(lock) < 0 || IsGCInProgress())
1708                             break;
1709                         YieldProcessor();           // indicate to the processor that we are spinning
1710                     }
1711                     if  (VolatileLoad(lock) >= 0 && !IsGCInProgress())
1712                     {
1713                         safe_switch_to_thread();
1714                     }
1715                 }
1716                 else
1717                 {
1718                     safe_switch_to_thread();
1719                 }
1720             }
1721             else
1722             {
1723                 WaitLongerNoInstru(i);
1724             }
1725         }
1726         goto retry;
1727     }
1728 }
1729
1730 inline
1731 static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock)
1732 {
1733     return (Interlocked::CompareExchange(&*lock, 0, -1) < 0);
1734 }
1735
1736 inline
1737 static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
1738 {
1739     VolatileStore<int32_t>((int32_t*)lock, -1);
1740 }
1741
1742 #ifdef _DEBUG
1743
1744 inline
1745 static void enter_spin_lock(GCSpinLock *pSpinLock)
1746 {
1747     enter_spin_lock_noinstru(&pSpinLock->lock);
1748     assert (pSpinLock->holding_thread == (Thread*)-1);
1749     pSpinLock->holding_thread = GCToEEInterface::GetThread();
1750 }
1751
1752 inline
1753 static BOOL try_enter_spin_lock(GCSpinLock *pSpinLock)
1754 {
1755     BOOL ret = try_enter_spin_lock_noinstru(&pSpinLock->lock);
1756     if (ret)
1757         pSpinLock->holding_thread = GCToEEInterface::GetThread();
1758     return ret;
1759 }
1760
1761 inline
1762 static void leave_spin_lock(GCSpinLock *pSpinLock)
1763 {
1764     bool gc_thread_p = GCToEEInterface::WasCurrentThreadCreatedByGC();
1765 //    _ASSERTE((pSpinLock->holding_thread == GCToEEInterface::GetThread()) || gc_thread_p || pSpinLock->released_by_gc_p);
1766     pSpinLock->released_by_gc_p = gc_thread_p;
1767     pSpinLock->holding_thread = (Thread*) -1;
1768     if (pSpinLock->lock != -1)
1769         leave_spin_lock_noinstru(&pSpinLock->lock);
1770 }
1771
1772 #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) \
1773     _ASSERTE((pSpinLock)->holding_thread == GCToEEInterface::GetThread());
1774
1775 #define ASSERT_NOT_HOLDING_SPIN_LOCK(pSpinLock) \
1776     _ASSERTE((pSpinLock)->holding_thread != GCToEEInterface::GetThread());
1777
1778 #else //_DEBUG
1779
1780 //In the concurrent version, Enable/DisablePreemptiveGC is optional because
1781 //the gc thread calls WaitLonger.
1782 void WaitLonger (int i
1783 #ifdef SYNCHRONIZATION_STATS
1784     , GCSpinLock* spin_lock
1785 #endif //SYNCHRONIZATION_STATS
1786     )
1787 {
1788 #ifdef SYNCHRONIZATION_STATS
1789     (spin_lock->num_wait_longer)++;
1790 #endif //SYNCHRONIZATION_STATS
1791
1792     // every 8th attempt:
1793     bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();
1794     assert (bToggleGC);
1795
1796     // if we're waiting for a GC to finish (one has started), we should block immediately rather than spin
1797     if (!gc_heap::gc_started)
1798     {
1799 #ifdef SYNCHRONIZATION_STATS
1800         (spin_lock->num_switch_thread_w)++;
1801 #endif //SYNCHRONIZATION_STATS
1802         if  (g_num_processors > 1)
1803         {
1804             YieldProcessor();           // indicate to the processor that we are spinning
1805             if  (i & 0x01f)
1806                 GCToOSInterface::YieldThread (0);
1807             else
1808                 GCToOSInterface::Sleep (5);
1809         }
1810         else
1811             GCToOSInterface::Sleep (5);
1812     }
1813
1814     // If the CLR is hosted, a thread may reach here while it is in preemptive GC mode,
1815     // or while it has no Thread object, in order to force a task to yield or to trigger a GC.
1816     // It is important that the thread actually waits for the GC to finish; otherwise it
1817     // spins in a tight loop, and if it has high priority the performance is going to be very bad.
1818     if (gc_heap::gc_started)
1819     {
1820         gc_heap::wait_for_gc_done();
1821     }
1822
1823     if (bToggleGC)
1824     {
1825 #ifdef SYNCHRONIZATION_STATS
1826         (spin_lock->num_disable_preemptive_w)++;
1827 #endif //SYNCHRONIZATION_STATS
1828         GCToEEInterface::DisablePreemptiveGC();
1829     }
1830 }
1831
1832 inline
1833 static void enter_spin_lock (GCSpinLock* spin_lock)
1834 {
1835 retry:
1836
1837     if (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) >= 0)
1838     {
1839         unsigned int i = 0;
1840         while (spin_lock->lock >= 0)
1841         {
1842             if ((++i & 7) && !gc_heap::gc_started)
1843             {
1844                 if  (g_num_processors > 1)
1845                 {
1846 #ifndef MULTIPLE_HEAPS
1847                     int spin_count = 32 * yp_spin_count_unit;
1848 #else //!MULTIPLE_HEAPS
1849                     int spin_count = yp_spin_count_unit;
1850 #endif //!MULTIPLE_HEAPS
1851                     for (int j = 0; j < spin_count; j++)
1852                     {
1853                         if  (spin_lock->lock < 0 || gc_heap::gc_started)
1854                             break;
1855                         YieldProcessor();           // indicate to the processor that we are spinning
1856                     }
1857                     if  (spin_lock->lock >= 0 && !gc_heap::gc_started)
1858                     {
1859 #ifdef SYNCHRONIZATION_STATS
1860                         (spin_lock->num_switch_thread)++;
1861 #endif //SYNCHRONIZATION_STATS
1862                         bool cooperative_mode = gc_heap::enable_preemptive ();
1863
1864                         GCToOSInterface::YieldThread(0);
1865
1866                         gc_heap::disable_preemptive (cooperative_mode);
1867                     }
1868                 }
1869                 else
1870                     GCToOSInterface::YieldThread(0);
1871             }
1872             else
1873             {
1874                 WaitLonger(i
1875 #ifdef SYNCHRONIZATION_STATS
1876                         , spin_lock
1877 #endif //SYNCHRONIZATION_STATS
1878                     );
1879             }
1880         }
1881         goto retry;
1882     }
1883 }
1884
1885 inline BOOL try_enter_spin_lock(GCSpinLock* spin_lock)
1886 {
1887     return (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) < 0);
1888 }
1889
1890 inline
1891 static void leave_spin_lock (GCSpinLock * spin_lock)
1892 {
1893     spin_lock->lock = -1;
1894 }
1895
1896 #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock)
1897
1898 #endif //_DEBUG
1899
1900 bool gc_heap::enable_preemptive ()
1901 {
1902     return GCToEEInterface::EnablePreemptiveGC();
1903 }
1904
1905 void gc_heap::disable_preemptive (bool restore_cooperative)
1906 {
1907     if (restore_cooperative)
1908     {
1909         GCToEEInterface::DisablePreemptiveGC();
1910     }
1911 }
1912
1913 #endif // !DACCESS_COMPILE
1914
1915 typedef void **  PTR_PTR;
1916 //This function clears a piece of memory.
1917 // size has to be pointer-size aligned (see the asserts below)
1918
1919 inline
1920 void memclr ( uint8_t* mem, size_t size)
1921 {
1922     dprintf (3, ("MEMCLR: %Ix, %d", mem, size));
1923     assert ((size & (sizeof(PTR_PTR)-1)) == 0);
1924     assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
1925
1926 #if 0
1927     // The compiler will recognize this pattern and replace it with a memset call. We may as well call
1928     // memset directly to make it obvious what's going on.
1929     PTR_PTR m = (PTR_PTR) mem;
1930     for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
1931         *(m++) = 0;
1932 #endif
1933
1934     memset (mem, 0, size);
1935 }
1936
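//This function copies size bytes from smem to dmem in pointer-sized chunks,
// unrolled four at a time. It copies front to back, so (like memcpy) it must
// not be used on overlapping ranges where dmem lies inside [smem, smem + size).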
1937 void memcopy (uint8_t* dmem, uint8_t* smem, size_t size)
1938 {
1939     const size_t sz4ptr = sizeof(PTR_PTR)*4;
1940     const size_t sz2ptr = sizeof(PTR_PTR)*2;
1941     const size_t sz1ptr = sizeof(PTR_PTR)*1;
1942
1943     // size must be a multiple of the pointer size
1944     assert ((size & (sizeof (PTR_PTR)-1)) == 0);
1945     assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
1946
1947     // copy in groups of four pointer sized things at a time
1948     if (size >= sz4ptr)
1949     {
1950         do
1951         {
1952             ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1953             ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
1954             ((PTR_PTR)dmem)[2] = ((PTR_PTR)smem)[2];
1955             ((PTR_PTR)dmem)[3] = ((PTR_PTR)smem)[3];
1956             dmem += sz4ptr;
1957             smem += sz4ptr;
1958         }
1959         while ((size -= sz4ptr) >= sz4ptr);
1960     }
1961
1962     // still two pointer sized things or more left to copy?
1963     if (size & sz2ptr)
1964     {
1965         ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1966         ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
1967         dmem += sz2ptr;
1968         smem += sz2ptr;
1969     }
1970
1971     // still one pointer sized thing left to copy?
1972     if (size & sz1ptr)
1973     {
1974         ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1975         // dmem += sz1ptr;
1976         // smem += sz1ptr;
1977     }
1978
1979 }
1980
1981 inline
1982 ptrdiff_t round_down (ptrdiff_t add, int pitch)
1983 {
1984     return ((add / pitch) * pitch);
1985 }
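// Note that C++ integer division truncates toward zero, so for a negative add
// this rounds toward zero (i.e. up) rather than down; it only rounds down in
// the usual sense for non-negative values.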
1986
1987 #if defined(FEATURE_STRUCTALIGN) && defined(RESPECT_LARGE_ALIGNMENT)
1988 // FEATURE_STRUCTALIGN allows the compiler to dictate the alignment,
1989 // i.e, if a larger alignment matters or is beneficial, the compiler
1990 // generated info tells us so.  RESPECT_LARGE_ALIGNMENT is just the
1991 // converse - it's a heuristic for the GC to use a larger alignment.
1992 #error FEATURE_STRUCTALIGN should imply !RESPECT_LARGE_ALIGNMENT
1993 #endif
1994
1995 #if defined(FEATURE_STRUCTALIGN) && defined(FEATURE_LOH_COMPACTION)
1996 #error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive
1997 #endif
1998
1999 #if defined(GROWABLE_SEG_MAPPING_TABLE) && !defined(SEG_MAPPING_TABLE)
2000 #error if GROWABLE_SEG_MAPPING_TABLE is defined, SEG_MAPPING_TABLE must be defined
2001 #endif
2002
2003 inline
2004 BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2)
2005 {
2006 #ifdef RESPECT_LARGE_ALIGNMENT
2007     return ((((size_t)p1 ^ (size_t)p2) & 7) == 0);
2008 #else
2009     UNREFERENCED_PARAMETER(p1);
2010     UNREFERENCED_PARAMETER(p2);
2011     return TRUE;
2012 #endif //RESPECT_LARGE_ALIGNMENT
2013 }
2014
2015 inline 
2016 size_t switch_alignment_size (BOOL already_padded_p)
2017 {
2018     if (already_padded_p)
2019         return DATA_ALIGNMENT;
2020     else
2021         return (Align (min_obj_size) +((Align (min_obj_size)&DATA_ALIGNMENT)^DATA_ALIGNMENT));
2022 }
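// The expression above always yields an odd multiple of DATA_ALIGNMENT (it adds
// DATA_ALIGNMENT to Align (min_obj_size) only when that is an even multiple), so
// adding the result to an address flips its (2*DATA_ALIGNMENT) alignment parity
// while leaving room for a min-size free object where padding is needed.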
2023
2024
2025 #ifdef FEATURE_STRUCTALIGN
2026 void set_node_aligninfo (uint8_t *node, int requiredAlignment, ptrdiff_t pad);
2027 void clear_node_aligninfo (uint8_t *node);
2028 #else // FEATURE_STRUCTALIGN
2029 #define node_realigned(node)    (((plug_and_reloc*)(node))[-1].reloc & 1)
2030 void set_node_realigned (uint8_t* node);
2031 void clear_node_realigned(uint8_t* node);
2032 #endif // FEATURE_STRUCTALIGN
2033
2034 inline
2035 size_t AlignQword (size_t nbytes)
2036 {
2037 #ifdef FEATURE_STRUCTALIGN
2038     // This function is used to align everything on the large object
2039     // heap to an 8-byte boundary, to reduce the number of unaligned
2040     // accesses to (say) arrays of doubles.  With FEATURE_STRUCTALIGN,
2041     // the compiler dictates the optimal alignment instead of having
2042     // a heuristic in the GC.
2043     return Align (nbytes);
2044 #else // FEATURE_STRUCTALIGN
2045     return (nbytes + 7) & ~7;
2046 #endif // FEATURE_STRUCTALIGN
2047 }
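// For example, without FEATURE_STRUCTALIGN: AlignQword (13) == 16 and
// AlignQword (16) == 16.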
2048
2049 inline
2050 BOOL Aligned (size_t n)
2051 {
2052     return (n & ALIGNCONST) == 0;
2053 }
2054
2055 #define OBJECT_ALIGNMENT_OFFSET (sizeof(MethodTable *))
2056
2057 #ifdef FEATURE_STRUCTALIGN
2058 #define MAX_STRUCTALIGN OS_PAGE_SIZE
2059 #else // FEATURE_STRUCTALIGN
2060 #define MAX_STRUCTALIGN 0
2061 #endif // FEATURE_STRUCTALIGN
2062
2063 #ifdef FEATURE_STRUCTALIGN
2064 inline
2065 ptrdiff_t AdjustmentForMinPadSize(ptrdiff_t pad, int requiredAlignment)
2066 {
2067     // The resulting alignpad must be either 0 or at least min_obj_size.
2068     // Note that by computing the following difference on unsigned types,
2069     // we can do the range check 0 < alignpad < min_obj_size with a
2070     // single conditional branch.
2071     if ((size_t)(pad - DATA_ALIGNMENT) < Align (min_obj_size) - DATA_ALIGNMENT)
2072     {
2073         return requiredAlignment;
2074     }
2075     return 0;
2076 }
2077
2078 inline
2079 uint8_t* StructAlign (uint8_t* origPtr, int requiredAlignment, ptrdiff_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
2080 {
2081     // required alignment must be a power of two
2082     _ASSERTE(((size_t)origPtr & ALIGNCONST) == 0);
2083     _ASSERTE(((requiredAlignment - 1) & requiredAlignment) == 0);
2084     _ASSERTE(requiredAlignment >= sizeof(void *));
2085     _ASSERTE(requiredAlignment <= MAX_STRUCTALIGN);
2086
2087     // When this method is invoked for individual objects (i.e., alignmentOffset
2088     // is just the size of the PostHeader), what needs to be aligned when
2089     // we're done is the pointer to the payload of the object (which means
2090     // the actual resulting object pointer is typically not aligned).
2091
2092     uint8_t* result = (uint8_t*)Align ((size_t)origPtr + alignmentOffset, requiredAlignment-1) - alignmentOffset;
2093     ptrdiff_t alignpad = result - origPtr;
2094
2095     return result + AdjustmentForMinPadSize (alignpad, requiredAlignment);
2096 }
2097
2098 inline
2099 ptrdiff_t ComputeStructAlignPad (uint8_t* plug, int requiredAlignment, size_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
2100 {
2101     return StructAlign (plug, requiredAlignment, alignmentOffset) - plug;
2102 }
2103
2104 BOOL IsStructAligned (uint8_t *ptr, int requiredAlignment)
2105 {
2106     return StructAlign (ptr, requiredAlignment) == ptr;
2107 }
2108
2109 inline
2110 ptrdiff_t ComputeMaxStructAlignPad (int requiredAlignment)
2111 {
2112     if (requiredAlignment == DATA_ALIGNMENT)
2113         return 0;
2114     // Since a non-zero alignment padding cannot be less than min_obj_size (so we can fit the
2115     // alignment padding object), the worst-case alignment padding is correspondingly larger
2116     // than the required alignment.
2117     return requiredAlignment + Align (min_obj_size) - DATA_ALIGNMENT;
2118 }
2119
2120 inline
2121 ptrdiff_t ComputeMaxStructAlignPadLarge (int requiredAlignment)
2122 {
2123     if (requiredAlignment <= get_alignment_constant (TRUE)+1)
2124         return 0;
2125     // This is the same as ComputeMaxStructAlignPad, except that in addition to leaving space
2126     // for padding before the actual object, it also leaves space for filling a gap after the
2127     // actual object.  This is needed on the large object heap, as the outer allocation functions
2128     // don't operate on an allocation context (which would have left space for the final gap).
2129     return requiredAlignment + Align (min_obj_size) * 2 - DATA_ALIGNMENT;
2130 }
2131
2132 uint8_t* gc_heap::pad_for_alignment (uint8_t* newAlloc, int requiredAlignment, size_t size, alloc_context* acontext)
2133 {
2134     uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
2135     if (alignedPtr != newAlloc) {
2136         make_unused_array (newAlloc, alignedPtr - newAlloc);
2137     }
2138     acontext->alloc_ptr = alignedPtr + Align (size);
2139     return alignedPtr;
2140 }
2141
2142 uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size)
2143 {
2144     uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
2145     if (alignedPtr != newAlloc) {
2146         make_unused_array (newAlloc, alignedPtr - newAlloc);
2147     }
2148     if (alignedPtr < newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment)) {
2149         make_unused_array (alignedPtr + AlignQword (size), newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment) - alignedPtr);
2150     }
2151     return alignedPtr;
2152 }
2153 #else // FEATURE_STRUCTALIGN
2154 #define ComputeMaxStructAlignPad(requiredAlignment) 0
2155 #define ComputeMaxStructAlignPadLarge(requiredAlignment) 0
2156 #endif // FEATURE_STRUCTALIGN
2157
2158 //CLR_SIZE is the maximum number of bytes from gen0 that is cleared to 0 in one chunk
2159 #ifdef SERVER_GC
2160 #define CLR_SIZE ((size_t)(8*1024))
2161 #else //SERVER_GC
2162 #define CLR_SIZE ((size_t)(8*1024))
2163 #endif //SERVER_GC
2164
2165 #define END_SPACE_AFTER_GC (loh_size_threshold + MAX_STRUCTALIGN)
2166
2167 #ifdef BACKGROUND_GC
2168 #define SEGMENT_INITIAL_COMMIT (2*OS_PAGE_SIZE)
2169 #else
2170 #define SEGMENT_INITIAL_COMMIT (OS_PAGE_SIZE)
2171 #endif //BACKGROUND_GC
2172
2173 #ifdef SERVER_GC
2174
2175 #ifdef BIT64
2176
2177 #define INITIAL_ALLOC ((size_t)((size_t)4*1024*1024*1024))
2178 #define LHEAP_ALLOC   ((size_t)(1024*1024*256))
2179
2180 #else
2181
2182 #define INITIAL_ALLOC ((size_t)(1024*1024*64))
2183 #define LHEAP_ALLOC   ((size_t)(1024*1024*32))
2184
2185 #endif  // BIT64
2186
2187 #else //SERVER_GC
2188
2189 #ifdef BIT64
2190
2191 #define INITIAL_ALLOC ((size_t)(1024*1024*256))
2192 #define LHEAP_ALLOC   ((size_t)(1024*1024*128))
2193
2194 #else
2195
2196 #define INITIAL_ALLOC ((size_t)(1024*1024*16))
2197 #define LHEAP_ALLOC   ((size_t)(1024*1024*16))
2198
2199 #endif  // BIT64
2200
2201 #endif //SERVER_GC
2202
2203 //amount in bytes of the etw allocation tick
2204 const size_t etw_allocation_tick = 100*1024;
2205
2206 const size_t low_latency_alloc = 256*1024;
2207
2208 const size_t fgn_check_quantum = 2*1024*1024;
2209
2210 #ifdef MH_SC_MARK
2211 const int max_snoop_level = 128;
2212 #endif //MH_SC_MARK
2213
2214
2215 #ifdef CARD_BUNDLE
2216 //threshold of heap size to turn on card bundles.
2217 #define SH_TH_CARD_BUNDLE  (40*1024*1024)
2218 #define MH_TH_CARD_BUNDLE  (180*1024*1024)
2219 #endif //CARD_BUNDLE
2220
2221 #define GC_EPHEMERAL_DECOMMIT_TIMEOUT 5000
2222
2223 inline
2224 size_t align_on_page (size_t add)
2225 {
2226     return ((add + OS_PAGE_SIZE - 1) & ~((size_t)OS_PAGE_SIZE - 1));
2227 }
2228
2229 inline
2230 uint8_t* align_on_page (uint8_t* add)
2231 {
2232     return (uint8_t*)align_on_page ((size_t) add);
2233 }
2234
2235 inline
2236 size_t align_lower_page (size_t add)
2237 {
2238     return (add & ~((size_t)OS_PAGE_SIZE - 1));
2239 }
2240
2241 inline
2242 uint8_t* align_lower_page (uint8_t* add)
2243 {
2244     return (uint8_t*)align_lower_page ((size_t)add);
2245 }
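// For example, with a 4096-byte OS_PAGE_SIZE: align_on_page (4097) == 8192,
// align_on_page (4096) == 4096, and align_lower_page (4097) == 4096.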
2246
2247 inline
2248 size_t align_write_watch_lower_page (size_t add)
2249 {
2250     return (add & ~(WRITE_WATCH_UNIT_SIZE - 1));
2251 }
2252
2253 inline
2254 uint8_t* align_write_watch_lower_page (uint8_t* add)
2255 {
2256     return (uint8_t*)align_write_watch_lower_page ((size_t)add);
2257 }
2258
2259
2260 inline
2261 BOOL power_of_two_p (size_t integer)
2262 {
2263     return !(integer & (integer-1));
2264 }
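// Note that by this definition 0 is also reported as a power of two, since
// 0 & (0-1) == 0.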
2265
2266 inline
2267 BOOL oddp (size_t integer)
2268 {
2269     return (integer & 1) != 0;
2270 }
2271
2272 // we only ever use this for WORDs.
2273 size_t logcount (size_t word)
2274 {
2275     //counts the number of set (1) bits in a 16 bit word.
2276     assert (word < 0x10000);
2277     size_t count;
2278     count = (word & 0x5555) + ( (word >> 1 ) & 0x5555);
2279     count = (count & 0x3333) + ( (count >> 2) & 0x3333);
2280     count = (count & 0x0F0F) + ( (count >> 4) & 0x0F0F);
2281     count = (count & 0x00FF) + ( (count >> 8) & 0x00FF);
2282     return count;
2283 }
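// Each step folds adjacent 1-, 2-, 4- and 8-bit partial sums together - a
// 16-bit population count. For example, logcount (0xF0F0) == 8.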
2284
2285 #ifndef DACCESS_COMPILE
2286
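// These helpers package up the current card table, heap bounds and (where
// enabled) software write watch state, and ask the EE via StompWriteBarrier
// to update the runtime's write barrier code to match.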
2287 void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check)
2288 {
2289     WriteBarrierParameters args = {};
2290     args.operation = WriteBarrierOp::StompResize;
2291     args.is_runtime_suspended = is_runtime_suspended;
2292     args.requires_upper_bounds_check = requires_upper_bounds_check;
2293
2294     args.card_table = g_gc_card_table;
2295 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
2296     args.card_bundle_table = g_gc_card_bundle_table;
2297 #endif
2298
2299     args.lowest_address = g_gc_lowest_address;
2300     args.highest_address = g_gc_highest_address;
2301
2302 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
2303     if (SoftwareWriteWatch::IsEnabledForGCHeap())
2304     {
2305         args.write_watch_table = g_gc_sw_ww_table;
2306     }
2307 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
2308
2309     GCToEEInterface::StompWriteBarrier(&args);
2310 }
2311
2312 void stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
2313 {
2314     WriteBarrierParameters args = {};
2315     args.operation = WriteBarrierOp::StompEphemeral;
2316     args.is_runtime_suspended = true;
2317     args.ephemeral_low = ephemeral_low;
2318     args.ephemeral_high = ephemeral_high;
2319     GCToEEInterface::StompWriteBarrier(&args);
2320 }
2321
2322 void stomp_write_barrier_initialize(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
2323 {
2324     WriteBarrierParameters args = {};
2325     args.operation = WriteBarrierOp::Initialize;
2326     args.is_runtime_suspended = true;
2327     args.requires_upper_bounds_check = false;
2328     args.card_table = g_gc_card_table;
2329
2330 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
2331     args.card_bundle_table = g_gc_card_bundle_table;
2332 #endif
2333     
2334     args.lowest_address = g_gc_lowest_address;
2335     args.highest_address = g_gc_highest_address;
2336     args.ephemeral_low = ephemeral_low;
2337     args.ephemeral_high = ephemeral_high;
2338     GCToEEInterface::StompWriteBarrier(&args);
2339 }
2340
2341 #endif // DACCESS_COMPILE
2342
2343 //extract the low bits [0, bits) of a uint32_t
2344 #define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1))
2345 //extract the high bits [bits, 32) of a uint32_t
2346 #define highbits(wrd, bits) ((wrd) & ~((1 << (bits))-1))
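// For example, lowbits (0xABCD, 4) == 0xD and highbits (0xABCD, 4) == 0xABC0.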
2347
2348 // Things we need to manually initialize:
2349 // gen0 min_size - based on cache
2350 // gen0/1 max_size - based on segment size
2351 static static_data static_data_table[latency_level_last - latency_level_first + 1][NUMBERGENERATIONS] = 
2352 {
2353     // latency_level_memory_footprint
2354     {
2355         // gen0
2356         {0, 0, 40000, 0.5f, 9.0f, 20.0f, 1000, 1},
2357         // gen1
2358         {163840, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
2359         // gen2
2360         {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
2361         // gen3
2362         {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
2363     },
2364
2365     // latency_level_balanced
2366     {
2367         // gen0
2368         {0, 0, 40000, 0.5f,
2369 #ifdef MULTIPLE_HEAPS
2370             20.0f, 40.0f,
2371 #else
2372             9.0f, 20.0f,
2373 #endif //MULTIPLE_HEAPS
2374             1000, 1},
2375         // gen1
2376         {9*32*1024, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
2377         // gen2
2378         {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
2379         // gen3
2380         {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
2381     },
2382 };
2383
2384 class mark;
2385 class generation;
2386 class heap_segment;
2387 class CObjectHeader;
2388 class dynamic_data;
2389 class l_heap;
2390 class sorted_table;
2391 class c_synchronize;
2392
2393 #ifdef FEATURE_PREMORTEM_FINALIZATION
2394 #ifndef DACCESS_COMPILE
2395 static
2396 HRESULT AllocateCFinalize(CFinalize **pCFinalize);
2397 #endif //!DACCESS_COMPILE
2398 #endif // FEATURE_PREMORTEM_FINALIZATION
2399
2400 uint8_t* tree_search (uint8_t* tree, uint8_t* old_address);
2401
2402
2403 #ifdef USE_INTROSORT
2404 #define _sort introsort::sort
2405 #else //USE_INTROSORT
2406 #define _sort qsort1
2407 void qsort1(uint8_t** low, uint8_t** high, unsigned int depth);
2408 #endif //USE_INTROSORT
2409
2410 void* virtual_alloc (size_t size);
2411 void virtual_free (void* add, size_t size);
2412
2413 /* per heap static initialization */
2414 #ifdef MARK_ARRAY
2415 #ifndef MULTIPLE_HEAPS
2416 uint32_t*   gc_heap::mark_array;
2417 #endif //MULTIPLE_HEAPS
2418 #endif //MARK_ARRAY
2419
2420 #ifdef MARK_LIST
2421 uint8_t**   gc_heap::g_mark_list;
2422
2423 #ifdef PARALLEL_MARK_LIST_SORT
2424 uint8_t**   gc_heap::g_mark_list_copy;
2425 #endif //PARALLEL_MARK_LIST_SORT
2426
2427 size_t      gc_heap::mark_list_size;
2428 #endif //MARK_LIST
2429
2430 #ifdef SEG_MAPPING_TABLE
2431 seg_mapping* seg_mapping_table;
2432 #endif //SEG_MAPPING_TABLE
2433
2434 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2435 sorted_table* gc_heap::seg_table;
2436 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2437
2438 #ifdef MULTIPLE_HEAPS
2439 GCEvent     gc_heap::ee_suspend_event;
2440 size_t      gc_heap::min_balance_threshold = 0;
2441 #endif //MULTIPLE_HEAPS
2442
2443 VOLATILE(BOOL) gc_heap::gc_started;
2444
2445 #ifdef MULTIPLE_HEAPS
2446
2447 GCEvent     gc_heap::gc_start_event;
2448 bool        gc_heap::gc_thread_no_affinitize_p = false;
2449 uintptr_t   process_mask = 0;
2450
2451 int         gc_heap::n_heaps;
2452
2453 gc_heap**   gc_heap::g_heaps;
2454
2455 size_t*     gc_heap::g_promoted;
2456
2457 #ifdef MH_SC_MARK
2458 int*        gc_heap::g_mark_stack_busy;
2459 #endif //MH_SC_MARK
2460
2461
2462 #ifdef BACKGROUND_GC
2463 size_t*     gc_heap::g_bpromoted;
2464 #endif //BACKGROUND_GC
2465
2466 #else  //MULTIPLE_HEAPS
2467
2468 size_t      gc_heap::g_promoted;
2469
2470 #ifdef BACKGROUND_GC
2471 size_t      gc_heap::g_bpromoted;
2472 #endif //BACKGROUND_GC
2473
2474 #endif //MULTIPLE_HEAPS
2475
2476 size_t      gc_heap::reserved_memory = 0;
2477 size_t      gc_heap::reserved_memory_limit = 0;
2478 BOOL        gc_heap::g_low_memory_status;
2479
2480 #ifndef DACCESS_COMPILE
2481 static gc_reason gc_trigger_reason = reason_empty;
2482 #endif //DACCESS_COMPILE
2483
2484 gc_latency_level gc_heap::latency_level = latency_level_default;
2485
2486 gc_mechanisms  gc_heap::settings;
2487
2488 gc_history_global gc_heap::gc_data_global;
2489
2490 size_t      gc_heap::gc_last_ephemeral_decommit_time = 0;
2491
2492 size_t      gc_heap::gc_gen0_desired_high;
2493
2494 #ifdef SHORT_PLUGS
2495 double       gc_heap::short_plugs_pad_ratio = 0;
2496 #endif //SHORT_PLUGS
2497
2498 #if defined(BIT64)
2499 #define MAX_ALLOWED_MEM_LOAD 85
2500
2501 // consider putting this in dynamic data -
2502 // we may want different values for workstation
2503 // and server GC.
2504 #define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024)
2505
2506 size_t      gc_heap::youngest_gen_desired_th;
2507 #endif //BIT64
2508
2509 uint32_t    gc_heap::last_gc_memory_load = 0;
2510
2511 size_t      gc_heap::last_gc_heap_size = 0;
2512
2513 size_t      gc_heap::last_gc_fragmentation = 0;
2514
2515 uint64_t    gc_heap::mem_one_percent = 0;
2516
2517 uint32_t    gc_heap::high_memory_load_th = 0;
2518
2519 uint32_t    gc_heap::m_high_memory_load_th;
2520
2521 uint32_t    gc_heap::v_high_memory_load_th;
2522
2523 uint64_t    gc_heap::total_physical_mem = 0;
2524
2525 uint64_t    gc_heap::entry_available_physical_mem = 0;
2526
2527 #ifdef BACKGROUND_GC
2528 GCEvent     gc_heap::bgc_start_event;
2529
2530 gc_mechanisms gc_heap::saved_bgc_settings;
2531
2532 GCEvent     gc_heap::background_gc_done_event;
2533
2534 GCEvent     gc_heap::ee_proceed_event;
2535
2536 bool        gc_heap::gc_can_use_concurrent = false;
2537
2538 bool        gc_heap::temp_disable_concurrent_p = false;
2539
2540 uint32_t    gc_heap::cm_in_progress = FALSE;
2541
2542 BOOL        gc_heap::dont_restart_ee_p = FALSE;
2543
2544 BOOL        gc_heap::keep_bgc_threads_p = FALSE;
2545
2546 GCEvent     gc_heap::bgc_threads_sync_event;
2547
2548 BOOL        gc_heap::do_ephemeral_gc_p = FALSE;
2549
2550 BOOL        gc_heap::do_concurrent_p = FALSE;
2551
2552 size_t      gc_heap::ephemeral_fgc_counts[max_generation];
2553
2554 BOOL        gc_heap::alloc_wait_event_p = FALSE;
2555
2556 VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;
2557
2558 #endif //BACKGROUND_GC
2559
2560 #ifndef MULTIPLE_HEAPS
2561 #ifdef SPINLOCK_HISTORY
2562 int         gc_heap::spinlock_info_index = 0;
2563 spinlock_info gc_heap::last_spinlock_info[max_saved_spinlock_info + 8];
2564 #endif //SPINLOCK_HISTORY
2565
2566 size_t      gc_heap::fgn_last_alloc = 0;
2567
2568 int         gc_heap::generation_skip_ratio = 100;
2569
2570 uint64_t    gc_heap::loh_alloc_since_cg = 0;
2571
2572 BOOL        gc_heap::elevation_requested = FALSE;
2573
2574 BOOL        gc_heap::last_gc_before_oom = FALSE;
2575
2576 BOOL        gc_heap::sufficient_gen0_space_p = FALSE;
2577
2578 #ifdef BACKGROUND_GC
2579 uint8_t*    gc_heap::background_saved_lowest_address = 0;
2580 uint8_t*    gc_heap::background_saved_highest_address = 0;
2581 uint8_t*    gc_heap::next_sweep_obj = 0;
2582 uint8_t*    gc_heap::current_sweep_pos = 0;
2583 exclusive_sync* gc_heap::bgc_alloc_lock;
2584 #endif //BACKGROUND_GC
2585
2586 oom_history gc_heap::oom_info;
2587
2588 fgm_history gc_heap::fgm_result;
2589
2590 BOOL        gc_heap::ro_segments_in_range;
2591
2592 size_t      gc_heap::gen0_big_free_spaces = 0;
2593
2594 uint8_t*    gc_heap::ephemeral_low;
2595
2596 uint8_t*    gc_heap::ephemeral_high;
2597
2598 uint8_t*    gc_heap::lowest_address;
2599
2600 uint8_t*    gc_heap::highest_address;
2601
2602 BOOL        gc_heap::ephemeral_promotion;
2603
2604 uint8_t*    gc_heap::saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
2605 size_t      gc_heap::saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
2606
2607 short*      gc_heap::brick_table;
2608
2609 uint32_t*   gc_heap::card_table;
2610
2611 #ifdef CARD_BUNDLE
2612 uint32_t*   gc_heap::card_bundle_table;
2613 #endif //CARD_BUNDLE
2614
2615 uint8_t*    gc_heap::gc_low;
2616
2617 uint8_t*    gc_heap::gc_high;
2618
2619 uint8_t*    gc_heap::demotion_low;
2620
2621 uint8_t*    gc_heap::demotion_high;
2622
2623 BOOL        gc_heap::demote_gen1_p = TRUE;
2624
2625 uint8_t*    gc_heap::last_gen1_pin_end;
2626
2627 gen_to_condemn_tuning gc_heap::gen_to_condemn_reasons;
2628
2629 size_t      gc_heap::etw_allocation_running_amount[2];
2630
2631 int         gc_heap::gc_policy = 0;
2632
2633 size_t      gc_heap::allocation_running_time;
2634
2635 size_t      gc_heap::allocation_running_amount;
2636
2637 heap_segment* gc_heap::ephemeral_heap_segment = 0;
2638
2639 BOOL        gc_heap::blocking_collection = FALSE;
2640
2641 heap_segment* gc_heap::freeable_large_heap_segment = 0;
2642
2643 size_t      gc_heap::time_bgc_last = 0;
2644
2645 size_t      gc_heap::mark_stack_tos = 0;
2646
2647 size_t      gc_heap::mark_stack_bos = 0;
2648
2649 size_t      gc_heap::mark_stack_array_length = 0;
2650
2651 mark*       gc_heap::mark_stack_array = 0;
2652
2653 #if defined (_DEBUG) && defined (VERIFY_HEAP)
2654 BOOL        gc_heap::verify_pinned_queue_p = FALSE;
2655 #endif // defined (_DEBUG) && defined (VERIFY_HEAP)
2656
2657 uint8_t*    gc_heap::oldest_pinned_plug = 0;
2658
2659 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
2660 size_t      gc_heap::num_pinned_objects = 0;
2661 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
2662
2663 #ifdef FEATURE_LOH_COMPACTION
2664 size_t      gc_heap::loh_pinned_queue_tos = 0;
2665
2666 size_t      gc_heap::loh_pinned_queue_bos = 0;
2667
2668 size_t      gc_heap::loh_pinned_queue_length = 0;
2669
2670 mark*       gc_heap::loh_pinned_queue = 0;
2671
2672 BOOL        gc_heap::loh_compacted_p = FALSE;
2673 #endif //FEATURE_LOH_COMPACTION
2674
2675 #ifdef BACKGROUND_GC
2676
2677 EEThreadId  gc_heap::bgc_thread_id;
2678
2679 uint8_t*    gc_heap::background_written_addresses [array_size+2];
2680
2681 heap_segment* gc_heap::freeable_small_heap_segment = 0;
2682
2683 size_t      gc_heap::bgc_overflow_count = 0;
2684
2685 size_t      gc_heap::bgc_begin_loh_size = 0;
2686 size_t      gc_heap::end_loh_size = 0;
2687
2688 uint32_t    gc_heap::bgc_alloc_spin_loh = 0;
2689
2690 size_t      gc_heap::bgc_loh_size_increased = 0;
2691
2692 size_t      gc_heap::bgc_loh_allocated_in_free = 0;
2693
2694 size_t      gc_heap::background_soh_alloc_count = 0;
2695
2696 size_t      gc_heap::background_loh_alloc_count = 0;
2697
2698 uint8_t**   gc_heap::background_mark_stack_tos = 0;
2699
2700 uint8_t**   gc_heap::background_mark_stack_array = 0;
2701
2702 size_t      gc_heap::background_mark_stack_array_length = 0;
2703
2704 uint8_t*    gc_heap::background_min_overflow_address = 0;
2705
2706 uint8_t*    gc_heap::background_max_overflow_address = 0;
2707
2708 BOOL        gc_heap::processed_soh_overflow_p = FALSE;
2709
2710 uint8_t*    gc_heap::background_min_soh_overflow_address = 0;
2711
2712 uint8_t*    gc_heap::background_max_soh_overflow_address = 0;
2713
2714 heap_segment* gc_heap::saved_sweep_ephemeral_seg = 0;
2715
2716 uint8_t*    gc_heap::saved_sweep_ephemeral_start = 0;
2717
2718 heap_segment* gc_heap::saved_overflow_ephemeral_seg = 0;
2719
2720 Thread*     gc_heap::bgc_thread = 0;
2721
2722 BOOL        gc_heap::expanded_in_fgc = FALSE;
2723
2724 uint8_t**   gc_heap::c_mark_list = 0;
2725
2726 size_t      gc_heap::c_mark_list_length = 0;
2727
2728 size_t      gc_heap::c_mark_list_index = 0;
2729
2730 gc_history_per_heap gc_heap::bgc_data_per_heap;
2731
2732 BOOL    gc_heap::bgc_thread_running;
2733
2734 CLRCriticalSection gc_heap::bgc_threads_timeout_cs;
2735
2736 GCEvent gc_heap::gc_lh_block_event;
2737
2738 #endif //BACKGROUND_GC
2739
2740 #ifdef MARK_LIST
2741 uint8_t**   gc_heap::mark_list;
2742 uint8_t**   gc_heap::mark_list_index;
2743 uint8_t**   gc_heap::mark_list_end;
2744 #endif //MARK_LIST
2745
2746 #ifdef SNOOP_STATS
2747 snoop_stats_data gc_heap::snoop_stat;
2748 #endif //SNOOP_STATS
2749
2750 uint8_t*    gc_heap::min_overflow_address = MAX_PTR;
2751
2752 uint8_t*    gc_heap::max_overflow_address = 0;
2753
2754 uint8_t*    gc_heap::shigh = 0;
2755
2756 uint8_t*    gc_heap::slow = MAX_PTR;
2757
2758 size_t      gc_heap::ordered_free_space_indices[MAX_NUM_BUCKETS];
2759
2760 size_t      gc_heap::saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
2761
2762 size_t      gc_heap::ordered_plug_indices[MAX_NUM_BUCKETS];
2763
2764 size_t      gc_heap::saved_ordered_plug_indices[MAX_NUM_BUCKETS];
2765
2766 BOOL        gc_heap::ordered_plug_indices_init = FALSE;
2767
2768 BOOL        gc_heap::use_bestfit = FALSE;
2769
2770 uint8_t*    gc_heap::bestfit_first_pin = 0;
2771
2772 BOOL        gc_heap::commit_end_of_seg = FALSE;
2773
2774 size_t      gc_heap::max_free_space_items = 0;
2775
2776 size_t      gc_heap::free_space_buckets = 0;
2777
2778 size_t      gc_heap::free_space_items = 0;
2779
2780 int         gc_heap::trimmed_free_space_index = 0;
2781
2782 size_t      gc_heap::total_ephemeral_plugs = 0;
2783
2784 seg_free_spaces* gc_heap::bestfit_seg = 0;
2785
2786 size_t      gc_heap::total_ephemeral_size = 0;
2787
2788 #ifdef HEAP_ANALYZE
2789
2790 size_t      gc_heap::internal_root_array_length = initial_internal_roots;
2791
2792 uint8_t**   gc_heap::internal_root_array = 0;
2793
2794 size_t      gc_heap::internal_root_array_index = 0;
2795
2796 BOOL        gc_heap::heap_analyze_success = TRUE;
2797
2798 uint8_t*    gc_heap::current_obj = 0;
2799 size_t      gc_heap::current_obj_size = 0;
2800
2801 #endif //HEAP_ANALYZE
2802
2803 #ifdef GC_CONFIG_DRIVEN
2804 size_t gc_heap::interesting_data_per_gc[max_idp_count];
2805 //size_t gc_heap::interesting_data_per_heap[max_idp_count];
2806 //size_t gc_heap::interesting_mechanisms_per_heap[max_im_count];
2807 #endif //GC_CONFIG_DRIVEN
2808 #endif //MULTIPLE_HEAPS
2809
2810 no_gc_region_info gc_heap::current_no_gc_region_info;
2811 BOOL gc_heap::proceed_with_gc_p = FALSE;
2812 GCSpinLock gc_heap::gc_lock;
2813
2814 size_t gc_heap::eph_gen_starts_size = 0;
2815 heap_segment* gc_heap::segment_standby_list;
2816 size_t        gc_heap::last_gc_index = 0;
2817 #ifdef SEG_MAPPING_TABLE
2818 size_t        gc_heap::min_segment_size = 0;
2819 size_t        gc_heap::min_segment_size_shr = 0;
2820 #endif //SEG_MAPPING_TABLE
2821 size_t        gc_heap::soh_segment_size = 0;
2822 size_t        gc_heap::min_loh_segment_size = 0;
2823 size_t        gc_heap::segment_info_size = 0;
2824
2825 #ifdef GC_CONFIG_DRIVEN
2826 size_t gc_heap::time_init = 0;
2827 size_t gc_heap::time_since_init = 0;
2828 size_t gc_heap::compact_or_sweep_gcs[2];
2829 #endif //GC_CONFIG_DRIVEN
2830
2831 #ifdef FEATURE_LOH_COMPACTION
2832 BOOL                   gc_heap::loh_compaction_always_p = FALSE;
2833 gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default;
2834 int                    gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY;
2835
2836 #endif //FEATURE_LOH_COMPACTION
2837
2838 GCEvent gc_heap::full_gc_approach_event;
2839
2840 GCEvent gc_heap::full_gc_end_event;
2841
2842 uint32_t gc_heap::fgn_maxgen_percent = 0;
2843
2844 uint32_t gc_heap::fgn_loh_percent = 0;
2845
2846 #ifdef BACKGROUND_GC
2847 BOOL gc_heap::fgn_last_gc_was_concurrent = FALSE;
2848 #endif //BACKGROUND_GC
2849
2850 VOLATILE(bool) gc_heap::full_gc_approach_event_set;
2851
2852 size_t gc_heap::full_gc_counts[gc_type_max];
2853
2854 bool gc_heap::maxgen_size_inc_p = false;
2855
2856 BOOL gc_heap::should_expand_in_full_gc = FALSE;
2857
2858 // Provisional mode related stuff.
2859 bool gc_heap::provisional_mode_triggered = false;
2860 bool gc_heap::pm_trigger_full_gc = false;
2861 size_t gc_heap::provisional_triggered_gc_count = 0;
2862 size_t gc_heap::provisional_off_gc_count = 0;
2863 size_t gc_heap::num_provisional_triggered = 0;
2864 bool   gc_heap::pm_stress_on = false;
2865
2866 #ifdef HEAP_ANALYZE
2867 BOOL        gc_heap::heap_analyze_enabled = FALSE;
2868 #endif //HEAP_ANALYZE
2869
2870 #ifndef MULTIPLE_HEAPS
2871
2872 alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1];
2873 alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1];
2874
2875 dynamic_data gc_heap::dynamic_data_table [NUMBERGENERATIONS+1];
2876 gc_history_per_heap gc_heap::gc_data_per_heap;
2877 size_t gc_heap::maxgen_pinned_compact_before_advance = 0;
2878
2879 uint8_t* gc_heap::alloc_allocated = 0;
2880
2881 size_t gc_heap::allocation_quantum = CLR_SIZE;
2882
2883 GCSpinLock gc_heap::more_space_lock_soh;
2884 GCSpinLock gc_heap::more_space_lock_loh;
2885 VOLATILE(int32_t) gc_heap::loh_alloc_thread_count = 0;
2886
2887 #ifdef SYNCHRONIZATION_STATS
2888 unsigned int gc_heap::good_suspension = 0;
2889 unsigned int gc_heap::bad_suspension = 0;
2890 uint64_t     gc_heap::total_msl_acquire = 0;
2891 unsigned int gc_heap::num_msl_acquired = 0;
2892 unsigned int gc_heap::num_high_msl_acquire = 0;
2893 unsigned int gc_heap::num_low_msl_acquire = 0;
2894 #endif //SYNCHRONIZATION_STATS
2895
2896 size_t   gc_heap::alloc_contexts_used = 0;
2897 size_t   gc_heap::soh_allocation_no_gc = 0;
2898 size_t   gc_heap::loh_allocation_no_gc = 0;
2899 bool     gc_heap::no_gc_oom_p = false;
2900 heap_segment* gc_heap::saved_loh_segment_no_gc = 0;
2901
2902 #endif //MULTIPLE_HEAPS
2903
2904 #ifndef MULTIPLE_HEAPS
2905
2906 BOOL        gc_heap::gen0_bricks_cleared = FALSE;
2907
2908 #ifdef FFIND_OBJECT
2909 int         gc_heap::gen0_must_clear_bricks = 0;
2910 #endif //FFIND_OBJECT
2911
2912 #ifdef FEATURE_PREMORTEM_FINALIZATION
2913 CFinalize*  gc_heap::finalize_queue = 0;
2914 #endif // FEATURE_PREMORTEM_FINALIZATION
2915
2916 generation gc_heap::generation_table [NUMBERGENERATIONS + 1];
2917
2918 size_t     gc_heap::interesting_data_per_heap[max_idp_count];
2919
2920 size_t     gc_heap::compact_reasons_per_heap[max_compact_reasons_count];
2921
2922 size_t     gc_heap::expand_mechanisms_per_heap[max_expand_mechanisms_count];
2923
2924 size_t     gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
2925
2926 #endif // MULTIPLE_HEAPS
2927
2928 /* end of per heap static initialization */
2929
2930 /* end of static initialization */
2931
2932 #ifndef DACCESS_COMPILE
2933
2934 void gen_to_condemn_tuning::print (int heap_num)
2935 {
2936 #ifdef DT_LOG
2937     dprintf (DT_LOG_0, ("condemned reasons (%d %d)", condemn_reasons_gen, condemn_reasons_condition));
2938     dprintf (DT_LOG_0, ("%s", record_condemn_reasons_gen_header));
2939     gc_condemn_reason_gen r_gen;
2940     for (int i = 0; i < gcrg_max; i++)
2941     {
2942         r_gen = (gc_condemn_reason_gen)(i);
2943         str_reasons_gen[i * 2] = get_gen_char (get_gen (r_gen));
2944     }
2945     dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_gen));
2946
2947     dprintf (DT_LOG_0, ("%s", record_condemn_reasons_condition_header));
2948     gc_condemn_reason_condition r_condition;
2949     for (int i = 0; i < gcrc_max; i++)
2950     {
2951         r_condition = (gc_condemn_reason_condition)(i);
2952         str_reasons_condition[i * 2] = get_condition_char (get_condition (r_condition));
2953     }
2954
2955     dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_condition));
2956 #else
2957     UNREFERENCED_PARAMETER(heap_num);
2958 #endif //DT_LOG
2959 }
2960
2961 void gc_generation_data::print (int heap_num, int gen_num)
2962 {
2963 #if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
2964     dprintf (DT_LOG_0, ("[%2d]gen%d beg %Id fl %Id fo %Id end %Id fl %Id fo %Id in %Id p %Id np %Id alloc %Id",
2965                 heap_num, gen_num, 
2966                 size_before, 
2967                 free_list_space_before, free_obj_space_before,
2968                 size_after, 
2969                 free_list_space_after, free_obj_space_after, 
2970                 in, pinned_surv, npinned_surv,
2971                 new_allocation));
2972 #else
2973     UNREFERENCED_PARAMETER(heap_num);
2974     UNREFERENCED_PARAMETER(gen_num);
2975 #endif //SIMPLE_DPRINTF && DT_LOG
2976 }
2977
2978 void gc_history_per_heap::set_mechanism (gc_mechanism_per_heap mechanism_per_heap, uint32_t value)
2979 {
2980     uint32_t* mechanism = &mechanisms[mechanism_per_heap];
2981     *mechanism = 0;
2982     *mechanism |= mechanism_mask;
2983     *mechanism |= (1 << value);
2984
2985 #ifdef DT_LOG
2986     gc_mechanism_descr* descr = &gc_mechanisms_descr[mechanism_per_heap];
2987     dprintf (DT_LOG_0, ("setting %s: %s", 
2988             descr->name,
2989             (descr->descr)[value]));
2990 #endif //DT_LOG
2991 }
2992
2993 void gc_history_per_heap::print()
2994 {
2995 #if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
2996     for (int i = 0; i < (sizeof (gen_data)/sizeof (gc_generation_data)); i++)
2997     {
2998         gen_data[i].print (heap_index, i);
2999     }
3000
3001     dprintf (DT_LOG_0, ("fla %Id flr %Id esa %Id ca %Id pa %Id paa %Id, rfle %d, ec %Id", 
3002                     maxgen_size_info.free_list_allocated,
3003                     maxgen_size_info.free_list_rejected,
3004                     maxgen_size_info.end_seg_allocated,
3005                     maxgen_size_info.condemned_allocated,
3006                     maxgen_size_info.pinned_allocated,
3007                     maxgen_size_info.pinned_allocated_advance,
3008                     maxgen_size_info.running_free_list_efficiency,
3009                     extra_gen0_committed));
3010
3011     int mechanism = 0;
3012     gc_mechanism_descr* descr = 0;
3013
3014     for (int i = 0; i < max_mechanism_per_heap; i++)
3015     {
3016         mechanism = get_mechanism ((gc_mechanism_per_heap)i);
3017
3018         if (mechanism >= 0)
3019         {
3020             descr = &gc_mechanisms_descr[(gc_mechanism_per_heap)i];
3021             dprintf (DT_LOG_0, ("[%2d]%s%s", 
3022                         heap_index,
3023                         descr->name, 
3024                         (descr->descr)[mechanism]));
3025         }
3026     }
3027 #endif //SIMPLE_DPRINTF && DT_LOG
3028 }
3029
3030 void gc_history_global::print()
3031 {
3032 #ifdef DT_LOG
3033     char str_settings[64];
3034     memset (str_settings, '|', sizeof (char) * 64);
3035     str_settings[max_global_mechanisms_count*2] = 0;
3036
3037     for (int i = 0; i < max_global_mechanisms_count; i++)
3038     {
3039         str_settings[i * 2] = (get_mechanism_p ((gc_global_mechanism_p)i) ? 'Y' : 'N');
3040     }
3041
3042     dprintf (DT_LOG_0, ("[hp]|c|p|o|d|b|e|"));
3043
3044     dprintf (DT_LOG_0, ("%4d|%s", num_heaps, str_settings));
3045     dprintf (DT_LOG_0, ("Condemned gen%d(reason: %s; mode: %s), youngest budget %Id(%d), memload %d",
3046                         condemned_generation,
3047                         str_gc_reasons[reason],
3048                         str_gc_pause_modes[pause_mode],                        
3049                         final_youngest_desired,
3050                         gen0_reduction_count,
3051                         mem_pressure));
3052 #endif //DT_LOG
3053 }
3054
3055 void gc_heap::fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num)
3056 {
3057     maxgen_size_increase* maxgen_size_info = &(current_gc_data_per_heap->maxgen_size_info);
3058     FIRE_EVENT(GCPerHeapHistory_V3, 
3059                (void *)(maxgen_size_info->free_list_allocated),
3060                (void *)(maxgen_size_info->free_list_rejected),                              
3061                (void *)(maxgen_size_info->end_seg_allocated),
3062                (void *)(maxgen_size_info->condemned_allocated),
3063                (void *)(maxgen_size_info->pinned_allocated),
3064                (void *)(maxgen_size_info->pinned_allocated_advance),
3065                maxgen_size_info->running_free_list_efficiency,
3066                current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons0(),
3067                current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons1(),
3068                current_gc_data_per_heap->mechanisms[gc_heap_compact],
3069                current_gc_data_per_heap->mechanisms[gc_heap_expand],
3070                current_gc_data_per_heap->heap_index,
3071                (void *)(current_gc_data_per_heap->extra_gen0_committed),
3072                (max_generation + 2),
3073                (uint32_t)(sizeof (gc_generation_data)),
3074                (void *)&(current_gc_data_per_heap->gen_data[0]));
3075
3076     current_gc_data_per_heap->print();
3077     current_gc_data_per_heap->gen_to_condemn_reasons.print (heap_num);
3078 }
3079
3080 void gc_heap::fire_pevents()
3081 {
3082     settings.record (&gc_data_global);
3083     gc_data_global.print();
3084
3085     FIRE_EVENT(GCGlobalHeapHistory_V2, 
3086                gc_data_global.final_youngest_desired, 
3087                gc_data_global.num_heaps, 
3088                gc_data_global.condemned_generation, 
3089                gc_data_global.gen0_reduction_count, 
3090                gc_data_global.reason, 
3091                gc_data_global.global_mechanims_p, 
3092                gc_data_global.pause_mode, 
3093                gc_data_global.mem_pressure);
3094
3095 #ifdef MULTIPLE_HEAPS
3096     for (int i = 0; i < gc_heap::n_heaps; i++)
3097     {
3098         gc_heap* hp = gc_heap::g_heaps[i];
3099         gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
3100         fire_per_heap_hist_event (current_gc_data_per_heap, hp->heap_number);
3101     }
3102 #else
3103     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
3104     fire_per_heap_hist_event (current_gc_data_per_heap, heap_number);
3105 #endif    
3106 }
3107
3108 inline BOOL
3109 gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp)
3110 {
3111     BOOL ret = FALSE;
3112
3113     switch (tp)
3114     {
3115         case tuning_deciding_condemned_gen:
3116         case tuning_deciding_compaction:
3117         case tuning_deciding_expansion:
3118         case tuning_deciding_full_gc:
3119         {
3120             ret = (!ephemeral_gen_fit_p (tp));
3121             break;
3122         }
3123         case tuning_deciding_promote_ephemeral:
3124         {
3125             size_t new_gen0size = approximate_new_allocation();
3126             ptrdiff_t plan_ephemeral_size = total_ephemeral_size;
3127             
3128             dprintf (GTC_LOG, ("h%d: plan eph size is %Id, new gen0 is %Id", 
3129                 heap_number, plan_ephemeral_size, new_gen0size));
3130             // If we were in no_gc_region we could have allocated a larger than normal segment,
3131             // and the next seg we allocate will be a normal sized seg so if we can't fit the new
3132             // ephemeral generations there, do an ephemeral promotion.
3133             ret = ((soh_segment_size - segment_info_size) < (plan_ephemeral_size + new_gen0size));
3134             break;
3135         }
3136         default:
3137             break;
3138     }
3139
3140     return ret;
3141 }
3142
3143 BOOL 
3144 gc_heap::dt_high_frag_p (gc_tuning_point tp, 
3145                          int gen_number, 
3146                          BOOL elevate_p)
3147 {
3148     BOOL ret = FALSE;
3149
3150     switch (tp)
3151     {
3152         case tuning_deciding_condemned_gen:
3153         {
3154             dynamic_data* dd = dynamic_data_of (gen_number);
3155             float fragmentation_burden = 0;
3156
3157             if (elevate_p)
3158             {
3159                 ret = (dd_fragmentation (dynamic_data_of (max_generation)) >= dd_max_size(dd));
3160                 dprintf (GTC_LOG, ("h%d: frag is %Id, max size is %Id",
3161                     heap_number, dd_fragmentation (dd), dd_max_size(dd)));
3162             }
3163             else
3164             {
3165 #ifndef MULTIPLE_HEAPS
3166                 if (gen_number == max_generation)
3167                 {
3168                     float frag_ratio = (float)(dd_fragmentation (dynamic_data_of (max_generation))) / (float)generation_size (max_generation);
3169                     if (frag_ratio > 0.65)
3170                     {
3171                         dprintf (GTC_LOG, ("g2 FR: %d%%", (int)(frag_ratio*100)));
3172                         return TRUE;
3173                     }
3174                 }
3175 #endif //!MULTIPLE_HEAPS
3176                 size_t fr = generation_unusable_fragmentation (generation_of (gen_number));
3177                 ret = (fr > dd_fragmentation_limit(dd));
3178                 if (ret)
3179                 {
3180                     fragmentation_burden = (float)fr / generation_size (gen_number);
3181                     ret = (fragmentation_burden > dd_v_fragmentation_burden_limit (dd));
3182                 }
3183                 dprintf (GTC_LOG, ("h%d: gen%d, frag is %Id, alloc effi: %d%%, unusable frag is %Id, ratio is %d",
3184                     heap_number, gen_number, dd_fragmentation (dd), 
3185                     (int)(100*generation_allocator_efficiency (generation_of (gen_number))),
3186                     fr, (int)(fragmentation_burden*100)));
3187             }
3188             break;
3189         }
3190         default:
3191             break;
3192     }
3193
3194     return ret;
3195 }
3196
3197 inline BOOL 
3198 gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number)
3199 {
3200     BOOL ret = FALSE;
3201
3202     switch (tp)
3203     {
3204         case tuning_deciding_condemned_gen:
3205         {
3206             if (gen_number == max_generation)
3207             {
3208                 dynamic_data* dd = dynamic_data_of (gen_number);
3209                 size_t maxgen_allocated = (dd_desired_allocation (dd) - dd_new_allocation (dd));
3210                 size_t maxgen_total_size = maxgen_allocated + dd_current_size (dd);
3211                 size_t est_maxgen_surv = (size_t)((float) (maxgen_total_size) * dd_surv (dd));
3212                 size_t est_maxgen_free = maxgen_total_size - est_maxgen_surv + dd_fragmentation (dd);
3213
3214                 dprintf (GTC_LOG, ("h%d: Total gen2 size: %Id, est gen2 dead space: %Id (s: %d, allocated: %Id), frag: %Id",
3215                             heap_number,
3216                             maxgen_total_size,
3217                             est_maxgen_free, 
3218                             (int)(dd_surv (dd) * 100),
3219                             maxgen_allocated,
3220                             dd_fragmentation (dd)));
3221
3222                 uint32_t num_heaps = 1;
3223
3224 #ifdef MULTIPLE_HEAPS
3225                 num_heaps = gc_heap::n_heaps;
3226 #endif //MULTIPLE_HEAPS
3227
3228                 size_t min_frag_th = min_reclaim_fragmentation_threshold (num_heaps);
3229                 dprintf (GTC_LOG, ("h%d, min frag is %Id", heap_number, min_frag_th));
3230                 ret = (est_maxgen_free >= min_frag_th);
3231             }
3232             else
3233             {
3234                 assert (0);
3235             }
3236             break;
3237         }
3238
3239         default:
3240             break;
3241     }
3242
3243     return ret;
3244 }
3245
3246 // DTREVIEW: Right now we only estimate gen2 fragmentation;
3247 // on 64-bit, though, we should consider gen1 or even gen0 fragmentation as
3248 // well.
3249 inline BOOL 
3250 gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem)
3251 {
3252     BOOL ret = FALSE;
3253
3254     switch (tp)
3255     {
3256         case tuning_deciding_condemned_gen:
3257         {
3258             if (gen_number == max_generation)
3259             {
3260                 dynamic_data* dd = dynamic_data_of (gen_number);
3261                 float est_frag_ratio = 0;
3262                 if (dd_current_size (dd) == 0)
3263                 {
3264                     est_frag_ratio = 1;
3265                 }
3266                 else if ((dd_fragmentation (dd) == 0) || (dd_fragmentation (dd) + dd_current_size (dd) == 0))
3267                 {
3268                     est_frag_ratio = 0;
3269                 }
3270                 else
3271                 {
3272                     est_frag_ratio = (float)dd_fragmentation (dd) / (float)(dd_fragmentation (dd) + dd_current_size (dd));
3273                 }
3274                 
3275                 size_t est_frag = (dd_fragmentation (dd) + (size_t)((dd_desired_allocation (dd) - dd_new_allocation (dd)) * est_frag_ratio));
3276                 dprintf (GTC_LOG, ("h%d: gen%d: current_size is %Id, frag is %Id, est_frag_ratio is %d%%, estimated frag is %Id", 
3277                     heap_number,
3278                     gen_number,
3279                     dd_current_size (dd),
3280                     dd_fragmentation (dd),
3281                     (int)(est_frag_ratio*100),
3282                     est_frag));
3283
3284                 uint32_t num_heaps = 1;
3285
3286 #ifdef MULTIPLE_HEAPS
3287                 num_heaps = gc_heap::n_heaps;
3288 #endif //MULTIPLE_HEAPS
3289                 uint64_t min_frag_th = min_high_fragmentation_threshold(available_mem, num_heaps);
3290                 //dprintf (GTC_LOG, ("h%d, min frag is %I64d", heap_number, min_frag_th));
3291                 ret = (est_frag >= min_frag_th);
3292             }
3293             else
3294             {
3295                 assert (0);
3296             }
3297             break;
3298         }
3299
3300         default:
3301             break;
3302     }
3303
3304     return ret;
3305 }
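// Worked example for dt_estimate_high_frag_p above (illustrative numbers):
// with gen2 fragmentation at 100MB and current_size at 400MB, est_frag_ratio =
// 100 / (100 + 400) = 0.2; if 50MB was allocated into gen2 this cycle
// (desired_allocation - new_allocation), the estimate adds 50 * 0.2 = 10MB,
// giving est_frag = 110MB to compare against
// min_high_fragmentation_threshold (available_mem, num_heaps).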
3306
3307 inline BOOL 
3308 gc_heap::dt_low_card_table_efficiency_p (gc_tuning_point tp)
3309 {
3310     BOOL ret = FALSE;
3311
3312     switch (tp)
3313     {
3314     case tuning_deciding_condemned_gen:
3315     {
3316         /* promote into max-generation if the card table has too many
3317          * cross-generation faults besides the n -> 0 ones
3318          */
3319         ret = (generation_skip_ratio < 30);
3320         break;
3321     }
3322
3323     default:
3324         break;
3325     }
3326
3327     return ret;
3328 }
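// Note on the 30 threshold above: generation_skip_ratio roughly measures the
// usefulness of card-table scanning (as a percentage); once it drops below 30,
// condemning a higher generation is considered cheaper than paying for
// mostly-fruitless card scans.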
3329
3330 inline BOOL
3331 in_range_for_segment(uint8_t* add, heap_segment* seg)
3332 {
3333     return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg)));
3334 }
3335
3336 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
3337 // The array we allocate is organized as follows:
3338 // the 0th element holds the address of the last array we allocated;
3339 // the segment addresses start at the 1st element, which is
3340 // what buckets() returns.
3341 struct bk
3342 {
3343     uint8_t* add;
3344     size_t val;
3345 };
3346
3347 class sorted_table
3348 {
3349 private:
3350     ptrdiff_t size;
3351     ptrdiff_t count;
3352     bk* slots;
3353     bk* buckets() { return (slots + 1); }
3354     uint8_t*& last_slot (bk* arr) { return arr[0].add; }
3355     bk* old_slots;
3356 public:
3357     static  sorted_table* make_sorted_table ();
3358     BOOL    insert (uint8_t* add, size_t val);
3359     size_t  lookup (uint8_t*& add);
3360     void    remove (uint8_t* add);
3361     void    clear ();
3362     void    delete_sorted_table();
3363     void    delete_old_slots();
3364     void    enqueue_old_slot(bk* sl);
3365     BOOL    ensure_space_for_insert();
3366 };
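// Minimal usage sketch (illustrative only, never compiled; 'seg_start', 'seg'
// and 'o' are hypothetical): lookup() returns the value of the entry with the
// largest key <= the given address and writes that key back through the by-ref
// argument; the MAX_PTR sentinel installed by clear() keeps the binary search
// in bounds.
#if 0
    sorted_table* table = sorted_table::make_sorted_table ();
    if (table && table->ensure_space_for_insert ())
        table->insert (seg_start, (size_t)seg);    // key: segment start address
    uint8_t* add = o;                              // some address inside the segment
    heap_segment* found = (heap_segment*)table->lookup (add);
    // on success 'add' now holds seg_start; on a miss, lookup sets 'add' to 0.
#endif //0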
3367
3368 sorted_table*
3369 sorted_table::make_sorted_table ()
3370 {
3371     size_t size = 400;
3372
3373     // allocate one more bk to store the older slot address.
3374     sorted_table* res = (sorted_table*)new char [sizeof (sorted_table) + (size + 1) * sizeof (bk)];
3375     if (!res)
3376         return 0;
3377     res->size = size;
3378     res->slots = (bk*)(res + 1);
3379     res->old_slots = 0;
3380     res->clear();
3381     return res;
3382 }
3383
3384 void
3385 sorted_table::delete_sorted_table()
3386 {
3387     if (slots != (bk*)(this+1))
3388     {
3389         delete slots;
3390     }
3391     delete_old_slots();
3392     delete this;
3393 }
3394 void
3395 sorted_table::delete_old_slots()
3396 {
3397     uint8_t* sl = (uint8_t*)old_slots;
3398     while (sl)
3399     {
3400         uint8_t* dsl = sl;
3401         sl = last_slot ((bk*)sl);
3402         delete dsl;
3403     }
3404     old_slots = 0;
3405 }
3406 void
3407 sorted_table::enqueue_old_slot(bk* sl)
3408 {
3409     last_slot (sl) = (uint8_t*)old_slots;
3410     old_slots = sl;
3411 }
3412
3413 inline
3414 size_t
3415 sorted_table::lookup (uint8_t*& add)
3416 {
3417     ptrdiff_t high = (count-1);
3418     ptrdiff_t low = 0;
3419     ptrdiff_t ti;
3420     ptrdiff_t mid;
3421     bk* buck = buckets();
3422     while (low <= high)
3423     {
3424         mid = ((low + high)/2);
3425         ti = mid;
3426         if (buck[ti].add > add)
3427         {
3428             if ((ti > 0) && (buck[ti-1].add <= add))
3429             {
3430                 add = buck[ti-1].add;
3431                 return buck[ti - 1].val;
3432             }
3433             high = mid - 1;
3434         }
3435         else
3436         {
3437             if (buck[ti+1].add > add)
3438             {
3439                 add = buck[ti].add;
3440                 return buck[ti].val;
3441             }
3442             low = mid + 1;
3443         }
3444     }
3445     add = 0;
3446     return 0;
3447 }
3448
3449 BOOL
3450 sorted_table::ensure_space_for_insert()
3451 {
3452     if (count == size)
3453     {
3454         size = (size * 3)/2;
3455         assert((size * sizeof (bk)) > 0);
3456         bk* res = (bk*)new (nothrow) char [(size + 1) * sizeof (bk)];
3457         assert (res);
3458         if (!res)
3459             return FALSE;
3460
3461         last_slot (res) = 0;
3462         memcpy (((bk*)res + 1), buckets(), count * sizeof (bk));
3463         bk* last_old_slots = slots;
3464         slots = res;
3465         if (last_old_slots != (bk*)(this + 1))
3466             enqueue_old_slot (last_old_slots);
3467     }
3468     return TRUE;
3469 }
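// Growth note: the table grows by a factor of 3/2. The previous slot array is
// not freed immediately; enqueue_old_slot threads it onto the old_slots chain
// (reusing its 0th element as the link), and delete_old_slots later walks that
// chain to release everything.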
3470
3471 BOOL
3472 sorted_table::insert (uint8_t* add, size_t val)
3473 {
3474     // ensure_space_for_insert must have grown the table already if it was full
3475     assert (count < size);
3476
3477     //insert sorted
3478     ptrdiff_t high = (count-1);
3479     ptrdiff_t low = 0;
3480     ptrdiff_t ti;
3481     ptrdiff_t mid;
3482     bk* buck = buckets();
3483     while (low <= high)
3484     {
3485         mid = ((low + high)/2);
3486         ti = mid;
3487         if (buck[ti].add > add)
3488         {
3489             if ((ti == 0) || (buck[ti-1].add <= add))
3490             {
3491                 // found insertion point
3492                 for (ptrdiff_t k = count; k > ti;k--)
3493                 {
3494                     buck [k] = buck [k-1];
3495                 }
3496                 buck[ti].add = add;
3497                 buck[ti].val = val;
3498                 count++;
3499                 return TRUE;
3500             }
3501             high = mid - 1;
3502         }
3503         else
3504         {
3505             if (buck[ti+1].add > add)
3506             {
3507                 //found the insertion point
3508                 for (ptrdiff_t k = count; k > ti+1;k--)
3509                 {
3510                     buck [k] = buck [k-1];
3511                 }
3512                 buck[ti+1].add = add;
3513                 buck[ti+1].val = val;
3514                 count++;
3515                 return TRUE;
3516             }
3517             low = mid + 1;
3518         }
3519     }
3520     assert (0);
3521     return TRUE;
3522 }
3523
3524 void
3525 sorted_table::remove (uint8_t* add)
3526 {
3527     ptrdiff_t high = (count-1);
3528     ptrdiff_t low = 0;
3529     ptrdiff_t ti;
3530     ptrdiff_t mid;
3531     bk* buck = buckets();
3532     while (low <= high)
3533     {
3534         mid = ((low + high)/2);
3535         ti = mid;
3536         if (buck[ti].add > add)
3537         {
3538             if (buck[ti-1].add <= add)
3539             {
3540                 // found the guy to remove
3541                 for (ptrdiff_t k = ti; k < count; k++)
3542                     buck[k-1] = buck[k];
3543                 count--;
3544                 return;
3545             }
3546             high = mid - 1;
3547         }
3548         else
3549         {
3550             if (buck[ti+1].add > add)
3551             {
3552                 // found the guy to remove
3553                 for (ptrdiff_t k = ti+1; k < count; k++)
3554                     buck[k-1] = buck[k];
3555                 count--;
3556                 return;
3557             }
3558             low = mid + 1;
3559         }
3560     }
3561     assert (0);
3562 }
3563
3564 void
3565 sorted_table::clear()
3566 {
3567     count = 1;
3568     buckets()[0].add = MAX_PTR;
3569 }
3570 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
3571
3572 #ifdef SEG_MAPPING_TABLE
3573 #ifdef GROWABLE_SEG_MAPPING_TABLE
3574 inline
3575 uint8_t* align_on_segment (uint8_t* add)
3576 {
3577     return (uint8_t*)((size_t)(add + (gc_heap::min_segment_size - 1)) & ~(gc_heap::min_segment_size - 1));
3578 }
3579
3580 inline
3581 uint8_t* align_lower_segment (uint8_t* add)
3582 {
3583     return (uint8_t*)((size_t)(add) & ~(gc_heap::min_segment_size - 1));
3584 }
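// Worked example (illustrative, assuming min_segment_size = 4MB = 0x400000):
// align_on_segment (0x40123456) yields 0x40400000 (round up) and
// align_lower_segment (0x40123456) yields 0x40000000 (round down). Both masks
// require min_segment_size to be a power of 2.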
3585
3586 size_t size_seg_mapping_table_of (uint8_t* from, uint8_t* end)
3587 {
3588     from = align_lower_segment (from);
3589     end = align_on_segment (end);
3590     dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end, sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr))));
3591     return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr);
3592 }
3593
3594 // for the seg_mapping_table we want it to start at a pointer-sized address.
3595 inline
3596 size_t align_for_seg_mapping_table (size_t size)
3597 {
3598     return ((size + (sizeof (uint8_t*) - 1)) &~ (sizeof (uint8_t*) - 1));
3599 }
3600
3601 inline
3602 size_t seg_mapping_word_of (uint8_t* add)
3603 {
3604     return (size_t)add >> gc_heap::min_segment_size_shr;
3605 }
3606 #else //GROWABLE_SEG_MAPPING_TABLE
3607 BOOL seg_mapping_table_init()
3608 {
3609 #ifdef BIT64
3610     uint64_t total_address_space = (uint64_t)8*1024*1024*1024*1024;
3611 #else
3612     uint64_t total_address_space = (uint64_t)4*1024*1024*1024;
3613 #endif // BIT64
3614
3615     size_t num_entries = (size_t)(total_address_space >> gc_heap::min_segment_size_shr);
3616     seg_mapping_table = new seg_mapping[num_entries];
3617
3618     if (seg_mapping_table)
3619     {
3620         memset (seg_mapping_table, 0, num_entries * sizeof (seg_mapping));
3621         dprintf (1, ("created %d entries for heap mapping (%Id bytes)", 
3622                      num_entries, (num_entries * sizeof (seg_mapping))));
3623         return TRUE;
3624     }
3625     else
3626     {
3627         dprintf (1, ("failed to create %d entries for heap mapping (%Id bytes)", 
3628                      num_entries, (num_entries * sizeof (seg_mapping))));
3629         return FALSE;
3630     }
3631 }
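// Worked example (illustrative, assuming min_segment_size_shr == 22, i.e. 4MB
// minimum segments): the non-growable table above covers the whole 8TB 64-bit
// address range with 2^43 >> 22 = 2^21 (~2M) seg_mapping entries, allocated
// and zeroed up front.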
3632 #endif //GROWABLE_SEG_MAPPING_TABLE
3633
3634 #ifdef FEATURE_BASICFREEZE
3635 inline
3636 size_t ro_seg_begin_index (heap_segment* seg)
3637 {
3638     size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3639     begin_index = max (begin_index, (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr);
3640     return begin_index;
3641 }
3642
3643 inline
3644 size_t ro_seg_end_index (heap_segment* seg)
3645 {
3646     size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) >> gc_heap::min_segment_size_shr;
3647     end_index = min (end_index, (size_t)g_gc_highest_address >> gc_heap::min_segment_size_shr);
3648     return end_index;
3649 }
3650
3651 void seg_mapping_table_add_ro_segment (heap_segment* seg)
3652 {
3653 #ifdef GROWABLE_SEG_MAPPING_TABLE
3654     if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
3655         return;
3656 #endif //GROWABLE_SEG_MAPPING_TABLE
3657
3658     for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++)
3659         seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry);
3660 }
3661
3662 void seg_mapping_table_remove_ro_segment (heap_segment* seg)
3663 {
3664     UNREFERENCED_PARAMETER(seg);
3665 #if 0
3666 // POSSIBLE PERF TODO: right now we are not doing anything because we can't simply remove the flag. If it proves
3667 // to be a perf problem, we can search the current ro segs to see if any lands in this range, and only
3668 // remove the flag if none does.
3669 #endif //0
3670 }
3671
3672 heap_segment* ro_segment_lookup (uint8_t* o)
3673 {
3674     uint8_t* ro_seg_start = o;
3675     heap_segment* seg = (heap_segment*)gc_heap::seg_table->lookup (ro_seg_start);
3676
3677     if (ro_seg_start && in_range_for_segment (o, seg))
3678         return seg;
3679     else
3680         return 0;
3681 }
3682
3683 #endif //FEATURE_BASICFREEZE
3684
3685 void gc_heap::seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp)
3686 {
3687     size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
3688     size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3689     seg_mapping* begin_entry = &seg_mapping_table[begin_index];
3690     size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
3691     seg_mapping* end_entry = &seg_mapping_table[end_index];
3692
3693     dprintf (1, ("adding seg %Ix(%d)-%Ix(%d)", 
3694         seg, begin_index, heap_segment_reserved (seg), end_index));
3695
3696     dprintf (1, ("before add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", 
3697         begin_index, (seg_mapping_table[begin_index].boundary + 1),
3698         end_index, (seg_mapping_table[end_index].boundary + 1)));
3699
3700 #ifdef MULTIPLE_HEAPS
3701 #ifdef SIMPLE_DPRINTF
3702     dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end %d: h0: %Ix(%d), h1: %Ix(%d)",
3703         begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
3704         (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
3705         end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
3706         (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
3707 #endif //SIMPLE_DPRINTF
3708     assert (end_entry->boundary == 0);
3709     assert (end_entry->h0 == 0);
3710     end_entry->h0 = hp;
3711     assert (begin_entry->h1 == 0);
3712     begin_entry->h1 = hp;
3713 #else
3714     UNREFERENCED_PARAMETER(hp);
3715 #endif //MULTIPLE_HEAPS
3716
3717     end_entry->boundary = (uint8_t*)seg_end;
3718
3719     dprintf (1, ("set entry %d seg1 and %d seg0 to %Ix", begin_index, end_index, seg));
3720     assert ((begin_entry->seg1 == 0) || ((size_t)(begin_entry->seg1) == ro_in_entry));
3721     begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) | (size_t)seg);
3722     end_entry->seg0 = seg;
3723
3724     // for every entry in between we need to set its heap too.
3725     for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
3726     {
3727         assert (seg_mapping_table[entry_index].boundary == 0);
3728 #ifdef MULTIPLE_HEAPS
3729         assert (seg_mapping_table[entry_index].h0 == 0);
3730         seg_mapping_table[entry_index].h1 = hp;
3731 #endif //MULTIPLE_HEAPS
3732         seg_mapping_table[entry_index].seg1 = seg;
3733     }
3734
3735     dprintf (1, ("after add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", 
3736         begin_index, (seg_mapping_table[begin_index].boundary + 1),
3737         end_index, (seg_mapping_table[end_index].boundary + 1)));
3738 #if defined(MULTIPLE_HEAPS) && defined(SIMPLE_DPRINTF)
3739     dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end: %d h0: %Ix(%d), h1: %Ix(%d)",
3740         begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
3741         (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
3742         end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
3743         (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
3744 #endif //MULTIPLE_HEAPS && SIMPLE_DPRINTF
3745 }
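// Entry scheme (summary): a segment's first and last table entries can be
// shared with neighboring segments, so each entry keeps two views: seg0/h0
// describe the segment that ends within this entry's range and seg1/h1 the one
// that begins there, with 'boundary' recording where the switch happens.
// Readers pick a side by comparing the address against boundary; entries fully
// covered by one segment keep boundary at 0 so the seg1/h1 side always wins.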
3746
3747 void gc_heap::seg_mapping_table_remove_segment (heap_segment* seg)
3748 {
3749     size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
3750     size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3751     seg_mapping* begin_entry = &seg_mapping_table[begin_index];
3752     size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
3753     seg_mapping* end_entry = &seg_mapping_table[end_index];
3754     dprintf (1, ("removing seg %Ix(%d)-%Ix(%d)", 
3755         seg, begin_index, heap_segment_reserved (seg), end_index));
3756
3757     assert (end_entry->boundary == (uint8_t*)seg_end);
3758     end_entry->boundary = 0;
3759
3760 #ifdef MULTIPLE_HEAPS
3761     gc_heap* hp = heap_segment_heap (seg);
3762     assert (end_entry->h0 == hp);
3763     end_entry->h0 = 0;
3764     assert (begin_entry->h1 == hp);
3765     begin_entry->h1 = 0;
3766 #endif //MULTIPLE_HEAPS
3767
3768     assert (begin_entry->seg1 != 0);
3769     begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) & ro_in_entry);
3770     end_entry->seg0 = 0;
3771
3772     // for every entry in between we need to reset its heap too.
3773     for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
3774     {
3775         assert (seg_mapping_table[entry_index].boundary == 0);
3776 #ifdef MULTIPLE_HEAPS
3777         assert (seg_mapping_table[entry_index].h0 == 0);
3778         assert (seg_mapping_table[entry_index].h1 == hp);
3779         seg_mapping_table[entry_index].h1 = 0;
3780 #endif //MULTIPLE_HEAPS
3781         seg_mapping_table[entry_index].seg1 = 0;
3782     }
3783
3784     dprintf (1, ("after remove: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", 
3785         begin_index, (seg_mapping_table[begin_index].boundary + 1),
3786         end_index, (seg_mapping_table[end_index].boundary + 1)));
3787 #ifdef MULTIPLE_HEAPS
3788     dprintf (1, ("begin %d: h0: %Ix, h1: %Ix; end: %d h0: %Ix, h1: %Ix",
3789         begin_index, (uint8_t*)(begin_entry->h0), (uint8_t*)(begin_entry->h1),
3790         end_index, (uint8_t*)(end_entry->h0), (uint8_t*)(end_entry->h1)));
3791 #endif //MULTIPLE_HEAPS
3792 }
3793
3794 #ifdef MULTIPLE_HEAPS
3795 inline
3796 gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
3797 {
3798     size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
3799     seg_mapping* entry = &seg_mapping_table[index];
3800
3801     gc_heap* hp = ((o > entry->boundary) ? entry->h1 : entry->h0);
3802
3803     dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix",
3804         o, index, (entry->boundary + 1), 
3805         (uint8_t*)(entry->h0), (uint8_t*)(entry->seg0),
3806         (uint8_t*)(entry->h1), (uint8_t*)(entry->seg1)));
3807
3808 #ifdef _DEBUG
3809     heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
3810 #ifdef FEATURE_BASICFREEZE
3811     if ((size_t)seg & ro_in_entry)
3812         seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
3813 #endif //FEATURE_BASICFREEZE
3814
3815     if (seg)
3816     {
3817         if (in_range_for_segment (o, seg))
3818         {
3819             dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, seg, (uint8_t*)heap_segment_allocated (seg)));
3820         }
3821         else
3822         {
3823             dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg", 
3824                 seg, (uint8_t*)heap_segment_allocated (seg), o));
3825         }
3826     }
3827     else
3828     {
3829         dprintf (2, ("could not find obj %Ix in any existing segments", o));
3830     }
3831 #endif //_DEBUG
3832
3833     return hp;
3834 }
3835
3836 gc_heap* seg_mapping_table_heap_of (uint8_t* o)
3837 {
3838 #ifdef GROWABLE_SEG_MAPPING_TABLE
3839     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3840         return 0;
3841 #endif //GROWABLE_SEG_MAPPING_TABLE
3842
3843     return seg_mapping_table_heap_of_worker (o);
3844 }
3845
3846 gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
3847 {
3848 #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
3849     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3850         return 0;
3851 #endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
3852
3853     return seg_mapping_table_heap_of_worker (o);
3854 }
3855 #endif //MULTIPLE_HEAPS
3856
3857 // Only returns a valid seg if we can actually find o on the seg.
3858 heap_segment* seg_mapping_table_segment_of (uint8_t* o)
3859 {
3860 #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
3861     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3862 #ifdef FEATURE_BASICFREEZE
3863         return ro_segment_lookup (o);
3864 #else
3865         return 0;
3866 #endif //FEATURE_BASICFREEZE
3867 #endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
3868
3869     size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
3870     seg_mapping* entry = &seg_mapping_table[index];
3871
3872     dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, seg0: %Ix, seg1: %Ix",
3873         o, index, (entry->boundary + 1), 
3874         (uint8_t*)(entry->seg0), (uint8_t*)(entry->seg1)));
3875
3876     heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
3877 #ifdef FEATURE_BASICFREEZE
3878     if ((size_t)seg & ro_in_entry)
3879         seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
3880 #endif //FEATURE_BASICFREEZE
3881
3882     if (seg)
3883     {
3884         if (in_range_for_segment (o, seg))
3885         {
3886             dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg)));
3887         }
3888         else
3889         {
3890             dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg, setting it to 0", 
3891                 (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg), o));
3892             seg = 0;
3893         }
3894     }
3895     else
3896     {
3897         dprintf (2, ("could not find obj %Ix in any existing segments", o));
3898     }
3899
3900 #ifdef FEATURE_BASICFREEZE
3901     // TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro 
3902     // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range.  I.e., it had an 
3903     // extra "&& (size_t)(entry->seg1) & ro_in_entry" expression.  However, at the moment, grow_brick_card_table does 
3904     // not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest) 
3905     // range changes.  We should probably go ahead and modify grow_brick_card_table and put back the 
3906     // "&& (size_t)(entry->seg1) & ro_in_entry" here.
3907     if (!seg)
3908     {
3909         seg = ro_segment_lookup (o);
3910         if (seg && !in_range_for_segment (o, seg))
3911             seg = 0;
3912     }
3913 #endif //FEATURE_BASICFREEZE
3914
3915     return seg;
3916 }
3917 #endif //SEG_MAPPING_TABLE
3918
3919 size_t gcard_of ( uint8_t*);
3920
3921 #define memref(i) *(uint8_t**)(i)
3922
3923 //GC Flags
3924 #define GC_MARKED       (size_t)0x1
3925 #define slot(i, j) ((uint8_t**)(i))[j+1]
3926
3927 #define free_object_base_size (plug_skew + sizeof(ArrayBase))
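// Note on the macros above: memref (i) reads the pointer-sized value stored at
// address i; slot (i, j) returns the (j+1)-th pointer-sized field of object i,
// skipping the MethodTable* at offset 0; GC_MARKED is stashed in the low bit
// of the MethodTable pointer, which is free because method tables are at least
// pointer-aligned.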
3928
3929 class CObjectHeader : public Object
3930 {
3931 public:
3932
3933 #if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
3934     // The GC expects the following methods that are provided by the Object class in the CLR but not provided
3935     // by Redhawk's version of Object.
3936     uint32_t GetNumComponents()
3937     {
3938         return ((ArrayBase *)this)->GetNumComponents();
3939     }
3940
3941     void Validate(BOOL bDeep=TRUE, BOOL bVerifyNextHeader = TRUE)
3942     {
3943         UNREFERENCED_PARAMETER(bVerifyNextHeader);
3944
3945         if (this == NULL)
3946             return;
3947
3948         MethodTable * pMT = GetMethodTable();
3949
3950         _ASSERTE(pMT->SanityCheck());
3951
3952         bool noRangeChecks =
3953             (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS;
3954
3955         BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
3956         if (!noRangeChecks)
3957         {
3958             fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE);
3959             if (!fSmallObjectHeapPtr)
3960                 fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this);
3961
3962             _ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr);
3963         }
3964
3965 #ifdef FEATURE_STRUCTALIGN
3966         _ASSERTE(IsStructAligned((uint8_t *)this, GetMethodTable()->GetBaseAlignment()));
3967 #endif // FEATURE_STRUCTALIGN
3968
3969 #ifdef FEATURE_64BIT_ALIGNMENT
3970         if (pMT->RequiresAlign8())
3971         {
3972             _ASSERTE((((size_t)this) & 0x7) == (pMT->IsValueType() ? 4U : 0U));
3973         }
3974 #endif // FEATURE_64BIT_ALIGNMENT
3975
3976 #ifdef VERIFY_HEAP
3977         if (bDeep && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
3978             g_theGCHeap->ValidateObjectMember(this);
3979 #endif
3980         if (fSmallObjectHeapPtr)
3981         {
3982 #ifdef FEATURE_BASICFREEZE
3983             _ASSERTE(!g_theGCHeap->IsLargeObject(pMT) || g_theGCHeap->IsInFrozenSegment(this));
3984 #else
3985             _ASSERTE(!g_theGCHeap->IsLargeObject(pMT));
3986 #endif
3987         }
3988     }
3989
3990     void ValidatePromote(ScanContext *sc, uint32_t flags)
3991     {
3992         UNREFERENCED_PARAMETER(sc);
3993         UNREFERENCED_PARAMETER(flags);
3994
3995         Validate();
3996     }
3997
3998     void ValidateHeap(Object *from, BOOL bDeep)
3999     {
4000         UNREFERENCED_PARAMETER(from);
4001
4002         Validate(bDeep, FALSE);
4003     }
4004
4005 #endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE
4006
4007     /////
4008     //
4009     // Header Status Information
4010     //
4011
4012     MethodTable    *GetMethodTable() const
4013     {
4014         return( (MethodTable *) (((size_t) RawGetMethodTable()) & (~(GC_MARKED))));
4015     }
4016
4017     void SetMarked()
4018     {
4019         RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | GC_MARKED));
4020     }
4021
4022     BOOL IsMarked() const
4023     {
4024         return !!(((size_t)RawGetMethodTable()) & GC_MARKED);
4025     }
4026
4027     void SetPinned()
4028     {
4029         assert (!(gc_heap::settings.concurrent));
4030         GetHeader()->SetGCBit();
4031     }
4032
4033     BOOL IsPinned() const
4034     {
4035         return !!((((CObjectHeader*)this)->GetHeader()->GetBits()) & BIT_SBLK_GC_RESERVE);
4036     }
4037
4038     void ClearMarked()
4039     {
4040         RawSetMethodTable( GetMethodTable() );
4041     }
4042
4043     CGCDesc *GetSlotMap ()
4044     {
4045         assert (GetMethodTable()->ContainsPointers());
4046         return CGCDesc::GetCGCDescFromMT(GetMethodTable());
4047     }
4048
4049     void SetFree(size_t size)
4050     {
4051         assert (size >= free_object_base_size);
4052
4053         assert (g_gc_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size);
4054         assert (g_gc_pFreeObjectMethodTable->RawGetComponentSize() == 1);
4055
4056         RawSetMethodTable( g_gc_pFreeObjectMethodTable );
4057
4058         size_t* numComponentsPtr = (size_t*) &((uint8_t*) this)[ArrayBase::GetOffsetOfNumComponents()];
4059         *numComponentsPtr = size - free_object_base_size;
4060 #ifdef VERIFY_HEAP
4061         // This would introduce a bug in the free list management, so it stays disabled:
4062         //((void**) this)[-1] = 0;    // clear the sync block
4063         assert (*numComponentsPtr >= 0);
4064         if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
4065             memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr);
4066 #endif //VERIFY_HEAP
4067     }
4068
4069     void UnsetFree()
4070     {
4071         size_t size = free_object_base_size - plug_skew;
4072
4073         // since we only need to clear two pointer-sized words, we do it manually
4074         PTR_PTR m = (PTR_PTR) this;
4075         for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
4076             *(m++) = 0;
4077     }
4078
4079     BOOL IsFree () const
4080     {
4081         return (GetMethodTable() == g_gc_pFreeObjectMethodTable);
4082     }
4083
4084 #ifdef FEATURE_STRUCTALIGN
4085     int GetRequiredAlignment () const
4086     {
4087         return GetMethodTable()->GetRequiredAlignment();
4088     }
4089 #endif // FEATURE_STRUCTALIGN
4090
4091     BOOL ContainsPointers() const
4092     {
4093         return GetMethodTable()->ContainsPointers();
4094     }
4095
4096 #ifdef COLLECTIBLE_CLASS
4097     BOOL Collectible() const
4098     {
4099         return GetMethodTable()->Collectible();
4100     }
4101
4102     FORCEINLINE BOOL ContainsPointersOrCollectible() const
4103     {
4104         MethodTable *pMethodTable = GetMethodTable();
4105         return (pMethodTable->ContainsPointers() || pMethodTable->Collectible());
4106     }
4107 #endif //COLLECTIBLE_CLASS
4108
4109     Object* GetObjectBase() const
4110     {
4111         return (Object*) this;
4112     }
4113 };
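// Minimal sketch (illustrative only, never compiled; 'o' is a hypothetical
// object address): the mark state round-trips through the accessors above
// because GC_MARKED occupies the low bit of the raw MethodTable pointer.
#if 0
    CObjectHeader* h = (CObjectHeader*)o;
    h->SetMarked ();                        // RawGetMethodTable() | GC_MARKED
    assert (h->IsMarked ());
    MethodTable* mt = h->GetMethodTable (); // masks GC_MARKED back off
    h->ClearMarked ();                      // stores the unmarked pointer back
#endif //0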
4114
4115 #define header(i) ((CObjectHeader*)(i))
4116
4117 #define free_list_slot(x) ((uint8_t**)(x))[2]
4118 #define free_list_undo(x) ((uint8_t**)(x))[-1]
4119 #define UNDO_EMPTY ((uint8_t*)1)
4120
4121 #ifdef SHORT_PLUGS
4122 inline 
4123 void set_plug_padded (uint8_t* node)
4124 {
4125     header(node)->SetMarked();
4126 }
4127 inline
4128 void clear_plug_padded (uint8_t* node)
4129 {
4130     header(node)->ClearMarked();
4131 }
4132 inline
4133 BOOL is_plug_padded (uint8_t* node)
4134 {
4135     return header(node)->IsMarked();
4136 }
4137 #else //SHORT_PLUGS
4138 inline void set_plug_padded (uint8_t* node){}
4139 inline void clear_plug_padded (uint8_t* node){}
4140 inline
4141 BOOL is_plug_padded (uint8_t* node){return FALSE;}
4142 #endif //SHORT_PLUGS
4143
4144
4145 inline size_t unused_array_size(uint8_t * p)
4146 {
4147     assert(((CObjectHeader*)p)->IsFree());
4148
4149     size_t* numComponentsPtr = (size_t*)(p + ArrayBase::GetOffsetOfNumComponents());
4150     return free_object_base_size + *numComponentsPtr;
4151 }
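// Worked example (illustrative): SetFree (size) turns a dead region into a
// "free object", i.e. a byte array whose component count is set to
// (size - free_object_base_size); unused_array_size then recovers the original
// region size as free_object_base_size + numComponents.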
4152
4153 heap_segment* heap_segment_rw (heap_segment* ns)
4154 {
4155     if ((ns == 0) || !heap_segment_read_only_p (ns))
4156     {
4157         return ns;
4158     }
4159     else
4160     {
4161         do
4162         {
4163             ns = heap_segment_next (ns);
4164         } while ((ns != 0) && heap_segment_read_only_p (ns));
4165         return ns;
4166     }
4167 }
4168
4169 // returns the next non-ro segment.
4170 heap_segment* heap_segment_next_rw (heap_segment* seg)
4171 {
4172     heap_segment* ns = heap_segment_next (seg);
4173     return heap_segment_rw (ns);
4174 }
4175
4176 // returns the non-ro segment before seg.
4177 heap_segment* heap_segment_prev_rw (heap_segment* begin, heap_segment* seg)
4178 {
4179     assert (begin != 0);
4180     heap_segment* prev = begin;
4181     heap_segment* current = heap_segment_next_rw (begin);
4182
4183     while (current && current != seg)
4184     {
4185         prev = current;
4186         current = heap_segment_next_rw (current);
4187     }
4188
4189     if (current == seg)
4190     {
4191         return prev;
4192     }
4193     else
4194     {
4195         return 0;
4196     }
4197 }
4198
4199 // returns the segment before seg.
4200 heap_segment* heap_segment_prev (heap_segment* begin, heap_segment* seg)
4201 {
4202     assert (begin != 0);
4203     heap_segment* prev = begin;
4204     heap_segment* current = heap_segment_next (begin);
4205
4206     while (current && current != seg)
4207     {
4208         prev = current;
4209         current = heap_segment_next (current);
4210     }
4211
4212     if (current == seg)
4213     {
4214         return prev;
4215     }
4216     else
4217     {
4218         return 0;
4219     }
4220 }
4221
4222 heap_segment* heap_segment_in_range (heap_segment* ns)
4223 {
4224     if ((ns == 0) || heap_segment_in_range_p (ns))
4225     {
4226         return ns;
4227     }
4228     else
4229     {
4230         do
4231         {
4232             ns = heap_segment_next (ns);
4233         } while ((ns != 0) && !heap_segment_in_range_p (ns));
4234         return ns;
4235     }
4236 }
4237
4238 heap_segment* heap_segment_next_in_range (heap_segment* seg)
4239 {
4240     heap_segment* ns = heap_segment_next (seg);
4241     return heap_segment_in_range (ns);
4242 }
4243
4244 typedef struct
4245 {
4246     uint8_t* memory_base;
4247 } imemory_data;
4248
4249 typedef struct
4250 {
4251     imemory_data *initial_memory;
4252     imemory_data *initial_normal_heap; // points into initial_memory_array
4253     imemory_data *initial_large_heap;  // points into initial_memory_array
4254
4255     size_t block_size_normal;
4256     size_t block_size_large;
4257
4258     size_t block_count;                // # of blocks in each
4259     size_t current_block_normal;
4260     size_t current_block_large;
4261
4262     enum 
4263     { 
4264         ALLATONCE = 1, 
4265         TWO_STAGE, 
4266         EACH_BLOCK 
4267     };
4268
4269     size_t allocation_pattern;
4270 } initial_memory_details;
4271
4272 initial_memory_details memory_details;
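// Reservation strategy (summary of reserve_initial_memory below): first try a
// single contiguous reservation covering all heaps' normal and large blocks
// (ALLATONCE); failing that, one reservation for all normal blocks plus one
// for all large blocks (TWO_STAGE); failing that, reserve every block
// individually (EACH_BLOCK), unwinding all prior reservations if any block
// fails.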
4273
4274 BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps)
4275 {
4276     BOOL reserve_success = FALSE;
4277
4278     // should only be called once
4279     assert (memory_details.initial_memory == 0);
4280
4281     memory_details.initial_memory = new (nothrow) imemory_data[num_heaps*2];
4282     if (memory_details.initial_memory == 0)
4283     {
4284         dprintf (2, ("failed to reserve %Id bytes for imemory_data", num_heaps*2*sizeof(imemory_data)));
4285         return FALSE;
4286     }
4287
4288     memory_details.initial_normal_heap = memory_details.initial_memory;
4289     memory_details.initial_large_heap = memory_details.initial_memory + num_heaps;
4290     memory_details.block_size_normal = normal_size;
4291     memory_details.block_size_large = large_size;
4292     memory_details.block_count = num_heaps;
4293
4294     memory_details.current_block_normal = 0;
4295     memory_details.current_block_large = 0;
4296
4297     g_gc_lowest_address = MAX_PTR;
4298     g_gc_highest_address = 0;
4299
4300     if (((size_t)MAX_PTR - large_size) < normal_size)
4301     {
4302         // we are already overflowing with just one heap.
4303         dprintf (2, ("0x%Ix + 0x%Ix already overflow", normal_size, large_size));
4304         return FALSE;
4305     }
4306
4307     if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size))
4308     {
4309         dprintf (2, ("(0x%Ix + 0x%Ix)*0x%Ix overflow", normal_size, large_size, memory_details.block_count));
4310         return FALSE;
4311     }
4312
4313     size_t requestedMemory = memory_details.block_count * (normal_size + large_size);
4314
4315     uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory);
4316     if (allatonce_block)
4317     {
4318         g_gc_lowest_address =  allatonce_block;
4319         g_gc_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size));
4320         memory_details.allocation_pattern = initial_memory_details::ALLATONCE;
4321
4322         for(size_t i = 0; i < memory_details.block_count; i++)
4323         {
4324             memory_details.initial_normal_heap[i].memory_base = allatonce_block + (i*normal_size);
4325             memory_details.initial_large_heap[i].memory_base = allatonce_block +
4326                             (memory_details.block_count*normal_size) + (i*large_size);
4327             reserve_success = TRUE;
4328         }
4329     }
4330     else
4331     {
4332         // try to allocate 2 blocks
4333         uint8_t* b1 = 0;
4334         uint8_t* b2 = 0;
4335         b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size);
4336         if (b1)
4337         {
4338             b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size);
4339             if (b2)
4340             {
4341                 memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
4342                 g_gc_lowest_address = min(b1,b2);
4343                 g_gc_highest_address = max(b1 + memory_details.block_count*normal_size,
4344                                         b2 + memory_details.block_count*large_size);
4345                 for(size_t i = 0; i < memory_details.block_count; i++)
4346                 {
4347                     memory_details.initial_normal_heap[i].memory_base = b1 + (i*normal_size);
4348                     memory_details.initial_large_heap[i].memory_base = b2 + (i*large_size);
4349                     reserve_success = TRUE;
4350                 }
4351             }
4352             else
4353             {
4354                 // b2 allocation failed, we'll go on to try allocating each block.
4355                 // We could preserve the b1 alloc, but code complexity increases
4356                 virtual_free (b1, memory_details.block_count * normal_size);
4357             }
4358         }
4359
4360         if ((b2==NULL) && ( memory_details.block_count > 1))
4361         {
4362             memory_details.allocation_pattern = initial_memory_details::EACH_BLOCK;
4363
4364             imemory_data *current_block = memory_details.initial_memory;
4365             for(size_t i = 0; i < (memory_details.block_count*2); i++, current_block++)
4366             {
4367                 size_t block_size = ((i < memory_details.block_count) ?
4368                                      memory_details.block_size_normal :
4369                                      memory_details.block_size_large);
4370                 current_block->memory_base =
4371                     (uint8_t*)virtual_alloc (block_size);
4372                 if (current_block->memory_base == 0)
4373                 {
4374                     // Free the blocks that we've allocated so far
4375                     current_block = memory_details.initial_memory;
4376                     for(size_t j = 0; j < i; j++, current_block++){
4377                         if (current_block->memory_base != 0){
4378                             block_size = ((j < memory_details.block_count) ?
4379                                      memory_details.block_size_normal :
4380                                      memory_details.block_size_large);
4381                              virtual_free (current_block->memory_base , block_size);
4382                         }
4383                     }
4384                     reserve_success = FALSE;
4385                     break;
4386                 }
4387                 else
4388                 {
4389                     if (current_block->memory_base < g_gc_lowest_address)
4390                         g_gc_lowest_address =  current_block->memory_base;
4391                     if (((uint8_t *) current_block->memory_base + block_size) > g_gc_highest_address)
4392                         g_gc_highest_address = (current_block->memory_base + block_size);
4393                 }
4394                 reserve_success = TRUE;
4395             }
4396         }
4397     }
4398
4399     return reserve_success;
4400 }
4401
4402 void destroy_initial_memory()
4403 {
4404     if (memory_details.initial_memory != NULL)
4405     {
4406         if (memory_details.allocation_pattern == initial_memory_details::ALLATONCE)
4407         {
4408             virtual_free(memory_details.initial_memory[0].memory_base,
4409                 memory_details.block_count*(memory_details.block_size_normal +
4410                 memory_details.block_size_large));
4411         }
4412         else if (memory_details.allocation_pattern == initial_memory_details::TWO_STAGE)
4413         {
4414             virtual_free (memory_details.initial_normal_heap[0].memory_base,
4415                 memory_details.block_count*memory_details.block_size_normal);
4416
4417             virtual_free (memory_details.initial_large_heap[0].memory_base,
4418                 memory_details.block_count*memory_details.block_size_large);
4419         }
4420         else
4421         {
4422             assert (memory_details.allocation_pattern == initial_memory_details::EACH_BLOCK);
4423             imemory_data *current_block = memory_details.initial_memory;
4424             for(size_t i = 0; i < (memory_details.block_count*2); i++, current_block++)
4425             {
4426                 size_t block_size = (i < memory_details.block_count) ? memory_details.block_size_normal :
4427                                                                        memory_details.block_size_large;
4428                 if (current_block->memory_base != NULL)
4429                 {
4430                     virtual_free (current_block->memory_base, block_size);
4431                 }
4432             }
4433         }
4434
4435         delete [] memory_details.initial_memory;
4436         memory_details.initial_memory = NULL;
4437         memory_details.initial_normal_heap = NULL;
4438         memory_details.initial_large_heap = NULL;
4439     }
4440 }
4441
4442 void* next_initial_memory (size_t size)
4443 {
4444     assert ((size == memory_details.block_size_normal) || (size == memory_details.block_size_large));
4445     void *res = NULL;
4446
4447     if ((size != memory_details.block_size_normal) ||
4448         ((memory_details.current_block_normal == memory_details.block_count) &&
4449          (memory_details.block_size_normal == memory_details.block_size_large)))
4450     {
4451         // If the block sizes are the same, flow block requests from normal to large
4452         assert (memory_details.current_block_large < memory_details.block_count);
4453         assert (memory_details.initial_large_heap != 0);
4454
4455         res = memory_details.initial_large_heap[memory_details.current_block_large].memory_base;
4456         memory_details.current_block_large++;
4457     }
4458     else
4459     {
4460         assert (memory_details.current_block_normal < memory_details.block_count);
4461         assert (memory_details.initial_normal_heap != NULL);
4462
4463         res = memory_details.initial_normal_heap[memory_details.current_block_normal].memory_base;
4464         memory_details.current_block_normal++;
4465     }
4466
4467     return res;
4468 }
4469
4470 heap_segment* get_initial_segment (size_t size, int h_number)
4471 {
4472     void* mem = next_initial_memory (size);
4473     heap_segment* res = gc_heap::make_heap_segment ((uint8_t*)mem, size , h_number);
4474
4475     return res;
4476 }
4477
4478 void* virtual_alloc (size_t size)
4479 {
4480     size_t requested_size = size;
4481
4482     if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
4483     {
4484         gc_heap::reserved_memory_limit =
4485             GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
4486         if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
4487         {
4488             return 0;
4489         }
4490     }
4491
4492     uint32_t flags = VirtualReserveFlags::None;
4493 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
4494     if (virtual_alloc_hardware_write_watch)
4495     {
4496         flags = VirtualReserveFlags::WriteWatch;
4497     }
4498 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
4499     void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);
4500     void *aligned_mem = prgmem;
4501
4502     // We don't want (prgmem + size) to be right at the end of the address space
4503     // because we'd have to worry about that every time we do (address + size).
4504     // We also want to make sure that we leave loh_size_threshold at the end
4505     // so that when we allocate a small object we don't need to worry about
4506     // overflow when we do alloc_ptr + size.
4507     if (prgmem)
4508     {
4509         uint8_t* end_mem = (uint8_t*)prgmem + requested_size;
4510
4511         if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC))
4512         {
4513             GCToOSInterface::VirtualRelease (prgmem, requested_size);
4514             dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding",
4515                         requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
4516             prgmem = 0;
4517             aligned_mem = 0;
4518         }
4519     }
4520
4521     if (prgmem)
4522     {
4523         gc_heap::reserved_memory += requested_size;
4524     }
4525
4526     dprintf (2, ("Virtual Alloc size %Id: [%Ix, %Ix[",
4527                  requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
4528
4529     return aligned_mem;
4530 }
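// Note (assumption about intent): the reservation above is requested at
// card_size * card_word_width alignment, presumably so the card table covering
// the range starts on a card-word boundary; any block that ends within
// END_SPACE_AFTER_GC of the top of the address space is released and the
// allocation reported as failed, keeping later (address + size) arithmetic
// from overflowing.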
4531
4532 void virtual_free (void* add, size_t size)
4533 {
4534     GCToOSInterface::VirtualRelease (add, size);
4535     gc_heap::reserved_memory -= size;
4536     dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[",
4537                  size, (size_t)add, (size_t)((uint8_t*)add+size)));
4538 }
4539
4540 static size_t get_valid_segment_size (BOOL large_seg=FALSE)
4541 {
4542     size_t seg_size, initial_seg_size;
4543
4544     if (!large_seg)
4545     {
4546         initial_seg_size = INITIAL_ALLOC;
4547         seg_size = static_cast<size_t>(GCConfig::GetSegmentSize());
4548     }
4549     else
4550     {
4551         initial_seg_size = LHEAP_ALLOC;
4552         seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2;
4553     }
4554
4555 #ifdef MULTIPLE_HEAPS
4556 #ifdef BIT64
4557     if (!large_seg)
4558 #endif // BIT64
4559     {
4560         if (g_num_processors > 4)
4561             initial_seg_size /= 2;
4562         if (g_num_processors > 8)
4563             initial_seg_size /= 2;
4564     }
4565 #endif //MULTIPLE_HEAPS
4566
4567     // if seg_size is small but not 0 (0 is the default when the config is not set)
4568     // then set the segment size to the minimum
4569     if (!g_theGCHeap->IsValidSegmentSize(seg_size))
4570     {
4571         // if the requested size is at least 2 bytes but less than 4MB, use the 4MB minimum
4572         if ((seg_size >> 1) && !(seg_size >> 22))
4573             seg_size = 1024*1024*4;
4574         else
4575             seg_size = initial_seg_size;
4576     }
4577
4578 #ifdef SEG_MAPPING_TABLE
4579 #ifdef BIT64
4580     seg_size = round_up_power2 (seg_size);
4581 #else
4582     seg_size = round_down_power2 (seg_size);
4583 #endif // BIT64
4584 #endif //SEG_MAPPING_TABLE
4585
4586     return (seg_size);
4587 }
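// Worked example (illustrative): with SEG_MAPPING_TABLE, a configured segment
// size of 100MB becomes 128MB on 64-bit (round_up_power2) and 64MB on 32-bit
// (round_down_power2); power-of-2 sizes keep the shift-based seg_mapping_table
// lookups valid, and 32-bit presumably rounds down to conserve address space.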
4588
4589 void
4590 gc_heap::compute_new_ephemeral_size()
4591 {
4592     int eph_gen_max = max_generation - 1 - (settings.promotion ? 1 : 0);
4593     size_t padding_size = 0;
4594
4595     for (int i = 0; i <= eph_gen_max; i++)
4596     {
4597         dynamic_data* dd = dynamic_data_of (i);
4598         total_ephemeral_size += (dd_survived_size (dd) - dd_pinned_survived_size (dd));
4599 #ifdef RESPECT_LARGE_ALIGNMENT
4600         total_ephemeral_size += dd_num_npinned_plugs (dd) * switch_alignment_size (FALSE);
4601 #endif //RESPECT_LARGE_ALIGNMENT
4602 #ifdef FEATURE_STRUCTALIGN
4603         total_ephemeral_size += dd_num_npinned_plugs (dd) * MAX_STRUCTALIGN;
4604 #endif //FEATURE_STRUCTALIGN
4605
4606 #ifdef SHORT_PLUGS
4607         padding_size += dd_padding_size (dd);
4608 #endif //SHORT_PLUGS
4609     }
4610
4611     total_ephemeral_size += eph_gen_starts_size;
4612
4613 #ifdef RESPECT_LARGE_ALIGNMENT
4614     size_t planned_ephemeral_size = heap_segment_plan_allocated (ephemeral_heap_segment) -
4615                                        generation_plan_allocation_start (generation_of (max_generation-1));
4616     total_ephemeral_size = min (total_ephemeral_size, planned_ephemeral_size);
4617 #endif //RESPECT_LARGE_ALIGNMENT
4618
4619 #ifdef SHORT_PLUGS
4620     total_ephemeral_size = Align ((size_t)((double)total_ephemeral_size * short_plugs_pad_ratio) + 1);
4621     total_ephemeral_size += Align (DESIRED_PLUG_LENGTH);
4622 #endif //SHORT_PLUGS
4623
4624     dprintf (3, ("total ephemeral size is %Ix, padding %Ix(%Ix)", 
4625         total_ephemeral_size,
4626         padding_size, (total_ephemeral_size - padding_size)));
4627 }
4628
4629 #ifdef _MSC_VER
4630 #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
4631 #endif // _MSC_VER
4632
4633 heap_segment*
4634 gc_heap::soh_get_segment_to_expand()
4635 {
4636     size_t size = soh_segment_size;
4637
4638     ordered_plug_indices_init = FALSE;
4639     use_bestfit = FALSE;
4640
4641     //compute the size of the new ephemeral heap segment.
4642     compute_new_ephemeral_size();
4643
4644     if ((settings.pause_mode != pause_low_latency) &&
4645         (settings.pause_mode != pause_no_gc)
4646 #ifdef BACKGROUND_GC
4647         && (!recursive_gc_sync::background_running_p())
4648 #endif //BACKGROUND_GC
4649         )
4650     {
4651         allocator*  gen_alloc = ((settings.condemned_generation == max_generation) ? 0 :
4652                               generation_allocator (generation_of (max_generation)));
4653         dprintf (2, ("(gen%d)soh_get_segment_to_expand", settings.condemned_generation));
4654
4655         // try to find one in the gen 2 segment list, search backwards because the first segments
4656         // tend to be more compact than the later ones.
4657         heap_segment* fseg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
4658
4659         PREFIX_ASSUME(fseg != NULL);
4660
4661 #ifdef SEG_REUSE_STATS
4662         int try_reuse = 0;
4663 #endif //SEG_REUSE_STATS
4664
4665         heap_segment* seg = ephemeral_heap_segment;
4666         while ((seg = heap_segment_prev_rw (fseg, seg)) && (seg != fseg))
4667         {
4668 #ifdef SEG_REUSE_STATS
4669             try_reuse++;
4670 #endif //SEG_REUSE_STATS
4671
4672             if (can_expand_into_p (seg, size/3, total_ephemeral_size, gen_alloc))
4673             {
4674                 get_gc_data_per_heap()->set_mechanism (gc_heap_expand, 
4675                     (use_bestfit ? expand_reuse_bestfit : expand_reuse_normal));
4676                 if (settings.condemned_generation == max_generation)
4677                 {
4678                     if (use_bestfit)
4679                     {
4680                         build_ordered_free_spaces (seg);
4681                         dprintf (GTC_LOG, ("can use best fit"));
4682                     }
4683
4684 #ifdef SEG_REUSE_STATS
4685                     dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse", 
4686                         settings.condemned_generation, try_reuse));
4687 #endif //SEG_REUSE_STATS
4688                     dprintf (GTC_LOG, ("max_gen: Found existing segment to expand into %Ix", (size_t)seg));
4689                     return seg;
4690                 }
4691                 else
4692                 {
4693 #ifdef SEG_REUSE_STATS
4694                     dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse - returning", 
4695                         settings.condemned_generation, try_reuse));
4696 #endif //SEG_REUSE_STATS
4697                     dprintf (GTC_LOG, ("max_gen-1: Found existing segment to expand into %Ix", (size_t)seg));
4698
4699                     // If we return 0 here, the allocator will think since we are short on end
4700                     // of seg we need to trigger a full compacting GC. So if sustained low latency
4701                     // is set we should acquire a new seg instead, that way we wouldn't be short.
4702                     // The real solution, of course, is to actually implement seg reuse in gen1.
4703                     if (settings.pause_mode != pause_sustained_low_latency)
4704                     {
4705                         dprintf (GTC_LOG, ("max_gen-1: SustainedLowLatency is set, acquire a new seg"));
4706                         get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_next_full_gc);
4707                         return 0;
4708                     }
4709                 }
4710             }
4711         }
4712     }
4713
4714     heap_segment* result = get_segment (size, FALSE);
4715
4716     if (result)
4717     {
4718 #ifdef BACKGROUND_GC
4719         if (current_c_gc_state == c_gc_state_planning)
4720         {
4721             // When we expand heap during bgc sweep, we set the seg to be swept so 
4722             // we'll always look at cards for objects on the new segment.
4723             result->flags |= heap_segment_flags_swept;
4724         }
4725 #endif //BACKGROUND_GC
4726
4727         FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(result),
4728                                   (size_t)(heap_segment_reserved (result) - heap_segment_mem(result)),
4729                                   gc_etw_segment_small_object_heap);
4730     }
4731
4732     get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (result ? expand_new_seg : expand_no_memory));
4733
4734     if (result == 0)
4735     {
4736         dprintf (2, ("h%d: failed to allocate a new segment!", heap_number));
4737     }
4738     else
4739     {
4740 #ifdef MULTIPLE_HEAPS
4741         heap_segment_heap (result) = this;
4742 #endif //MULTIPLE_HEAPS
4743     }
4744
4745     dprintf (GTC_LOG, ("(gen%d)creating new segment %Ix", settings.condemned_generation, result));
4746     return result;
4747 }
4748
4749 #ifdef _MSC_VER
4750 #pragma warning(default:4706)
4751 #endif // _MSC_VER
4752
4753 //returns 0 in case of allocation failure
4754 heap_segment*
4755 gc_heap::get_segment (size_t size, BOOL loh_p)
4756 {
4757     heap_segment* result = 0;
4758
4759     if (segment_standby_list != 0)
4760     {
4761         result = segment_standby_list;
4762         heap_segment* last = 0;
4763         while (result)
4764         {
4765             size_t hs = (size_t)(heap_segment_reserved (result) - (uint8_t*)result);
4766             if ((hs >= size) && ((hs / 2) < size))
4767             {
4768                 dprintf (2, ("Hoarded segment %Ix found", (size_t) result));
4769                 if (last)
4770                 {
4771                     heap_segment_next (last) = heap_segment_next (result);
4772                 }
4773                 else
4774                 {
4775                     segment_standby_list = heap_segment_next (result);
4776                 }
4777                 break;
4778             }
4779             else
4780             {
4781                 last = result;
4782                 result = heap_segment_next (result);
4783             }
4784         }
4785     }
4786
4787     if (result)
4788     {
4789         init_heap_segment (result);
4790 #ifdef BACKGROUND_GC
4791         if (should_commit_mark_array())
4792         {
4793             dprintf (GC_TABLE_LOG, ("hoarded seg %Ix, mark_array is %Ix", result, mark_array));
4794             if (!commit_mark_array_new_seg (__this, result))
4795             {
4796                 dprintf (GC_TABLE_LOG, ("failed to commit mark array for hoarded seg"));
4797                 // If we can't use it we need to thread it back.
4798                 if (segment_standby_list != 0)
4799                 {
4800                     heap_segment_next (result) = segment_standby_list;
4801                     segment_standby_list = result;
4802                 }
4803                 else
4804                 {
4805                     segment_standby_list = result;
4806                 }
4807
4808                 result = 0;
4809             }
4810         }
4811 #endif //BACKGROUND_GC
4812
4813 #ifdef SEG_MAPPING_TABLE
4814         if (result)
4815             seg_mapping_table_add_segment (result, __this);
4816 #endif //SEG_MAPPING_TABLE
4817     }
4818
4819     if (!result)
4820     {
4821 #ifndef SEG_MAPPING_TABLE
4822         if (!seg_table->ensure_space_for_insert ())
4823             return 0;
4824 #endif //SEG_MAPPING_TABLE
4825         void* mem = virtual_alloc (size);
4826         if (!mem)
4827         {
4828             fgm_result.set_fgm (fgm_reserve_segment, size, loh_p);
4829             return 0;
4830         }
4831
4832         result = gc_heap::make_heap_segment ((uint8_t*)mem, size, heap_number);
4833
4834         if (result)
4835         {
4836             uint8_t* start;
4837             uint8_t* end;
4838             if (mem < g_gc_lowest_address)
4839             {
4840                 start =  (uint8_t*)mem;
4841             }
4842             else
4843             {
4844                 start = (uint8_t*)g_gc_lowest_address;
4845             }
4846
4847             if (((uint8_t*)mem + size) > g_gc_highest_address)
4848             {
4849                 end = (uint8_t*)mem + size;
4850             }
4851             else
4852             {
4853                 end = (uint8_t*)g_gc_highest_address;
4854             }
4855
4856             if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, loh_p) != 0)
4857             {
4858                 virtual_free (mem, size);
4859                 return 0;
4860             }
4861         }
4862         else
4863         {
4864             fgm_result.set_fgm (fgm_commit_segment_beg, SEGMENT_INITIAL_COMMIT, loh_p);
4865             virtual_free (mem, size);
4866         }
4867
4868         if (result)
4869         {
4870 #ifdef SEG_MAPPING_TABLE
4871             seg_mapping_table_add_segment (result, __this);
4872 #else //SEG_MAPPING_TABLE
4873             gc_heap::seg_table->insert ((uint8_t*)result, delta);
4874 #endif //SEG_MAPPING_TABLE
4875         }
4876     }
4877
4878 #ifdef BACKGROUND_GC
4879     if (result)
4880     {
4881         ::record_changed_seg ((uint8_t*)result, heap_segment_reserved (result), 
4882                             settings.gc_index, current_bgc_state,
4883                             seg_added);
4884         bgc_verify_mark_array_cleared (result);
4885     }
4886 #endif //BACKGROUND_GC
4887
4888     dprintf (GC_TABLE_LOG, ("h%d: new seg: %Ix-%Ix (%Id)", heap_number, result, ((uint8_t*)result + size), size));
4889     return result;
4890 }
4891
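// A note on the standby-list fit test in get_segment above: a hoarded segment
// is reused only when size <= hs < roughly 2*size, i.e. it is large enough for
// the request but not so large that most of its reservation would be wasted.
// For example (illustrative numbers only), a 64 MB request would take a
// hoarded 100 MB segment but pass over a 256 MB one.
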
4892 void release_segment (heap_segment* sg)
4893 {
4894     ptrdiff_t delta = 0;
4895     FIRE_EVENT(GCFreeSegment_V1, heap_segment_mem(sg));
4896     virtual_free (sg, (uint8_t*)heap_segment_reserved (sg)-(uint8_t*)sg);
4897 }
4898
4899 heap_segment* gc_heap::get_segment_for_loh (size_t size
4900 #ifdef MULTIPLE_HEAPS
4901                                            , gc_heap* hp
4902 #endif //MULTIPLE_HEAPS
4903                                            )
4904 {
4905 #ifndef MULTIPLE_HEAPS
4906     gc_heap* hp = 0;
4907 #endif //MULTIPLE_HEAPS
4908     heap_segment* res = hp->get_segment (size, TRUE);
4909     if (res != 0)
4910     {
4911 #ifdef MULTIPLE_HEAPS
4912         heap_segment_heap (res) = hp;
4913 #endif //MULTIPLE_HEAPS
4914         res->flags |= heap_segment_flags_loh;
4915
4916         FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), gc_etw_segment_large_object_heap);
4917
4918         GCToEEInterface::DiagUpdateGenerationBounds();
4919
4920 #ifdef MULTIPLE_HEAPS
4921         hp->thread_loh_segment (res);
4922 #else
4923         thread_loh_segment (res);
4924 #endif //MULTIPLE_HEAPS
4925     }
4926
4927     return res;
4928 }
4929
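// Note for the single-heap build: with MULTIPLE_HEAPS undefined, hp is null
// above, but gc_heap's PER_HEAP members (get_segment included) expand to
// statics in that configuration, so the call should never dereference hp.
// This is an established pattern in this file rather than an accidental
// null call.
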
4930 void gc_heap::thread_loh_segment (heap_segment* new_seg)
4931 {
4932     heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
4933
4934     while (heap_segment_next_rw (seg))
4935         seg = heap_segment_next_rw (seg);
4936     heap_segment_next (seg) = new_seg;
4937 }
4938
4939 heap_segment*
4940 gc_heap::get_large_segment (size_t size, BOOL* did_full_compact_gc)
4941 {
4942     *did_full_compact_gc = FALSE;
4943     size_t last_full_compact_gc_count = get_full_compact_gc_count();
4944
4945     //access to get_segment needs to be serialized
4946     add_saved_spinlock_info (true, me_release, mt_get_large_seg);
4947     leave_spin_lock (&more_space_lock_loh);
4948     enter_spin_lock (&gc_heap::gc_lock);
4949     dprintf (SPINLOCK_LOG, ("[%d]Seg: Egc", heap_number));
4950     // If a GC happened while we were waiting to acquire gc_lock above (ie,
4951     // after we sampled the count but before we ask for a segment), we need to count that GC.
4952     size_t current_full_compact_gc_count = get_full_compact_gc_count();
4953
4954     if (current_full_compact_gc_count > last_full_compact_gc_count)
4955     {
4956         *did_full_compact_gc = TRUE;
4957     }
4958
4959     heap_segment* res = get_segment_for_loh (size
4960 #ifdef MULTIPLE_HEAPS
4961                                             , this
4962 #endif //MULTIPLE_HEAPS
4963                                             );
4964
4965     dprintf (SPINLOCK_LOG, ("[%d]Seg: A Lgc", heap_number));
4966     leave_spin_lock (&gc_heap::gc_lock);
4967     enter_spin_lock (&more_space_lock_loh);
4968     add_saved_spinlock_info (true, me_acquire, mt_get_large_seg);
4969
4970     return res;
4971 }
4972
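// Locking recap for get_large_segment: more_space_lock_loh is dropped before
// gc_heap::gc_lock is taken and is re-acquired only after gc_lock is released,
// so the two locks are never held at the same time. The full-compact-GC count
// is sampled on both sides of the gc_lock acquisition precisely because a GC
// may slip in while this thread is blocked on that lock.
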
4973 #if 0
4974 BOOL gc_heap::unprotect_segment (heap_segment* seg)
4975 {
4976     uint8_t* start = align_lower_page (heap_segment_mem (seg));
4977     ptrdiff_t region_size = heap_segment_allocated (seg) - start;
4978
4979     if (region_size != 0 )
4980     {
4981         dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));
4982
4983         BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
4984         assert (status);
4985         return status;
4986     }
4987     return FALSE;
4988 }
4989 #endif
4990
4991 #ifdef MULTIPLE_HEAPS
4992 #ifdef _X86_
4993 #ifdef _MSC_VER
4994 #pragma warning(disable:4035)
4995     static ptrdiff_t  get_cycle_count()
4996     {
4997         __asm   rdtsc
4998     }
4999 #pragma warning(default:4035)
5000 #elif defined(__GNUC__)
5001     static ptrdiff_t  get_cycle_count()
5002     {
5003         ptrdiff_t cycles;
5004         ptrdiff_t cyclesHi;
5005         __asm__ __volatile__
5006         ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
5007         return cycles;
5008     }
5009 #else //_MSC_VER
5010 #error Unknown compiler
5011 #endif //_MSC_VER
5012 #elif defined(_TARGET_AMD64_) 
5013 #ifdef _MSC_VER
5014 extern "C" uint64_t __rdtsc();
5015 #pragma intrinsic(__rdtsc)
5016     static ptrdiff_t get_cycle_count()
5017     {
5018         return (ptrdiff_t)__rdtsc();
5019     }
5020 #elif defined(__clang__)    
5021     static ptrdiff_t get_cycle_count()
5022     {
5023         ptrdiff_t cycles;
5024         ptrdiff_t cyclesHi;
5025         __asm__ __volatile__
5026         ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
5027         return (cyclesHi << 32) | cycles;
5028     }
5029 #else // _MSC_VER
5030     extern "C" ptrdiff_t get_cycle_count(void);
5031 #endif // _MSC_VER
5032 #elif defined(_TARGET_ARM_)
5033     static ptrdiff_t get_cycle_count()
5034     {
5035         // @ARMTODO: cycle counter is not exposed to user mode by CoreARM. For now (until we can show this
5036         // makes a difference on the ARM configurations on which we'll run) just return 0. This will result in
5037         // all buffer access times being reported as equal in access_time().
5038         return 0;
5039     }
5040 #elif defined(_TARGET_ARM64_)
5041     static ptrdiff_t get_cycle_count()
5042     {
5043         // @ARM64TODO: the cycle counter is not exposed to user mode on ARM64 either. For now (until we can show this
5044         // makes a difference on the ARM64 configurations on which we'll run) just return 0. This will result in
5045         // all buffer access times being reported as equal in access_time().
5046         return 0;
5047     }
5048 #else
5049 #error NYI platform: get_cycle_count
5050 #endif //_X86_
5051
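// A platform-neutral sketch of an alternative timing source (shown for
// illustration only; it is not used because a steady_clock read costs far
// more than a single rdtsc and would swamp the cache-miss signal that
// access_time() below is trying to measure):
#if 0
#include <chrono>
static ptrdiff_t get_cycle_count()
{
    // Monotonic nanoseconds since an arbitrary epoch.
    return (ptrdiff_t)std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
}
#endif
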
5052 class heap_select
5053 {
5054     heap_select() {}
5055     static uint8_t* sniff_buffer;
5056     static unsigned n_sniff_buffers;
5057     static unsigned cur_sniff_index;
5058
5059     static uint16_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
5060     static uint16_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
5061     static uint16_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
5062     static uint16_t heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
5063     static uint16_t heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
5064     static uint16_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
5065
5066     static int access_time(uint8_t *sniff_buffer, int heap_number, unsigned sniff_index, unsigned n_sniff_buffers)
5067     {
5068         ptrdiff_t start_cycles = get_cycle_count();
5069         uint8_t sniff = sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE];
5070         assert (sniff == 0);
5071         ptrdiff_t elapsed_cycles = get_cycle_count() - start_cycles;
5072         // add sniff here just to defeat the optimizer
5073         elapsed_cycles += sniff;
5074         return (int) elapsed_cycles;
5075     }
5076
5077 public:
5078     static BOOL init(int n_heaps)
5079     {
5080         assert (sniff_buffer == NULL && n_sniff_buffers == 0);
5081         if (!GCToOSInterface::CanGetCurrentProcessorNumber())
5082         {
5083             n_sniff_buffers = n_heaps*2+1;
5084             size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1;
5085             size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
5086             if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overflow
5087             {
5088                 return FALSE;
5089             }
5090
5091             sniff_buffer = new (nothrow) uint8_t[sniff_buf_size];
5092             if (sniff_buffer == 0)
5093                 return FALSE;
5094             memset(sniff_buffer, 0, sniff_buf_size*sizeof(uint8_t));
5095         }
5096
5097         // If GC NUMA awareness cannot be enabled, force all heaps into
5098         // one NUMA node by filling the array with all 0s.
5099         if (!GCToOSInterface::CanEnableGCNumaAware())
5100             memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node)); 
5101
5102         return TRUE;
5103     }
5104
5105     static void init_cpu_mapping(gc_heap * /*heap*/, int heap_number)
5106     {
5107         if (GCToOSInterface::CanGetCurrentProcessorNumber())
5108         {
5109             uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps;
5110             // We can safely cast heap_number to a uint16_t because GetCurrentProcessCpuCount
5111             // only returns up to MAX_SUPPORTED_CPUS procs right now. We only ever create at most
5112             // MAX_SUPPORTED_CPUS GC threads.
5113             proc_no_to_heap_no[proc_no] = (uint16_t)heap_number;
5114         }
5115     }
5116
5117     static void mark_heap(int heap_number)
5118     {
5119         if (GCToOSInterface::CanGetCurrentProcessorNumber())
5120             return;
5121
5122         for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++)
5123             sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;
5124     }
5125
5126     static int select_heap(alloc_context* acontext, int /*hint*/)
5127     {
5128         UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf
5129
5130         if (GCToOSInterface::CanGetCurrentProcessorNumber())
5131             return proc_no_to_heap_no[GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps];
5132
5133         unsigned sniff_index = Interlocked::Increment(&cur_sniff_index);
5134         sniff_index %= n_sniff_buffers;
5135
5136         int best_heap = 0;
5137         int best_access_time = 1000*1000*1000;
5138         int second_best_access_time = best_access_time;
5139
5140         uint8_t *l_sniff_buffer = sniff_buffer;
5141         unsigned l_n_sniff_buffers = n_sniff_buffers;
5142         for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++)
5143         {
5144             int this_access_time = access_time(l_sniff_buffer, heap_number, sniff_index, l_n_sniff_buffers);
5145             if (this_access_time < best_access_time)
5146             {
5147                 second_best_access_time = best_access_time;
5148                 best_access_time = this_access_time;
5149                 best_heap = heap_number;
5150             }
5151             else if (this_access_time < second_best_access_time)
5152             {
5153                 second_best_access_time = this_access_time;
5154             }
5155         }
5156
5157         if (best_access_time*2 < second_best_access_time)
5158         {
5159             sniff_buffer[(1 + best_heap*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;
5160
5161             dprintf (3, ("select_heap yields crisp %d for context %p\n", best_heap, (void *)acontext));
5162         }
5163         else
5164         {
5165             dprintf (3, ("select_heap yields vague %d for context %p\n", best_heap, (void *)acontext ));
5166         }
5167
5168         return best_heap;
5169     }
5170
5171     static bool can_find_heap_fast()
5172     {
5173         return GCToOSInterface::CanGetCurrentProcessorNumber();
5174     }
5175
5176     static uint16_t find_proc_no_from_heap_no(int heap_number)
5177     {
5178         return heap_no_to_proc_no[heap_number];
5179     }
5180
5181     static void set_proc_no_for_heap(int heap_number, uint16_t proc_no)
5182     {
5183         heap_no_to_proc_no[heap_number] = proc_no;
5184     }
5185
5186     static uint16_t find_numa_node_from_heap_no(int heap_number)
5187     {
5188         return heap_no_to_numa_node[heap_number];
5189     }
5190
5191     static void set_numa_node_for_heap(int heap_number, uint16_t numa_node)
5192     {
5193         heap_no_to_numa_node[heap_number] = numa_node;
5194     }
5195
5196     static uint16_t find_cpu_group_from_heap_no(int heap_number)
5197     {
5198         return heap_no_to_cpu_group[heap_number];
5199     }
5200
5201     static void set_cpu_group_for_heap(int heap_number, uint16_t group_number)
5202     {
5203         heap_no_to_cpu_group[heap_number] = group_number;
5204     }
5205
5206     static uint16_t find_group_proc_from_heap_no(int heap_number)
5207     {
5208         return heap_no_to_group_proc[heap_number];
5209     }
5210
5211     static void set_group_proc_for_heap(int heap_number, uint16_t group_proc)
5212     {
5213         heap_no_to_group_proc[heap_number] = group_proc;
5214     }
5215
5216     static void init_numa_node_to_heap_map(int nheaps)
5217     {   // called right after GCHeap::Init() has finished for each heap
5218         // when numa is not enabled, heap_no_to_numa_node[] are all filled
5219         // with 0s during initialization, and will be treated as one node
5220         numa_node_to_heap_map[0] = 0;
5221         int node_index = 1;
5222
5223         for (int i=1; i < nheaps; i++)
5224         {
5225             if (heap_no_to_numa_node[i] != heap_no_to_numa_node[i-1])
5226                 numa_node_to_heap_map[node_index++] = (uint16_t)i;
5227         }
5228         numa_node_to_heap_map[node_index] = (uint16_t)nheaps; //mark the end with nheaps
5229     }
5230
5231     static void get_heap_range_for_heap(int hn, int* start, int* end)
5232     {   // 1-tier/no-NUMA case: heap_no_to_numa_node[] is all zeros and
5233         // treated as one node; thus start=0, end=n_heaps.
5234         uint16_t numa_node = heap_no_to_numa_node[hn];
5235         *start = (int)numa_node_to_heap_map[numa_node];
5236         *end   = (int)(numa_node_to_heap_map[numa_node+1]);
5237     }
5238 };
5239 uint8_t* heap_select::sniff_buffer;
5240 unsigned heap_select::n_sniff_buffers;
5241 unsigned heap_select::cur_sniff_index;
5242 uint16_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
5243 uint16_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
5244 uint16_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
5245 uint16_t heap_select::heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
5246 uint16_t heap_select::heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
5247 uint16_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
5248
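// Worked example of the sniff-buffer indexing used by heap_select, assuming
// 4 heaps: n_sniff_buffers = 4*2+1 = 9 and the buffer spans 1 + 4*9 + 1 = 38
// cache lines. Heap h's byte for sniff index s sits at cache line
// (1 + h*9 + s), so probes of different heaps never share a line, and the
// extra leading and trailing lines are presumably padding against false
// sharing with neighboring allocations.
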
5249 BOOL gc_heap::create_thread_support (unsigned number_of_heaps)
5250 {
5251     BOOL ret = FALSE;
5252     if (!gc_start_event.CreateOSManualEventNoThrow (FALSE))
5253     {
5254         goto cleanup;
5255     }
5256     if (!ee_suspend_event.CreateOSAutoEventNoThrow (FALSE))
5257     {
5258         goto cleanup;
5259     }
5260     if (!gc_t_join.init (number_of_heaps, join_flavor_server_gc))
5261     {
5262         goto cleanup;
5263     }
5264
5265     ret = TRUE;
5266
5267 cleanup:
5268
5269     if (!ret)
5270     {
5271         destroy_thread_support();
5272     }
5273
5274     return ret;
5275 }
5276
5277 void gc_heap::destroy_thread_support ()
5278 {
5279     if (ee_suspend_event.IsValid())
5280     {
5281         ee_suspend_event.CloseEvent();
5282     }
5283     if (gc_start_event.IsValid())
5284     {
5285         gc_start_event.CloseEvent();
5286     }
5287 }
5288
5289 void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affinity)
5290 {
5291     affinity->Group = GCThreadAffinity::None;
5292     affinity->Processor = GCThreadAffinity::None;
5293
5294     uint16_t gn, gpn;
5295     GCToOSInterface::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
5296
5297     int bit_number = 0;
5298     for (uintptr_t mask = 1; mask !=0; mask <<=1) 
5299     {
5300         if (bit_number == gpn)
5301         {
5302             dprintf(3, ("using processor group %d, mask %Ix for heap %d\n", gn, mask, heap_number));
5303             affinity->Processor = gpn;
5304             affinity->Group = gn;
5305             heap_select::set_cpu_group_for_heap(heap_number, gn);
5306             heap_select::set_group_proc_for_heap(heap_number, gpn);
5307             if (GCToOSInterface::CanEnableGCNumaAware())
5308             {  
5309                 PROCESSOR_NUMBER proc_no;
5310                 proc_no.Group    = gn;
5311                 proc_no.Number   = (uint8_t)gpn;
5312                 proc_no.Reserved = 0;
5313
5314                 uint16_t node_no = 0;
5315                 if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
5316                     heap_select::set_numa_node_for_heap(heap_number, node_no);
5317             }
5318             else
5319             {   // no numa setting, each cpu group is treated as a node
5320                 heap_select::set_numa_node_for_heap(heap_number, gn);
5321             }
5322             return;
5323         }
5324         bit_number++;
5325     }
5326 }
5327
5328 void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affinity)
5329 {
5330     affinity->Group = GCThreadAffinity::None;
5331     affinity->Processor = GCThreadAffinity::None;
5332
5333     uintptr_t pmask = process_mask;
5334     int bit_number = 0; 
5335     uint8_t proc_number = 0;
5336     for (uintptr_t mask = 1; mask != 0; mask <<= 1)
5337     {
5338         if ((mask & pmask) != 0)
5339         {
5340             if (bit_number == heap_number)
5341             {
5342                 dprintf (3, ("Using processor %d for heap %d", proc_number, heap_number));
5343                 affinity->Processor = proc_number;
5344                 heap_select::set_proc_no_for_heap(heap_number, proc_number);
5345                 if (GCToOSInterface::CanEnableGCNumaAware())
5346                 {
5347                     uint16_t node_no = 0;
5348                     PROCESSOR_NUMBER proc_no;
5349                     proc_no.Group = 0;
5350                     proc_no.Number = (uint8_t)proc_number;
5351                     proc_no.Reserved = 0;
5352                     if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
5353                     {
5354                         heap_select::set_numa_node_for_heap(heap_number, node_no);
5355                     }
5356                 }
5357                 return;
5358             }
5359             bit_number++;
5360         }
5361         proc_number++;
5362     }
5363 }
5364
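// Worked example for the mask walk above: with process_mask = 0b10110
// (processors 1, 2, and 4 available), heap 0 is placed on processor 1,
// heap 1 on processor 2, and heap 2 on processor 4; the heap number indexes
// the set bits of the mask, not the raw processor numbers.
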
5365 bool gc_heap::create_gc_thread ()
5366 {
5367     dprintf (3, ("Creating gc thread\n"));
5368     return GCToEEInterface::CreateThread(gc_thread_stub, this, false, ".NET Server GC");
5369 }
5370
5371 #ifdef _MSC_VER
5372 #pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
5373 #endif //_MSC_VER
5374 void gc_heap::gc_thread_function ()
5375 {
5376     assert (gc_done_event.IsValid());
5377     assert (gc_start_event.IsValid());
5378     dprintf (3, ("gc thread started"));
5379
5380     heap_select::init_cpu_mapping(this, heap_number);
5381
5382     while (1)
5383     {
5384         assert (!gc_t_join.joined());
5385
5386         if (heap_number == 0)
5387         {
5388             gc_heap::ee_suspend_event.Wait(INFINITE, FALSE);
5389
5390             BEGIN_TIMING(suspend_ee_during_log);
5391             GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
5392             END_TIMING(suspend_ee_during_log);
5393
5394             proceed_with_gc_p = TRUE;
5395
5396             if (!should_proceed_with_gc())
5397             {
5398                 update_collection_counts_for_no_gc();
5399                 proceed_with_gc_p = FALSE;
5400             }
5401             else
5402             {
5403                 settings.init_mechanisms();
5404                 gc_start_event.Set();
5405             }
5406             dprintf (3, ("%d gc thread waiting...", heap_number));
5407         }
5408         else
5409         {
5410             gc_start_event.Wait(INFINITE, FALSE);
5411             dprintf (3, ("%d gc thread waiting... Done", heap_number));
5412         }
5413
5414         assert ((heap_number == 0) || proceed_with_gc_p);
5415
5416         if (proceed_with_gc_p)
5417         {
5418             garbage_collect (GCHeap::GcCondemnedGeneration);
5419
5420             if (pm_trigger_full_gc)
5421             {
5422                 garbage_collect_pm_full_gc();
5423             }
5424         }
5425
5426         if (heap_number == 0)
5427         {
5428             if (proceed_with_gc_p && (!settings.concurrent))
5429             {
5430                 do_post_gc();
5431             }
5432
5433 #ifdef BACKGROUND_GC
5434             recover_bgc_settings();
5435 #endif //BACKGROUND_GC
5436
5437 #ifdef MULTIPLE_HEAPS
5438             for (int i = 0; i < gc_heap::n_heaps; i++)
5439             {
5440                 gc_heap* hp = gc_heap::g_heaps[i];
5441                 hp->add_saved_spinlock_info (false, me_release, mt_block_gc);
5442                 leave_spin_lock(&hp->more_space_lock_soh);
5443             }
5444 #endif //MULTIPLE_HEAPS
5445
5446             gc_heap::gc_started = FALSE;
5447
5448             BEGIN_TIMING(restart_ee_during_log);
5449             GCToEEInterface::RestartEE(TRUE);
5450             END_TIMING(restart_ee_during_log);
5451             process_sync_log_stats();
5452
5453             dprintf (SPINLOCK_LOG, ("GC Lgc"));
5454             leave_spin_lock (&gc_heap::gc_lock);
5455
5456             gc_heap::internal_gc_done = true;
5457
5458             if (proceed_with_gc_p)
5459                 set_gc_done();
5460             else
5461             {
5462                 // If we didn't actually do a GC, it means we didn't wake up the other threads;
5463                 // we still need to set the gc_done_event for those threads.
5464                 for (int i = 0; i < gc_heap::n_heaps; i++)
5465                 {
5466                     gc_heap* hp = gc_heap::g_heaps[i];
5467                     hp->set_gc_done();
5468                 }
5469             }
5470         }
5471         else
5472         {
5473             int spin_count = 32 * (gc_heap::n_heaps - 1);
5474
5475             // wait until RestartEE has progressed to a stage where we can restart user threads
5476             while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads())
5477             {
5478                 spin_and_switch (spin_count, (gc_heap::internal_gc_done || GCHeap::SafeToRestartManagedThreads()));
5479             }
5480             set_gc_done();
5481         }
5482     }
5483 }
5484 #ifdef _MSC_VER
5485 #pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
5486 #endif //_MSC_VER
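
// Rendezvous summary for gc_thread_function above: heap 0 waits on
// ee_suspend_event, suspends the EE, and either sets gc_start_event to release
// the other GC threads or skips the collection; every other heap simply waits
// on gc_start_event. Afterwards heap 0 alone runs do_post_gc (for blocking
// GCs), restarts the EE, drops gc_lock, and sets gc_done on every heap when
// the GC was skipped, since in that case the other threads were never woken.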
5487
5488 #endif //MULTIPLE_HEAPS
5489
5490 bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
5491 {
5492 #if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
5493     // Currently there is no way for us to specify the NUMA node to allocate on via hosting interfaces to
5494     // a host. This will need to be added later.
5495 #if !defined(FEATURE_CORECLR) && !defined(BUILD_AS_STANDALONE)
5496     if (!CLRMemoryHosted())
5497 #endif
5498     {
5499         if (GCToOSInterface::CanEnableGCNumaAware())
5500         {
5501             uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
5502             if (GCToOSInterface::VirtualCommit(addr, size, numa_node))
5503                 return true;
5504         }
5505     }
5506 #else
5507     UNREFERENCED_PARAMETER(h_number);
5508 #endif
5509
5510     //numa aware not enabled, or call failed --> fallback to VirtualCommit()
5511     return GCToOSInterface::VirtualCommit(addr, size);
5512 }
5513
5514 #ifndef SEG_MAPPING_TABLE
5515 inline
5516 heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p)
5517 {
5518     uint8_t* sadd = add;
5519     heap_segment* hs = 0;
5520     heap_segment* hs1 = 0;
5521     if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
5522     {
5523         delta = 0;
5524         return 0;
5525     }
5526     //repeat in case there is a concurrent insertion in the table.
5527     do
5528     {
5529         hs = hs1;
5530         sadd = add;
5531         seg_table->lookup (sadd);
5532         hs1 = (heap_segment*)sadd;
5533     } while (hs1 && !in_range_for_segment (add, hs1) && (hs != hs1));
5534
5535     hs = hs1;
5536
5537     if ((hs == 0) ||
5538         (verify_p && (add > heap_segment_reserved ((heap_segment*)(sadd + delta)))))
5539         delta = 0;
5540     return hs;
5541 }
5542 #endif //SEG_MAPPING_TABLE
5543
5544 class mark
5545 {
5546 public:
5547     uint8_t* first;
5548     size_t len;
5549
5550     // If we want to save space we can have a pool of plug_and_gap's instead of 
5551     // always having 2 allocated for each pinned plug.
5552     gap_reloc_pair saved_pre_plug;
5553     // If we decide to not compact, we need to restore the original values.
5554     gap_reloc_pair saved_pre_plug_reloc;
5555
5556     gap_reloc_pair saved_post_plug;
5557
5558     // Supposedly, pinned objects cannot have references, but we are seeing some from pinvoke
5559     // frames. Also if it's an artificially pinned plug created by us, it can certainly 
5560     // have references. 
5561     // We know these cases will be rare so we can optimize this to be allocated only on demand.
5562     gap_reloc_pair saved_post_plug_reloc;
5563
5564     // We need to calculate this after we are done with plan phase and before compact
5565     // phase because compact phase will change the bricks so relocate_address will no 
5566     // longer work.
5567     uint8_t* saved_pre_plug_info_reloc_start;
5568
5569     // We need to save this because we will have no way to calculate it, unlike the 
5570     // pre plug info start which is right before this plug.
5571     uint8_t* saved_post_plug_info_start;
5572
5573 #ifdef SHORT_PLUGS
5574     uint8_t* allocation_context_start_region;
5575 #endif //SHORT_PLUGS
5576
5577     // How the bits in these bytes are organized:
5578     // MSB --> LSB
5579     // bit to indicate whether it's a short obj | 3 bits for refs in this short obj | 2 unused bits | bit to indicate if it's collectible | last bit
5580     // last bit indicates if there's pre or post info associated with this plug. If it's not set all other bits will be 0.
5581     BOOL saved_pre_p;
5582     BOOL saved_post_p;
5583
5584 #ifdef _DEBUG
5585     // We are seeing this getting corrupted for a pinned plug (PP) with a non-pinned plug (NP) after it.
5586     // Save it when we first set it and make sure it doesn't change.
5587     gap_reloc_pair saved_post_plug_debug;
5588 #endif //_DEBUG
5589
5590     size_t get_max_short_bits()
5591     {
5592         return (sizeof (gap_reloc_pair) / sizeof (uint8_t*));
5593     }
5594
5595     // pre bits
5596     size_t get_pre_short_start_bit ()
5597     {
5598         return (sizeof (saved_pre_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
5599     }
5600
5601     BOOL pre_short_p()
5602     {
5603         return (saved_pre_p & (1 << (sizeof (saved_pre_p) * 8 - 1)));
5604     }
5605
5606     void set_pre_short()
5607     {
5608         saved_pre_p |= (1 << (sizeof (saved_pre_p) * 8 - 1));
5609     }
5610
5611     void set_pre_short_bit (size_t bit)
5612     {
5613         saved_pre_p |= 1 << (get_pre_short_start_bit() + bit);
5614     }
5615
5616     BOOL pre_short_bit_p (size_t bit)
5617     {
5618         return (saved_pre_p & (1 << (get_pre_short_start_bit() + bit)));
5619     }
5620
5621 #ifdef COLLECTIBLE_CLASS
5622     void set_pre_short_collectible()
5623     {
5624         saved_pre_p |= 2;
5625     }
5626
5627     BOOL pre_short_collectible_p()
5628     {
5629         return (saved_pre_p & 2);
5630     }
5631 #endif //COLLECTIBLE_CLASS
5632
5633     // post bits
5634     size_t get_post_short_start_bit ()
5635     {
5636         return (sizeof (saved_post_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
5637     }
5638
5639     BOOL post_short_p()
5640     {
5641         return (saved_post_p & (1 << (sizeof (saved_post_p) * 8 - 1)));
5642     }
5643
5644     void set_post_short()
5645     {
5646         saved_post_p |= (1 << (sizeof (saved_post_p) * 8 - 1));
5647     }
5648
5649     void set_post_short_bit (size_t bit)
5650     {
5651         saved_post_p |= 1 << (get_post_short_start_bit() + bit);
5652     }
5653
5654     BOOL post_short_bit_p (size_t bit)
5655     {
5656         return (saved_post_p & (1 << (get_post_short_start_bit() + bit)));
5657     }
5658
5659 #ifdef COLLECTIBLE_CLASS
5660     void set_post_short_collectible()
5661     {
5662         saved_post_p |= 2;
5663     }
5664
5665     BOOL post_short_collectible_p()
5666     {
5667         return (saved_post_p & 2);
5668     }
5669 #endif //COLLECTIBLE_CLASS
5670
5671     uint8_t* get_plug_address() { return first; }
5672
5673     BOOL has_pre_plug_info() { return saved_pre_p; }
5674     BOOL has_post_plug_info() { return saved_post_p; }
5675
5676     gap_reloc_pair* get_pre_plug_reloc_info() { return &saved_pre_plug_reloc; }
5677     gap_reloc_pair* get_post_plug_reloc_info() { return &saved_post_plug_reloc; }
5678     void set_pre_plug_info_reloc_start (uint8_t* reloc) { saved_pre_plug_info_reloc_start = reloc; }
5679     uint8_t* get_post_plug_info_start() { return saved_post_plug_info_start; }
5680
5681     // We need to temporarily recover the shortened plugs for compact phase so we can
5682     // copy over the whole plug and their related info (mark bits/cards). But we will
5683     // need to set the artificial gap back so compact phase can keep reading the plug info.
5684     // We also need to recover the saved info because we'll need to recover it later.
5685     // 
5686     // So we would call swap_p*_plug_and_saved once to recover the object info; then call 
5687     // it again to recover the artificial gap.
5688     void swap_pre_plug_and_saved()
5689     {
5690         gap_reloc_pair temp;
5691         memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
5692         memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
5693         saved_pre_plug_reloc = temp;
5694     }
5695
5696     void swap_post_plug_and_saved()
5697     {
5698         gap_reloc_pair temp;
5699         memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
5700         memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
5701         saved_post_plug_reloc = temp;
5702     }
5703
5704     void swap_pre_plug_and_saved_for_profiler()
5705     {
5706         gap_reloc_pair temp;
5707         memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
5708         memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
5709         saved_pre_plug = temp;
5710     }
5711
5712     void swap_post_plug_and_saved_for_profiler()
5713     {
5714         gap_reloc_pair temp;
5715         memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
5716         memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
5717         saved_post_plug = temp;
5718     }
5719
5720     // We should think about whether it's really necessary to have to copy back the pre plug
5721     // info since it was already copied during compacting plugs. But if a plug doesn't move
5722     // by >= 3 pointer sizes (the size of gap_reloc_pair), it means we'd have to recover the pre plug info.
5723     void recover_plug_info() 
5724     {
5725         if (saved_pre_p)
5726         {
5727             if (gc_heap::settings.compaction)
5728             {
5729                 dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", 
5730                     first,
5731                     &saved_pre_plug_reloc, 
5732                     saved_pre_plug_info_reloc_start));
5733                 memcpy (saved_pre_plug_info_reloc_start, &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
5734             }
5735             else
5736             {
5737                 dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", 
5738                     first,
5739                     &saved_pre_plug, 
5740                     (first - sizeof (plug_and_gap))));
5741                 memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
5742             }
5743         }
5744
5745         if (saved_post_p)
5746         {
5747             if (gc_heap::settings.compaction)
5748             {
5749                 dprintf (3, ("%Ix: REC Post: %Ix-%Ix", 
5750                     first,
5751                     &saved_post_plug_reloc, 
5752                     saved_post_plug_info_start));
5753                 memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
5754             }
5755             else
5756             {
5757                 dprintf (3, ("%Ix: REC Post: %Ix-%Ix", 
5758                     first,
5759                     &saved_post_plug, 
5760                     saved_post_plug_info_start));
5761                 memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
5762             }
5763         }
5764     }
5765 };
5766
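// Bit layout of saved_pre_p / saved_post_p, worked out from the accessors
// above for a 32-bit BOOL (a sketch, not a separate contract; the "3 ptr"
// figure comes from the recover_plug_info comment):
//   bit 31      short-object flag (set_pre_short / set_post_short)
//   bits 30..28 one bit per reference slot in a short object
//               (get_*_short_start_bit() == 32 - 1 - 3 == 28)
//   bits 27..2  the "unused bits" of the layout comment, widened by the
//               32-bit BOOL
//   bit 1       collectible flag (COLLECTIBLE_CLASS builds)
//   bit 0       whether any pre/post plug info exists; when clear, all
//               other bits are 0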
5767
5768 void gc_mechanisms::init_mechanisms()
5769 {
5770     condemned_generation = 0;
5771     promotion = FALSE;//TRUE;
5772     compaction = TRUE;
5773 #ifdef FEATURE_LOH_COMPACTION
5774     loh_compaction = gc_heap::should_compact_loh();
5775 #else
5776     loh_compaction = FALSE;
5777 #endif //FEATURE_LOH_COMPACTION
5778     heap_expansion = FALSE;
5779     concurrent = FALSE;
5780     demotion = FALSE;
5781     elevation_reduced = FALSE;
5782     found_finalizers = FALSE;
5783 #ifdef BACKGROUND_GC
5784     background_p = recursive_gc_sync::background_running_p() != FALSE;
5785     allocations_allowed = TRUE;
5786 #endif //BACKGROUND_GC
5787
5788     entry_memory_load = 0;
5789     exit_memory_load = 0;
5790
5791 #ifdef STRESS_HEAP
5792     stress_induced = FALSE;
5793 #endif // STRESS_HEAP
5794 }
5795
5796 void gc_mechanisms::first_init()
5797 {
5798     gc_index = 0;
5799     gen0_reduction_count = 0;
5800     should_lock_elevation = FALSE;
5801     elevation_locked_count = 0;
5802     reason = reason_empty;
5803 #ifdef BACKGROUND_GC
5804     pause_mode = gc_heap::gc_can_use_concurrent ? pause_interactive : pause_batch;
5805 #ifdef _DEBUG
5806     int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode());
5807     if (debug_pause_mode >= 0)
5808     {
5809         assert (debug_pause_mode <= pause_sustained_low_latency);
5810         pause_mode = (gc_pause_mode)debug_pause_mode;
5811     }
5812 #endif //_DEBUG
5813 #else //BACKGROUND_GC
5814     pause_mode = pause_batch;
5815 #endif //BACKGROUND_GC
5816
5817     init_mechanisms();
5818 }
5819
5820 void gc_mechanisms::record (gc_history_global* history)
5821 {
5822 #ifdef MULTIPLE_HEAPS
5823     history->num_heaps = gc_heap::n_heaps;
5824 #else
5825     history->num_heaps = 1;
5826 #endif //MULTIPLE_HEAPS
5827
5828     history->condemned_generation = condemned_generation;
5829     history->gen0_reduction_count = gen0_reduction_count;
5830     history->reason = reason;
5831     history->pause_mode = (int)pause_mode;
5832     history->mem_pressure = entry_memory_load;
5833     history->global_mechanims_p = 0;
5834
5835     // start setting the boolean values.
5836     if (concurrent)
5837         history->set_mechanism_p (global_concurrent);
5838     
5839     if (compaction)
5840         history->set_mechanism_p (global_compaction);
5841
5842     if (promotion)
5843         history->set_mechanism_p (global_promotion);
5844     
5845     if (demotion)
5846         history->set_mechanism_p (global_demotion);
5847
5848     if (card_bundles)
5849         history->set_mechanism_p (global_card_bundles);
5850
5851     if (elevation_reduced)
5852         history->set_mechanism_p (global_elevation);
5853 }
5854
5855 /**********************************
5856    Called at the beginning of a GC to fix the allocated size to
5857    what is really allocated, or to turn the free area into an unused object.
5858    It needs to be called after all of the other allocation contexts have been
5859    fixed since it relies on alloc_allocated.
5860  ********************************/
5861
5862 //for_gc_p indicates that the work is being done for GC,
5863 //as opposed to concurrent heap verification
5864 void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
5865 {
5866     UNREFERENCED_PARAMETER(for_gc_p);
5867
5868     // The gen 0 alloc context is never used for allocation in the allocator path. It's
5869     // still used in the allocation path during GCs.
5870     assert (generation_allocation_pointer (youngest_generation) == nullptr);
5871     assert (generation_allocation_limit (youngest_generation) == nullptr);
5872     heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated;
5873 }
5874
5875 void gc_heap::fix_large_allocation_area (BOOL for_gc_p)
5876 {
5877     UNREFERENCED_PARAMETER(for_gc_p);
5878
5879 #ifdef _DEBUG
5880     alloc_context* acontext = 
5881 #endif // _DEBUG
5882         generation_alloc_context (large_object_generation);
5883     assert (acontext->alloc_ptr == 0);
5884     assert (acontext->alloc_limit == 0); 
5885 #if 0
5886     dprintf (3, ("Large object alloc context: ptr: %Ix, limit %Ix",
5887                  (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
5888     fix_allocation_context (acontext, FALSE, get_alignment_constant (FALSE));
5889     if (for_gc_p)
5890     {
5891         acontext->alloc_ptr = 0;
5892         acontext->alloc_limit = acontext->alloc_ptr;
5893     }
5894 #endif //0
5895 }
5896
5897 //for_gc_p indicates that the work is being done for GC,
5898 //as opposed to concurrent heap verification
5899 void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
5900                                       int align_const)
5901 {
5902     dprintf (3, ("Fixing allocation context %Ix: ptr: %Ix, limit: %Ix",
5903                  (size_t)acontext,
5904                  (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
5905
5906     if (((size_t)(alloc_allocated - acontext->alloc_limit) > Align (min_obj_size, align_const)) ||
5907         !for_gc_p)
5908     {
5909         uint8_t*  point = acontext->alloc_ptr;
5910         if (point != 0)
5911         {
5912             size_t  size = (acontext->alloc_limit - acontext->alloc_ptr);
5913             // the allocation area was from the free list
5914             // it was shortened by Align (min_obj_size) to make room for
5915             // at least the shortest unused object
5916             size += Align (min_obj_size, align_const);
5917             assert ((size >= Align (min_obj_size)));
5918
5919             dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point,
5920                        (size_t)point + size ));
5921             make_unused_array (point, size);
5922
5923             if (for_gc_p)
5924             {
5925                 generation_free_obj_space (generation_of (0)) += size;
5926                 alloc_contexts_used ++;
5927             }
5928         }
5929     }
5930     else if (for_gc_p)
5931     {
5932         alloc_allocated = acontext->alloc_ptr;
5933         assert (heap_segment_allocated (ephemeral_heap_segment) <=
5934                 heap_segment_committed (ephemeral_heap_segment));
5935         alloc_contexts_used ++;
5936     }
5937
5938     if (for_gc_p)
5939     {
5940         // We need to update the alloc_bytes to reflect the portion that we have not used  
5941         acontext->alloc_bytes -= (acontext->alloc_limit - acontext->alloc_ptr);  
5942         acontext->alloc_ptr = 0;
5943         acontext->alloc_limit = acontext->alloc_ptr;
5944     }
5945 }
5946
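// Worked example for fix_allocation_context, with illustrative numbers:
// suppose a context has alloc_ptr = P and alloc_limit = P + 96 on a heap
// where Align (min_obj_size) is 24. The context was originally handed
// 96 + 24 bytes, so the fix carves [P, P + 120) into a free (unused-array)
// object; for a GC it then charges those 120 bytes to gen0 free-obj space
// and zeroes the context's pointer and limit.
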
5947 //used by the heap verification for concurrent gc.
5948 //it nulls out the words set by fix_allocation_context for heap_verification
5949 void repair_allocation (gc_alloc_context* acontext, void*)
5950 {
5951     uint8_t*  point = acontext->alloc_ptr;
5952
5953     if (point != 0)
5954     {
5955         dprintf (3, ("Clearing [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
5956                      (size_t)acontext->alloc_limit+Align(min_obj_size)));
5957         memclr (acontext->alloc_ptr - plug_skew,
5958                 (acontext->alloc_limit - acontext->alloc_ptr)+Align (min_obj_size));
5959     }
5960 }
5961
5962 void void_allocation (gc_alloc_context* acontext, void*)
5963 {
5964     uint8_t*  point = acontext->alloc_ptr;
5965
5966     if (point != 0)
5967     {
5968         dprintf (3, ("Void [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
5969                      (size_t)acontext->alloc_limit+Align(min_obj_size)));
5970         acontext->alloc_ptr = 0;
5971         acontext->alloc_limit = acontext->alloc_ptr;
5972     }
5973 }
5974
5975 void gc_heap::repair_allocation_contexts (BOOL repair_p)
5976 {
5977     GCToEEInterface::GcEnumAllocContexts (repair_p ? repair_allocation : void_allocation, NULL);
5978 }
5979
5980 struct fix_alloc_context_args
5981 {
5982     BOOL for_gc_p;
5983     void* heap;
5984 };
5985
5986 void fix_alloc_context (gc_alloc_context* acontext, void* param)
5987 {
5988     fix_alloc_context_args* args = (fix_alloc_context_args*)param;
5989     g_theGCHeap->FixAllocContext(acontext, (void*)(size_t)(args->for_gc_p), args->heap);
5990 }
5991
5992 void gc_heap::fix_allocation_contexts (BOOL for_gc_p)
5993 {
5994     fix_alloc_context_args args;
5995     args.for_gc_p = for_gc_p;
5996     args.heap = __this;
5997
5998     GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
5999     fix_youngest_allocation_area(for_gc_p);
6000     fix_large_allocation_area(for_gc_p);
6001 }
6002
6003 void gc_heap::fix_older_allocation_area (generation* older_gen)
6004 {
6005     heap_segment* older_gen_seg = generation_allocation_segment (older_gen);
6006     if (generation_allocation_limit (older_gen) !=
6007         heap_segment_plan_allocated (older_gen_seg))
6008     {
6009         uint8_t*  point = generation_allocation_pointer (older_gen);
6010
6011         size_t  size = (generation_allocation_limit (older_gen) -
6012                                generation_allocation_pointer (older_gen));
6013         if (size != 0)
6014         {
6015             assert ((size >= Align (min_obj_size)));
6016             dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point+size));
6017             make_unused_array (point, size);
6018             if (size >= min_free_list)
6019             {
6020                 generation_allocator (older_gen)->thread_item_front (point, size);
6021                 add_gen_free (older_gen->gen_num, size);
6022                 generation_free_list_space (older_gen) += size;
6023             }
6024             else
6025             {
6026                 generation_free_obj_space (older_gen) += size;
6027             }
6028         }
6029     }
6030     else
6031     {
6032         assert (older_gen_seg != ephemeral_heap_segment);
6033         heap_segment_plan_allocated (older_gen_seg) =
6034             generation_allocation_pointer (older_gen);
6035         generation_allocation_limit (older_gen) =
6036             generation_allocation_pointer (older_gen);
6037     }
6038
6039     generation_allocation_pointer (older_gen) = 0;
6040     generation_allocation_limit (older_gen) = 0;
6041 }
6042
6043 void gc_heap::set_allocation_heap_segment (generation* gen)
6044 {
6045     uint8_t* p = generation_allocation_start (gen);
6046     assert (p);
6047     heap_segment* seg = generation_allocation_segment (gen);
6048     if (in_range_for_segment (p, seg))
6049         return;
6050
6051     // try ephemeral heap segment in case of heap expansion
6052     seg = ephemeral_heap_segment;
6053     if (!in_range_for_segment (p, seg))
6054     {
6055         seg = heap_segment_rw (generation_start_segment (gen));
6056
6057         PREFIX_ASSUME(seg != NULL);
6058
6059         while (!in_range_for_segment (p, seg))
6060         {
6061             seg = heap_segment_next_rw (seg);
6062             PREFIX_ASSUME(seg != NULL);
6063         }
6064     }
6065
6066     generation_allocation_segment (gen) = seg;
6067 }
6068
6069 void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start)
6070 {
6071     assert (start);
6072     assert (Align ((size_t)start) == (size_t)start);
6073     generation_allocation_start (gen) = start;
6074     generation_allocation_pointer (gen) =  0;//start + Align (min_obj_size);
6075     generation_allocation_limit (gen) = 0;//generation_allocation_pointer (gen);
6076     set_allocation_heap_segment (gen);
6077 }
6078
6079 #ifdef BACKGROUND_GC
6080 //TODO BACKGROUND_GC this is for test only
6081 void
6082 gc_heap::disallow_new_allocation (int gen_number)
6083 {
6084     UNREFERENCED_PARAMETER(gen_number);
6085     settings.allocations_allowed = FALSE;
6086 }
6087 void
6088 gc_heap::allow_new_allocation (int gen_number)
6089 {
6090     UNREFERENCED_PARAMETER(gen_number);
6091     settings.allocations_allowed = TRUE;
6092 }
6093
6094 #endif //BACKGROUND_GC
6095
6096 bool gc_heap::new_allocation_allowed (int gen_number)
6097 {
6098 #ifdef BACKGROUND_GC
6099     //TODO BACKGROUND_GC this is for test only
6100     if (!settings.allocations_allowed)
6101     {
6102         dprintf (2, ("new allocation not allowed"));
6103         return FALSE;
6104     }
6105 #endif //BACKGROUND_GC
6106
6107     if (dd_new_allocation (dynamic_data_of (gen_number)) < 0)
6108     {
6109         if (gen_number != 0)
6110         {
6111             // For LOH we will give it more budget before we try a GC.
6112             if (settings.concurrent)
6113             {
6114                 dynamic_data* dd2 = dynamic_data_of (max_generation + 1 );
6115
6116                 if (dd_new_allocation (dd2) <= (ptrdiff_t)(-2 * dd_desired_allocation (dd2)))
6117                 {
6118                     return TRUE;
6119                 }
6120             }
6121         }
6122         return FALSE;
6123     }
6124 #ifndef MULTIPLE_HEAPS
6125     else if ((settings.pause_mode != pause_no_gc) && (gen_number == 0))
6126     {
6127         dprintf (3, ("evaluating allocation rate"));
6128         dynamic_data* dd0 = dynamic_data_of (0);
6129         if ((allocation_running_amount - dd_new_allocation (dd0)) >
6130             dd_min_size (dd0))
6131         {
6132             uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp();
6133             if ((ctime - allocation_running_time) > 1000)
6134             {
6135                 dprintf (2, (">1s since last gen0 gc"));
6136                 return FALSE;
6137             }
6138             else
6139             {
6140                 allocation_running_amount = dd_new_allocation (dd0);
6141             }
6142         }
6143     }
6144 #endif //MULTIPLE_HEAPS
6145     return TRUE;
6146 }
6147
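// Mechanics of the LOH special case above, with illustrative numbers: once a
// generation's budget goes negative this normally answers FALSE, but during a
// concurrent GC a non-gen0 request is answered TRUE again once the LOH's
// dd_new_allocation has dropped to -2x its desired allocation or below, e.g.
// to -256 MB when the desired LOH allocation is 128 MB.
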
6148 inline
6149 ptrdiff_t gc_heap::get_desired_allocation (int gen_number)
6150 {
6151     return dd_desired_allocation (dynamic_data_of (gen_number));
6152 }
6153
6154 inline
6155 ptrdiff_t  gc_heap::get_new_allocation (int gen_number)
6156 {
6157     return dd_new_allocation (dynamic_data_of (gen_number));
6158 }
6159
6160 //return the amount allocated so far in gen_number
6161 inline
6162 ptrdiff_t  gc_heap::get_allocation (int gen_number)
6163 {
6164     dynamic_data* dd = dynamic_data_of (gen_number);
6165
6166     return dd_desired_allocation (dd) - dd_new_allocation (dd);
6167 }
6168
6169 inline
6170 BOOL grow_mark_stack (mark*& m, size_t& len, size_t init_len)
6171 {
6172     size_t new_size = max (init_len, 2*len);
6173     mark* tmp = new (nothrow) mark [new_size];
6174     if (tmp)
6175     {
6176         memcpy (tmp, m, len * sizeof (mark));
6177         delete [] m;  // m was allocated with new[], so use the array form of delete
6178         m = tmp;
6179         len = new_size;
6180         return TRUE;
6181     }
6182     else
6183     {
6184         dprintf (1, ("Failed to allocate %Id bytes for mark stack", (len * sizeof (mark))));
6185         return FALSE;
6186     }
6187 }
6188
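// Failure behavior of grow_mark_stack, for its callers: on success the stack
// at least doubles (and is never smaller than init_len) and the old entries
// are copied over; on allocation failure m and len are left untouched, so the
// caller still holds a valid, if full, stack.
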
6189 inline
6190 uint8_t* pinned_plug (mark* m)
6191 {
6192    return m->first;
6193 }
6194
6195 inline
6196 size_t& pinned_len (mark* m)
6197 {
6198     return m->len;
6199 }
6200
6201 inline
6202 void set_new_pin_info (mark* m, uint8_t* pin_free_space_start)
6203 {
6204     m->len = pinned_plug (m) - pin_free_space_start;
6205 #ifdef SHORT_PLUGS
6206     m->allocation_context_start_region = pin_free_space_start;
6207 #endif //SHORT_PLUGS
6208 }
6209
6210 #ifdef SHORT_PLUGS
6211 inline
6212 uint8_t*& pin_allocation_context_start_region (mark* m)
6213 {
6214     return m->allocation_context_start_region;
6215 }
6216
6217 uint8_t* get_plug_start_in_saved (uint8_t* old_loc, mark* pinned_plug_entry)
6218 {
6219     uint8_t* saved_pre_plug_info = (uint8_t*)(pinned_plug_entry->get_pre_plug_reloc_info());
6220     uint8_t* plug_start_in_saved = saved_pre_plug_info + (old_loc - (pinned_plug (pinned_plug_entry) - sizeof (plug_and_gap)));
6221     //dprintf (1, ("detected a very short plug: %Ix before PP %Ix, pad %Ix", 
6222     //    old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
6223     dprintf (1, ("EP: %Ix(%Ix), %Ix", old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
6224     return plug_start_in_saved;
6225 }
6226
6227 inline
6228 void set_padding_in_expand (uint8_t* old_loc,
6229                             BOOL set_padding_on_saved_p,
6230                             mark* pinned_plug_entry)
6231 {
6232     if (set_padding_on_saved_p)
6233     {
6234         set_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
6235     }
6236     else
6237     {
6238         set_plug_padded (old_loc);
6239     }
6240 }
6241
6242 inline
6243 void clear_padding_in_expand (uint8_t* old_loc,
6244                               BOOL set_padding_on_saved_p,
6245                               mark* pinned_plug_entry)
6246 {
6247     if (set_padding_on_saved_p)
6248     {
6249         clear_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
6250     }
6251     else
6252     {
6253         clear_plug_padded (old_loc);
6254     }
6255 }
6256 #endif //SHORT_PLUGS
6257
6258 void gc_heap::reset_pinned_queue()
6259 {
6260     mark_stack_tos = 0;
6261     mark_stack_bos = 0;
6262 }
6263
6264 void gc_heap::reset_pinned_queue_bos()
6265 {
6266     mark_stack_bos = 0;
6267 }
6268
6269 // last_pinned_plug is only for assertion purposes.
6270 void gc_heap::merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size)
6271 {
6272     if (last_pinned_plug)
6273     {
6274         mark& last_m = mark_stack_array[mark_stack_tos - 1];
6275         assert (last_pinned_plug == last_m.first);
6276         if (last_m.saved_post_p)
6277         {
6278             last_m.saved_post_p = FALSE;
6279             dprintf (3, ("setting last plug %Ix post to false", last_m.first));
6280             // We need to recover what the gap has overwritten.
6281             memcpy ((last_m.first + last_m.len - sizeof (plug_and_gap)), &(last_m.saved_post_plug), sizeof (gap_reloc_pair));
6282         }
6283         last_m.len += plug_size;
6284         dprintf (3, ("recovered the last part of plug %Ix, setting its plug size to %Ix", last_m.first, last_m.len));
6285     }
6286 }
6287
6288 void gc_heap::set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit)
6289 {
6290     dprintf (3, ("sanp: ptr: %Ix, limit: %Ix", alloc_pointer, alloc_limit));
6291     dprintf (3, ("oldest %Id: %Ix", mark_stack_bos, pinned_plug (oldest_pin())));
6292     if (!(pinned_plug_que_empty_p()))
6293     {
6294         mark*  oldest_entry = oldest_pin();
6295         uint8_t* plug = pinned_plug (oldest_entry);
6296         if ((plug >= alloc_pointer) && (plug < alloc_limit))
6297         {
6298             alloc_limit = pinned_plug (oldest_entry);
6299             dprintf (3, ("now setting alloc context: %Ix->%Ix(%Id)",
6300                 alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer)));
6301         }
6302     }
6303 }
6304
6305 void gc_heap::set_allocator_next_pin (generation* gen)
6306 {
6307     dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen)));
6308     if (!(pinned_plug_que_empty_p()))
6309     {
6310         mark*  oldest_entry = oldest_pin();
6311         uint8_t* plug = pinned_plug (oldest_entry);
6312         if ((plug >= generation_allocation_pointer (gen)) &&
6313             (plug <  generation_allocation_limit (gen)))
6314         {
6315             generation_allocation_limit (gen) = pinned_plug (oldest_entry);
6316             dprintf (3, ("SANP: get next pin free space in gen%d for alloc: %Ix->%Ix(%Id)", 
6317                 gen->gen_num,
6318                 generation_allocation_pointer (gen), generation_allocation_limit (gen),
6319                 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
6320         }
6321         else
6322             assert (!((plug < generation_allocation_pointer (gen)) &&
6323                       (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
6324     }
6325 }
6326
6327 // After we set the info, we increase tos.
6328 void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8_t* alloc_pointer, uint8_t*& alloc_limit)
6329 {
6330     UNREFERENCED_PARAMETER(last_pinned_plug);
6331
6332     mark& m = mark_stack_array[mark_stack_tos];
6333     assert (m.first == last_pinned_plug);
6334
6335     m.len = plug_len;
6336     mark_stack_tos++;
6337     set_allocator_next_pin (alloc_pointer, alloc_limit);
6338 }
6339
6340 // After we set the info, we increase tos.
6341 void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen)
6342 {
6343     UNREFERENCED_PARAMETER(last_pinned_plug);
6344
6345     mark& m = mark_stack_array[mark_stack_tos];
6346     assert (m.first == last_pinned_plug);
6347
6348     m.len = plug_len;
6349     mark_stack_tos++;
6350     assert (gen != 0);
6351     // Why are we checking here? gen is never 0.
6352     if (gen != 0)
6353     {
6354         set_allocator_next_pin (gen);
6355     }
6356 }
6357
6358 size_t gc_heap::deque_pinned_plug ()
6359 {
6360     dprintf (3, ("dequed: %Id", mark_stack_bos));
6361     size_t m = mark_stack_bos;
6362     mark_stack_bos++;
6363     return m;
6364 }
6365
6366 inline
6367 mark* gc_heap::pinned_plug_of (size_t bos)
6368 {
6369     return &mark_stack_array [ bos ];
6370 }
6371
6372 inline
6373 mark* gc_heap::oldest_pin ()
6374 {
6375     return pinned_plug_of (mark_stack_bos);
6376 }
6377
6378 inline
6379 BOOL gc_heap::pinned_plug_que_empty_p ()
6380 {
6381     return (mark_stack_bos == mark_stack_tos);
6382 }
6383
6384 inline
6385 mark* gc_heap::before_oldest_pin()
6386 {
6387     if (mark_stack_bos >= 1)
6388         return pinned_plug_of (mark_stack_bos-1);
6389     else
6390         return 0;
6391 }
6392
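// The helpers above implement the pinned plug queue as a simple FIFO over
// mark_stack_array: set_pinned_info enqueues at mark_stack_tos,
// deque_pinned_plug consumes from mark_stack_bos, and the queue is empty when
// the two indices meet. For example, after three enqueues and one deque,
// oldest_pin() is entry 1 and before_oldest_pin() is entry 0.
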
6393 inline
6394 BOOL gc_heap::ephemeral_pointer_p (uint8_t* o)
6395 {
6396     return ((o >= ephemeral_low) && (o < ephemeral_high));
6397 }
6398
6399 #ifdef MH_SC_MARK
6400 inline
6401 int& gc_heap::mark_stack_busy()
6402 {
6403     return  g_mark_stack_busy [(heap_number+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
6404 }
6405 #endif //MH_SC_MARK
6406
6407 void gc_heap::make_mark_stack (mark* arr)
6408 {
6409     reset_pinned_queue();
6410     mark_stack_array = arr;
6411     mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
6412 #ifdef MH_SC_MARK
6413     mark_stack_busy() = 0;
6414 #endif //MH_SC_MARK
6415 }
6416
6417 #ifdef BACKGROUND_GC
6418 inline
6419 size_t& gc_heap::bpromoted_bytes(int thread)
6420 {
6421 #ifdef MULTIPLE_HEAPS
6422     return g_bpromoted [thread*16];
6423 #else //MULTIPLE_HEAPS
6424     UNREFERENCED_PARAMETER(thread);
6425     return g_bpromoted;
6426 #endif //MULTIPLE_HEAPS
6427 }
6428
6429 void gc_heap::make_background_mark_stack (uint8_t** arr)
6430 {
6431     background_mark_stack_array = arr;
6432     background_mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
6433     background_mark_stack_tos = arr;
6434 }
6435
6436 void gc_heap::make_c_mark_list (uint8_t** arr)
6437 {
6438     c_mark_list = arr;
6439     c_mark_list_index = 0;
6440     c_mark_list_length = 1 + (OS_PAGE_SIZE / MIN_OBJECT_SIZE);
6441 }
6442 #endif //BACKGROUND_GC
6443
6444
6445 #ifdef CARD_BUNDLE
6446
6447 // The card bundle keeps track of groups of card words.
6448 static const size_t card_bundle_word_width = 32;
6449
6450 // Number of card words (each one uint32_t of card_word_width bits) covered by a single card bundle bit.
6451 static const size_t card_bundle_size = (size_t)(GC_PAGE_SIZE / (sizeof(uint32_t)*card_bundle_word_width));
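// Illustrative arithmetic, assuming a 4 KB GC page (GC_PAGE_SIZE = 4096):
// card_bundle_size = 4096 / (4 * 32) = 32, i.e. each card bundle bit
// summarizes 32 card words (32 uint32_t's, or 128 bytes) of card table.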
6452
6453 inline
6454 size_t card_bundle_word (size_t cardb)
6455 {
6456     return cardb / card_bundle_word_width;
6457 }
6458
6459 inline
6460 uint32_t card_bundle_bit (size_t cardb)
6461 {
6462     return (uint32_t)(cardb % card_bundle_word_width);
6463 }
6464
6465 size_t align_cardw_on_bundle (size_t cardw)
6466 {
6467     return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 ));
6468 }
6469
6470 // Get the card bundle representing a card word
6471 size_t cardw_card_bundle (size_t cardw)
6472 {
6473     return cardw / card_bundle_size;
6474 }
6475
6476 // Get the first card word in a card bundle
6477 size_t card_bundle_cardw (size_t cardb)
6478 {
6479     return cardb * card_bundle_size;
6480 }
6481
6482 // Clear the specified card bundle
6483 void gc_heap::card_bundle_clear (size_t cardb)
6484 {
6485     card_bundle_table [card_bundle_word (cardb)] &= ~(1 << card_bundle_bit (cardb));
6486     dprintf (1,("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb),
6487               (size_t)card_bundle_cardw (cardb+1)));
6488 }
6489
6490 void gc_heap::card_bundle_set (size_t cardb)
6491 {
6492     if (!card_bundle_set_p (cardb))
6493     {
6494         card_bundle_table [card_bundle_word (cardb)] |= (1 << card_bundle_bit (cardb));
6495     }
6496 }
6497
6498 // Set the card bundle bits between start_cardb and end_cardb
6499 void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb)
6500 {
6501     if (start_cardb == end_cardb)
6502     {
6503         card_bundle_set(start_cardb);
6504         return;
6505     }
6506
6507     size_t start_word = card_bundle_word (start_cardb);
6508     size_t end_word = card_bundle_word (end_cardb);
6509
6510     if (start_word < end_word)
6511     {
6512         // Set the partial words
6513         card_bundle_table [start_word] |= highbits (~0u, card_bundle_bit (start_cardb));
6514
6515         if (card_bundle_bit (end_cardb))
6516             card_bundle_table [end_word] |= lowbits (~0u, card_bundle_bit (end_cardb));
6517
6518         // Set the full words
6519         for (size_t i = start_word + 1; i < end_word; i++)
6520             card_bundle_table [i] = ~0u;
6521     }
6522     else
6523     {
6524         card_bundle_table [start_word] |= (highbits (~0u, card_bundle_bit (start_cardb)) &
6525                                             lowbits (~0u, card_bundle_bit (end_cardb)));
6526     }
6527 }
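// Worked example (assuming highbits(m, b) keeps bits >= b and lowbits(m, b)
// keeps bits < b): card_bundles_set(33, 97) gives start_word = 1 / bit 1 and
// end_word = 3 / bit 1, so word 1 gets bits 1..31 (bundles 33..63), word 2
// is fully set (bundles 64..95), and word 3 gets bit 0 (bundle 96) - exactly
// the half-open range [33, 97[.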
6528
6529 // Indicates whether the specified bundle is set.
6530 BOOL gc_heap::card_bundle_set_p (size_t cardb)
6531 {
6532     return (card_bundle_table[card_bundle_word(cardb)] & (1 << card_bundle_bit (cardb)));
6533 }
6534
6535 // Returns the size (in bytes) of the card bundle table needed to represent the region [from, end[
6536 size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
6537 {
6538     // Number of heap bytes represented by a card bundle word
6539     size_t cbw_span = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
6540
6541     // Align the start of the region down
6542     from = (uint8_t*)((size_t)from & ~(cbw_span - 1));
6543
6544     // Align the end of the region up
6545     end = (uint8_t*)((size_t)(end + (cbw_span - 1)) & ~(cbw_span - 1));
6546
6547     // Make sure they're really aligned
6548     assert (((size_t)from & (cbw_span - 1)) == 0);
6549     assert (((size_t)end  & (cbw_span - 1)) == 0);
6550
6551     return ((end - from) / cbw_span) * sizeof (uint32_t);
6552 }
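// Illustrative sizing, assuming typical 64-bit values card_size = 256 and
// card_word_width = 32 (with card_bundle_size = 32 as computed above):
// cbw_span = 256 * 32 * 32 * 32 = 8 MB, so each uint32_t of card bundle
// table summarizes 8 MB of heap, and a 1 GB range needs only
// 128 * sizeof(uint32_t) = 512 bytes of card bundle table.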
6553
6554 // Takes a pointer to a card bundle table and an address, and returns a pointer that represents
6555 // where a theoretical card bundle table that represents every address (starting from 0) would
6556 // start if the bundle word representing the address were to be located at the pointer passed in.
6557 // The returned 'translated' pointer makes it convenient/fast to calculate where the card bundle
6558 // for a given address is using a simple shift operation on the address.
6559 uint32_t* translate_card_bundle_table (uint32_t* cb, uint8_t* lowest_address)
6560 {
6561     // The number of bytes of heap memory represented by a card bundle word
6562     const size_t heap_bytes_for_bundle_word = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
6563
6564     // Each card bundle word is 32 bits
6565     return (uint32_t*)((uint8_t*)cb - (((size_t)lowest_address / heap_bytes_for_bundle_word) * sizeof (uint32_t)));
6566 }
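// With the translated pointer, looking up the bundle word for an address is
// a single divide (in practice a shift, since heap_bytes_for_bundle_word is
// a power of two) with no base subtraction, e.g. (sketch):
//   uint32_t word = translated_cb[(size_t)addr / heap_bytes_for_bundle_word];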
6567
6568 void gc_heap::enable_card_bundles ()
6569 {
6570     if (can_use_write_watch_for_card_table() && (!card_bundles_enabled()))
6571     {
6572         dprintf (1, ("Enabling card bundles"));
6573
6574         // We initially set all of the card bundles
6575         card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
6576                           cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
6577         settings.card_bundles = TRUE;
6578     }
6579 }
6580
6581 BOOL gc_heap::card_bundles_enabled ()
6582 {
6583     return settings.card_bundles;
6584 }
6585
6586 #endif // CARD_BUNDLE
6587
6588 #if defined (_TARGET_AMD64_)
6589 #define brick_size ((size_t)4096)
6590 #else
6591 #define brick_size ((size_t)2048)
6592 #endif //_TARGET_AMD64_
6593
6594 inline
6595 size_t gc_heap::brick_of (uint8_t* add)
6596 {
6597     return (size_t)(add - lowest_address) / brick_size;
6598 }
6599
6600 inline
6601 uint8_t* gc_heap::brick_address (size_t brick)
6602 {
6603     return lowest_address + (brick_size * brick);
6604 }
6605
6606
6607 void gc_heap::clear_brick_table (uint8_t* from, uint8_t* end)
6608 {
6609     for (size_t i = brick_of (from); i < brick_of (end); i++)
6610         brick_table[i] = 0;
6611 }
6612
6613 //codes for the brick entries:
6614 //entry == 0 -> not assigned
6615 //entry >0 offset is entry-1
6616 //entry <0 jump back entry bricks
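//Example (illustrative): a plug that starts 0x40 bytes into brick i stores
//0x41 (offset + 1) in brick_table[i]. Bricks i+1..i+3 covered by the same
//plug store negative back-links (e.g. -1, -2, -3); a lookup starting at
//brick i+3 repeatedly adds the (negative) entry to its index until it
//reaches brick i, whose positive entry locates the plug within the brick.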
6617
6618
6619 inline
6620 void gc_heap::set_brick (size_t index, ptrdiff_t val)
6621 {
6622     if (val < -32767)
6623     {
6624         val = -32767;
6625     }
6626     assert (val < 32767);
6627     if (val >= 0)
6628         brick_table [index] = (short)val+1;
6629     else
6630         brick_table [index] = (short)val;
6631 }
6632
6633 inline
6634 int gc_heap::get_brick_entry (size_t index)
6635 {
6636 #ifdef MULTIPLE_HEAPS
6637     return VolatileLoadWithoutBarrier(&brick_table [index]);
6638 #else
6639     return brick_table[index];
6640 #endif
6641 }
6642
6643
6644 inline
6645 uint8_t* align_on_brick (uint8_t* add)
6646 {
6647     return (uint8_t*)((size_t)(add + brick_size - 1) & ~(brick_size - 1));
6648 }
6649
6650 inline
6651 uint8_t* align_lower_brick (uint8_t* add)
6652 {
6653     return (uint8_t*)(((size_t)add) & ~(brick_size - 1));
6654 }
6655
6656 size_t size_brick_of (uint8_t* from, uint8_t* end)
6657 {
6658     assert (((size_t)from & (brick_size-1)) == 0);
6659     assert (((size_t)end  & (brick_size-1)) == 0);
6660
6661     return ((end - from) / brick_size) * sizeof (short);
6662 }
6663
6664 inline
6665 uint8_t* gc_heap::card_address (size_t card)
6666 {
6667     return  (uint8_t*) (card_size * card);
6668 }
6669
6670 inline
6671 size_t gc_heap::card_of ( uint8_t* object)
6672 {
6673     return (size_t)(object) / card_size;
6674 }
6675
6676 inline
6677 size_t gc_heap::card_to_brick (size_t card)
6678 {
6679     return brick_of (card_address (card));
6680 }
6681
6682 inline
6683 uint8_t* align_on_card (uint8_t* add)
6684 {
6685     return (uint8_t*)((size_t)(add + card_size - 1) & ~(card_size - 1 ));
6686 }
6687 inline
6688 uint8_t* align_on_card_word (uint8_t* add)
6689 {
6690     return (uint8_t*) ((size_t)(add + (card_size*card_word_width)-1) & ~(card_size*card_word_width - 1));
6691 }
6692
6693 inline
6694 uint8_t* align_lower_card (uint8_t* add)
6695 {
6696     return (uint8_t*)((size_t)add & ~(card_size-1));
6697 }
6698
6699 inline
6700 void gc_heap::clear_card (size_t card)
6701 {
6702     card_table [card_word (card)] =
6703         (card_table [card_word (card)] & ~(1 << card_bit (card)));
6704     dprintf (3,("Cleared card %Ix [%Ix, %Ix[", card, (size_t)card_address (card),
6705               (size_t)card_address (card+1)));
6706 }
6707
6708 inline
6709 void gc_heap::set_card (size_t card)
6710 {
6711     size_t word = card_word (card);
6712     card_table[word] = (card_table [word] | (1 << card_bit (card)));
6713
6714 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
6715     // Also set the card bundle that corresponds to the card
6716     size_t bundle_to_set = cardw_card_bundle(word);
6717
6718     card_bundle_set(bundle_to_set);
6719
6720     dprintf (3,("Set card %Ix [%Ix, %Ix[ and bundle %Ix", card, (size_t)card_address (card), (size_t)card_address (card+1), bundle_to_set));
6721     assert(card_bundle_set_p(bundle_to_set) != 0);
6722 #endif
6723 }
6724
6725 inline
6726 BOOL  gc_heap::card_set_p (size_t card)
6727 {
6728     return ( card_table [ card_word (card) ] & (1 << card_bit (card)));
6729 }
6730
6731 // Returns the number of DWORDs in the card table that cover the
6732 // range of addresses [from, end[.
6733 size_t count_card_of (uint8_t* from, uint8_t* end)
6734 {
6735     return card_word (gcard_of (end - 1)) - card_word (gcard_of (from)) + 1;
6736 }
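// Worked example, assuming card_size = 256 and card_word_width = 32 (so one
// card word covers 8 KB of heap): an 8 KB-aligned [from, end[ of 8 KB needs
// exactly 1 DWORD, while a range that straddles one 8 KB boundary needs 2.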
6737
6738 // Returns the number of bytes to allocate for a card table
6739 // that covers the range of addresses [from, end[.
6740 size_t size_card_of (uint8_t* from, uint8_t* end)
6741 {
6742     return count_card_of (from, end) * sizeof(uint32_t);
6743 }
6744
6745 // We don't store seg_mapping_table in card_table_info because there is always exactly one view of it.
6746 class card_table_info
6747 {
6748 public:
6749     unsigned    recount;
6750     uint8_t*    lowest_address;
6751     uint8_t*    highest_address;
6752     short*      brick_table;
6753
6754 #ifdef CARD_BUNDLE
6755     uint32_t*   card_bundle_table;
6756 #endif //CARD_BUNDLE
6757
6758     // mark_array is always at the end of the data structure because we
6759     // want to be able to make one commit call for everything before it.
6760 #ifdef MARK_ARRAY
6761     uint32_t*   mark_array;
6762 #endif //MARK_ARRAY
6763
6764     size_t      size;
6765     uint32_t*   next_card_table;
6766 };
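// Memory layout of the allocation that backs a card table (see
// make_card_table below); the accessors that follow reach backwards from
// the card table pointer into this header:
//
//   [card_table_info][card table][brick table][card bundle table]
//   [sw write watch table][seg mapping table][mark array]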
6767
6768 // These are accessors on the untranslated card table.
6769 inline
6770 unsigned& card_table_refcount (uint32_t* c_table)
6771 {
6772     return *(unsigned*)((char*)c_table - sizeof (card_table_info));
6773 }
6774
6775 inline
6776 uint8_t*& card_table_lowest_address (uint32_t* c_table)
6777 {
6778     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->lowest_address;
6779 }
6780
6781 uint32_t* translate_card_table (uint32_t* ct)
6782 {
6783     return (uint32_t*)((uint8_t*)ct - card_word (gcard_of (card_table_lowest_address (ct))) * sizeof(uint32_t));
6784 }
6785
6786 inline
6787 uint8_t*& card_table_highest_address (uint32_t* c_table)
6788 {
6789     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->highest_address;
6790 }
6791
6792 inline
6793 short*& card_table_brick_table (uint32_t* c_table)
6794 {
6795     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->brick_table;
6796 }
6797
6798 #ifdef CARD_BUNDLE
6799 inline
6800 uint32_t*& card_table_card_bundle_table (uint32_t* c_table)
6801 {
6802     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->card_bundle_table;
6803 }
6804 #endif //CARD_BUNDLE
6805
6806 #ifdef MARK_ARRAY
6807 /* Support for mark_array */
6808
6809 inline
6810 uint32_t*& card_table_mark_array (uint32_t* c_table)
6811 {
6812     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array;
6813 }
6814
6815 #ifdef BIT64
6816 #define mark_bit_pitch ((size_t)16)
6817 #else
6818 #define mark_bit_pitch ((size_t)8)
6819 #endif // BIT64
6820 #define mark_word_width ((size_t)32)
6821 #define mark_word_size (mark_word_width * mark_bit_pitch)
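// Illustrative cost, on BIT64: one mark bit covers a 16-byte granule, so a
// 32-bit mark word covers 32 * 16 = 512 bytes of heap, and the mark array
// costs 4 bytes per 512 heap bytes (~0.8% overhead).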
6822
6823 inline
6824 uint8_t* align_on_mark_bit (uint8_t* add)
6825 {
6826     return (uint8_t*)((size_t)(add + (mark_bit_pitch - 1)) & ~(mark_bit_pitch - 1));
6827 }
6828
6829 inline
6830 uint8_t* align_lower_mark_bit (uint8_t* add)
6831 {
6832     return (uint8_t*)((size_t)(add) & ~(mark_bit_pitch - 1));
6833 }
6834
6835 inline
6836 BOOL is_aligned_on_mark_word (uint8_t* add)
6837 {
6838     return ((size_t)add == ((size_t)(add) & ~(mark_word_size - 1)));
6839 }
6840
6841 inline
6842 uint8_t* align_on_mark_word (uint8_t* add)
6843 {
6844     return (uint8_t*)((size_t)(add + mark_word_size - 1) & ~(mark_word_size - 1));
6845 }
6846
6847 inline
6848 uint8_t* align_lower_mark_word (uint8_t* add)
6849 {
6850     return (uint8_t*)((size_t)(add) & ~(mark_word_size - 1));
6851 }
6852
6853 inline
6854 size_t mark_bit_of (uint8_t* add)
6855 {
6856     return ((size_t)add / mark_bit_pitch);
6857 }
6858
6859 inline
6860 unsigned int mark_bit_bit (size_t mark_bit)
6861 {
6862     return (unsigned int)(mark_bit % mark_word_width);
6863 }
6864
6865 inline
6866 size_t mark_bit_word (size_t mark_bit)
6867 {
6868     return (mark_bit / mark_word_width);
6869 }
6870
6871 inline
6872 size_t mark_word_of (uint8_t* add)
6873 {
6874     return ((size_t)add) / mark_word_size;
6875 }
6876
6877 uint8_t* mark_word_address (size_t wd)
6878 {
6879     return (uint8_t*)(wd*mark_word_size);
6880 }
6881
6882 uint8_t* mark_bit_address (size_t mark_bit)
6883 {
6884     return (uint8_t*)(mark_bit*mark_bit_pitch);
6885 }
6886
6887 inline
6888 size_t mark_bit_bit_of (uint8_t* add)
6889 {
6890     return  (((size_t)add / mark_bit_pitch) % mark_word_width);
6891 }
6892
6893 inline
6894 unsigned int gc_heap::mark_array_marked(uint8_t* add)
6895 {
6896     return mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add));
6897 }
6898
6899 inline
6900 BOOL gc_heap::is_mark_bit_set (uint8_t* add)
6901 {
6902     return (mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add)));
6903 }
6904
6905 inline
6906 void gc_heap::mark_array_set_marked (uint8_t* add)
6907 {
6908     size_t index = mark_word_of (add);
6909     uint32_t val = (1 << mark_bit_bit_of (add));
6910 #ifdef MULTIPLE_HEAPS
6911     Interlocked::Or (&(mark_array [index]), val);
6912 #else
6913     mark_array [index] |= val;
6914 #endif 
6915 }
6916
6917 inline
6918 void gc_heap::mark_array_clear_marked (uint8_t* add)
6919 {
6920     mark_array [mark_word_of (add)] &= ~(1 << mark_bit_bit_of (add));
6921 }
6922
6923 size_t size_mark_array_of (uint8_t* from, uint8_t* end)
6924 {
6925     assert (((size_t)from & ((mark_word_size)-1)) == 0);
6926     assert (((size_t)end  & ((mark_word_size)-1)) == 0);
6927     return sizeof (uint32_t)*(((end - from) / mark_word_size));
6928 }
6929
6930 // In order to eliminate lowest_address from the mark array
6931 // computations (mark_word_of, etc.), mark_array is offset
6932 // according to the lowest_address.
6933 uint32_t* translate_mark_array (uint32_t* ma)
6934 {
6935     return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address));
6936 }
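// After translation, mark_array[mark_word_of (addr)] indexes the right slot
// for any addr in range without subtracting a base: the pointer is backed
// up by exactly the table space a zero-based heap would have occupied.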
6937
6938 // from and end must be page aligned addresses. 
6939 void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=TRUE*/
6940 #ifdef FEATURE_BASICFREEZE
6941                                 , BOOL read_only/*=FALSE*/
6942 #endif // FEATURE_BASICFREEZE
6943                                 )
6944 {
6945     if(!gc_can_use_concurrent)
6946         return;
6947
6948 #ifdef FEATURE_BASICFREEZE
6949     if (!read_only)
6950 #endif // FEATURE_BASICFREEZE
6951     {
6952         assert (from == align_on_mark_word (from));
6953     }
6954     assert (end == align_on_mark_word (end));
6955
6956 #ifdef BACKGROUND_GC
6957     uint8_t* current_lowest_address = background_saved_lowest_address;
6958     uint8_t* current_highest_address = background_saved_highest_address;
6959 #else
6960     uint8_t* current_lowest_address = lowest_address;
6961     uint8_t* current_highest_address = highest_address;
6962 #endif //BACKGROUND_GC
6963
6964     // The addresses may fall outside of the covered
6965     // range because of a newly allocated large object
6966     // segment.
6967     if ((end <= current_highest_address) && (from >= current_lowest_address))
6968     {
6969         size_t beg_word = mark_word_of (align_on_mark_word (from));
6970         MAYBE_UNUSED_VAR(beg_word);
6971         //align end word to make sure to cover the address
6972         size_t end_word = mark_word_of (align_on_mark_word (end));
6973         MAYBE_UNUSED_VAR(end_word);
6974         dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)",
6975                      (size_t)mark_word_address (beg_word),
6976                      (size_t)mark_word_address (end_word),
6977                      (size_t)from, (size_t)end,
6978                      (check_only ? "check_only" : "clear")));
6979         if (!check_only)
6980         {
6981             uint8_t* op = from;
6982             while (op < mark_word_address (beg_word))
6983             {
6984                 mark_array_clear_marked (op);
6985                 op += mark_bit_pitch;
6986             }
6987
6988             memset (&mark_array[beg_word], 0, (end_word - beg_word)*sizeof (uint32_t));
6989         }
6990 #ifdef _DEBUG
6991         else
6992         {
6993             //Beware, it is assumed that the mark array word straddling
6994             //start has been cleared before
6995             //verify that the array is empty.
6996             size_t  markw = mark_word_of (align_on_mark_word (from));
6997             size_t  markw_end = mark_word_of (align_on_mark_word (end));
6998             while (markw < markw_end)
6999             {
7000                 assert (!(mark_array [markw]));
7001                 markw++;
7002             }
7003             uint8_t* p = mark_word_address (markw_end);
7004             while (p < end)
7005             {
7006                 assert (!(mark_array_marked (p)));
7007                 p++;
7008             }
7009         }
7010 #endif //_DEBUG
7011     }
7012 }
7013 #endif //MARK_ARRAY
7014
7015 //These work on untranslated card tables
7016 inline
7017 uint32_t*& card_table_next (uint32_t* c_table)
7018 {
7019     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table;
7020 }
7021
7022 inline
7023 size_t& card_table_size (uint32_t* c_table)
7024 {
7025     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size;
7026 }
7027
7028 void own_card_table (uint32_t* c_table)
7029 {
7030     card_table_refcount (c_table) += 1;
7031 }
7032
7033 void destroy_card_table (uint32_t* c_table);
7034
7035 void delete_next_card_table (uint32_t* c_table)
7036 {
7037     uint32_t* n_table = card_table_next (c_table);
7038     if (n_table)
7039     {
7040         if (card_table_next (n_table))
7041         {
7042             delete_next_card_table (n_table);
7043         }
7044         if (card_table_refcount (n_table) == 0)
7045         {
7046             destroy_card_table (n_table);
7047             card_table_next (c_table) = 0;
7048         }
7049     }
7050 }
7051
7052 void release_card_table (uint32_t* c_table)
7053 {
7054     assert (card_table_refcount (c_table) >0);
7055     card_table_refcount (c_table) -= 1;
7056     if (card_table_refcount (c_table) == 0)
7057     {
7058         delete_next_card_table (c_table);
7059         if (card_table_next (c_table) == 0)
7060         {
7061             destroy_card_table (c_table);
7062             // sever the link from the parent
7063             if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table)
7064             {
7065                 g_gc_card_table = 0;
7066
7067 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7068                 g_gc_card_bundle_table = 0;
7069 #endif
7070 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7071                 SoftwareWriteWatch::StaticClose();
7072 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7073             }
7074             else
7075             {
7076                 uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))];
7077                 if (p_table)
7078                 {
7079                     while (p_table && (card_table_next (p_table) != c_table))
7080                         p_table = card_table_next (p_table);
7081                     card_table_next (p_table) = 0;
7082                 }
7083             }
7084         }
7085     }
7086 }
7087
7088 void destroy_card_table (uint32_t* c_table)
7089 {
7090 //  delete (uint32_t*)&card_table_refcount(c_table);
7091
7092     GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table));
7093     dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table)));
7094 }
7095
7096 uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
7097 {
7098     assert (g_gc_lowest_address == start);
7099     assert (g_gc_highest_address == end);
7100
7101     uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
7102
7103     size_t bs = size_brick_of (start, end);
7104     size_t cs = size_card_of (start, end);
7105 #ifdef MARK_ARRAY
7106     size_t ms = (gc_can_use_concurrent ? 
7107                  size_mark_array_of (start, end) :
7108                  0);
7109 #else
7110     size_t ms = 0;
7111 #endif //MARK_ARRAY
7112
7113     size_t cb = 0;
7114
7115 #ifdef CARD_BUNDLE
7116     if (can_use_write_watch_for_card_table())
7117     {
7118         cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address);
7119 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7120         // If we're not manually managing the card bundles, we will need to use OS write
7121         // watch APIs over this region to track changes.
7122         virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
7123 #endif
7124     }
7125 #endif //CARD_BUNDLE
7126
7127     size_t wws = 0;
7128 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7129     size_t sw_ww_table_offset = 0;
7130     if (gc_can_use_concurrent)
7131     {
7132         size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
7133         sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
7134         wws = sw_ww_table_offset - sw_ww_size_before_table + SoftwareWriteWatch::GetTableByteSize(start, end);
7135     }
7136 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7137
7138 #ifdef GROWABLE_SEG_MAPPING_TABLE
7139     size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
7140     size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
7141     size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
7142
7143     st += (st_table_offset_aligned - st_table_offset);
7144 #else //GROWABLE_SEG_MAPPING_TABLE
7145     size_t st = 0;
7146 #endif //GROWABLE_SEG_MAPPING_TABLE
7147
7148     // it is impossible for alloc_size to overflow due to the bounds on each of
7149     // its components.
7150     size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
7151     uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
7152
7153     if (!mem)
7154         return 0;
7155
7156     dprintf (2, ("Init - Card table alloc for %Id bytes: [%Ix, %Ix[",
7157                  alloc_size, (size_t)mem, (size_t)(mem+alloc_size)));
7158
7159     // mark array will be committed separately (per segment).
7160     size_t commit_size = alloc_size - ms;
7161
7162     if (!GCToOSInterface::VirtualCommit (mem, commit_size))
7163     {
7164         dprintf (2, ("Card table commit failed"));
7165         GCToOSInterface::VirtualRelease (mem, alloc_size);
7166         return 0;
7167     }
7168
7169     // initialize the ref count
7170     uint32_t* ct = (uint32_t*)(mem+sizeof (card_table_info));
7171     card_table_refcount (ct) = 0;
7172     card_table_lowest_address (ct) = start;
7173     card_table_highest_address (ct) = end;
7174     card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs);
7175     card_table_size (ct) = alloc_size;
7176     card_table_next (ct) = 0;
7177
7178 #ifdef CARD_BUNDLE
7179     card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
7180
7181 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7182     g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), g_gc_lowest_address);
7183 #endif
7184
7185 #endif //CARD_BUNDLE
7186
7187 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7188     if (gc_can_use_concurrent)
7189     {
7190         SoftwareWriteWatch::InitializeUntranslatedTable(mem + sw_ww_table_offset, start);
7191     }
7192 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7193
7194 #ifdef GROWABLE_SEG_MAPPING_TABLE
7195     seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
7196     seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table - 
7197                                         size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
7198 #endif //GROWABLE_SEG_MAPPING_TABLE
7199
7200 #ifdef MARK_ARRAY
7201     if (gc_can_use_concurrent)
7202         card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
7203     else
7204         card_table_mark_array (ct) = NULL;
7205 #endif //MARK_ARRAY
7206
7207     return translate_card_table(ct);
7208 }
7209
7210 void gc_heap::set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p)
7211 {
7212 #ifdef MULTIPLE_HEAPS
7213     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
7214     {
7215         gc_heap* hp = gc_heap::g_heaps [hn];
7216         hp->fgm_result.set_fgm (f, s, loh_p);
7217     }
7218 #else //MULTIPLE_HEAPS
7219     fgm_result.set_fgm (f, s, loh_p);
7220 #endif //MULTIPLE_HEAPS
7221 }
7222
7223 //returns 0 for success, -1 otherwise
7224 // We are doing all the decommitting here because we want to make sure we have
7225 // enough memory to do so - if we did this during copy_brick_card_table and
7226 // failed to decommit, it would make the failure case very complicated to
7227 // handle. This way we may waste some decommit if we call this multiple
7228 // times before the next FGC, but it's easier to handle the failure case.
7229 int gc_heap::grow_brick_card_tables (uint8_t* start,
7230                                      uint8_t* end,
7231                                      size_t size,
7232                                      heap_segment* new_seg, 
7233                                      gc_heap* hp, 
7234                                      BOOL loh_p)
7235 {
7236     uint8_t* la = g_gc_lowest_address;
7237     uint8_t* ha = g_gc_highest_address;
7238     uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address);
7239     uint8_t* saved_g_highest_address = max (end, g_gc_highest_address);
7240     seg_mapping* new_seg_mapping_table = nullptr;
7241 #ifdef BACKGROUND_GC
7242     // This value is only for logging purposes - it's not necessarily exactly what we
7243     // would commit for the mark array, but it's close enough for diagnostic purposes.
7244     size_t logging_ma_commit_size = size_mark_array_of (0, (uint8_t*)size);
7245 #endif //BACKGROUND_GC
7246
7247     // See if the address is already covered
7248     if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address))
7249     {
7250         {
7251             //modify the highest address so the span covered
7252             //is twice the previous one.
7253             uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit());
7254             // On non-Windows systems, we get only an approximate value that can possibly be
7255             // slightly lower than the saved_g_highest_address.
7256             // In such case, we set the top to the saved_g_highest_address so that the
7257             // card and brick tables always cover the whole new range.
7258             if (top < saved_g_highest_address)
7259             {
7260                 top = saved_g_highest_address;
7261             }
7262             size_t ps = ha-la;
7263 #ifdef BIT64
7264             if (ps > (uint64_t)200*1024*1024*1024)
7265                 ps += (uint64_t)100*1024*1024*1024;
7266             else
7267 #endif // BIT64
7268                 ps *= 2;
7269
7270             if (saved_g_lowest_address < g_gc_lowest_address)
7271             {
7272                 if (ps > (size_t)g_gc_lowest_address)
7273                     saved_g_lowest_address = (uint8_t*)(size_t)OS_PAGE_SIZE;
7274                 else
7275                 {
7276                     assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE);
7277                     saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps));
7278                 }
7279             }
7280
7281             if (saved_g_highest_address > g_gc_highest_address)
7282             {
7283                 saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address);
7284                 if (saved_g_highest_address > top)
7285                     saved_g_highest_address = top;
7286             }
7287         }
7288         dprintf (GC_TABLE_LOG, ("Growing card table [%Ix, %Ix[",
7289                                 (size_t)saved_g_lowest_address,
7290                                 (size_t)saved_g_highest_address));
7291
7292         bool write_barrier_updated = false;
7293         uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
7294         uint32_t* saved_g_card_table = g_gc_card_table;
7295
7296 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7297         uint32_t* saved_g_card_bundle_table = g_gc_card_bundle_table;
7298 #endif
7299
7300         uint32_t* ct = 0;
7301         uint32_t* translated_ct = 0;
7302         short* bt = 0;
7303
7304         size_t cs = size_card_of (saved_g_lowest_address, saved_g_highest_address);
7305         size_t bs = size_brick_of (saved_g_lowest_address, saved_g_highest_address);
7306
7307 #ifdef MARK_ARRAY
7308         size_t ms = (gc_heap::gc_can_use_concurrent ? 
7309                     size_mark_array_of (saved_g_lowest_address, saved_g_highest_address) :
7310                     0);
7311 #else
7312         size_t ms = 0;
7313 #endif //MARK_ARRAY
7314
7315         size_t cb = 0;
7316
7317 #ifdef CARD_BUNDLE
7318         if (can_use_write_watch_for_card_table())
7319         {
7320             cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address);
7321
7322 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7323             // If we're not manually managing the card bundles, we will need to use OS write
7324             // watch APIs over this region to track changes.
7325             virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
7326 #endif
7327         }
7328 #endif //CARD_BUNDLE
7329
7330         size_t wws = 0;
7331 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7332         size_t sw_ww_table_offset = 0;
7333         if (gc_can_use_concurrent)
7334         {
7335             size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
7336             sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
7337             wws =
7338                 sw_ww_table_offset -
7339                 sw_ww_size_before_table +
7340                 SoftwareWriteWatch::GetTableByteSize(saved_g_lowest_address, saved_g_highest_address);
7341         }
7342 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7343
7344 #ifdef GROWABLE_SEG_MAPPING_TABLE
7345         size_t st = size_seg_mapping_table_of (saved_g_lowest_address, saved_g_highest_address);
7346         size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
7347         size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
7348         st += (st_table_offset_aligned - st_table_offset);
7349 #else //GROWABLE_SEG_MAPPING_TABLE
7350         size_t st = 0;
7351 #endif //GROWABLE_SEG_MAPPING_TABLE
7352
7353         // it is impossible for alloc_size to overflow due to the bounds on each of
7354         // its components.
7355         size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
7356         dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id",
7357                                   cs, bs, cb, wws, st, ms));
7358
7359         uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
7360
7361         if (!mem)
7362         {
7363             set_fgm_result (fgm_grow_table, alloc_size, loh_p);
7364             goto fail;
7365         }
7366
7367         dprintf (GC_TABLE_LOG, ("Table alloc for %Id bytes: [%Ix, %Ix[",
7368                                  alloc_size, (size_t)mem, (size_t)((uint8_t*)mem+alloc_size)));
7369
7370         {   
7371             // mark array will be committed separately (per segment).
7372             size_t commit_size = alloc_size - ms;
7373
7374             if (!GCToOSInterface::VirtualCommit (mem, commit_size))
7375             {
7376                 dprintf (GC_TABLE_LOG, ("Table commit failed"));
7377                 set_fgm_result (fgm_commit_table, commit_size, loh_p);
7378                 goto fail;
7379             }
7380         }
7381
7382         ct = (uint32_t*)(mem + sizeof (card_table_info));
7383         card_table_refcount (ct) = 0;
7384         card_table_lowest_address (ct) = saved_g_lowest_address;
7385         card_table_highest_address (ct) = saved_g_highest_address;
7386         card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))];
7387
7388         //clear the card table
7389 /*
7390         memclr ((uint8_t*)ct,
7391                 (((saved_g_highest_address - saved_g_lowest_address)*sizeof (uint32_t) /
7392                   (card_size * card_word_width))
7393                  + sizeof (uint32_t)));
7394 */
7395
7396         bt = (short*)((uint8_t*)ct + cs);
7397
7398         // No initialization needed, will be done in copy_brick_card
7399
7400         card_table_brick_table (ct) = bt;
7401
7402 #ifdef CARD_BUNDLE
7403         card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
7404         //set all bundles so we look at all of the cards
7405         memset(card_table_card_bundle_table (ct), 0xFF, cb);
7406 #endif //CARD_BUNDLE
7407
7408 #ifdef GROWABLE_SEG_MAPPING_TABLE
7409         {
7410             new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
7411             new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
7412                                               size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
7413             memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
7414                 &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
7415                 size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
7416
7417             // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
7418             // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
7419             // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
7420             // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
7421             // if an OOM occurs.
7422         }
7423 #endif //GROWABLE_SEG_MAPPING_TABLE
7424
7425 #ifdef MARK_ARRAY
7426         if(gc_can_use_concurrent)
7427             card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
7428         else
7429             card_table_mark_array (ct) = NULL;
7430 #endif //MARK_ARRAY
7431
7432         translated_ct = translate_card_table (ct);
7433
7434         dprintf (GC_TABLE_LOG, ("card table: %Ix(translated: %Ix), seg map: %Ix, mark array: %Ix", 
7435             (size_t)ct, (size_t)translated_ct, (size_t)new_seg_mapping_table, (size_t)card_table_mark_array (ct)));
7436
7437 #ifdef BACKGROUND_GC
7438         if (hp->should_commit_mark_array())
7439         {
7440             dprintf (GC_TABLE_LOG, ("new low: %Ix, new high: %Ix, latest mark array is %Ix(translate: %Ix)", 
7441                                     saved_g_lowest_address, saved_g_highest_address,
7442                                     card_table_mark_array (ct),
7443                                     translate_mark_array (card_table_mark_array (ct))));
7444             uint32_t* new_mark_array = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, saved_g_lowest_address));
7445             if (!commit_new_mark_array_global (new_mark_array))
7446             {
7447                 dprintf (GC_TABLE_LOG, ("failed to commit portions in the mark array for existing segments"));
7448                 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7449                 goto fail;
7450             }
7451
7452             if (!commit_mark_array_new_seg (hp, new_seg, translated_ct, saved_g_lowest_address))
7453             {
7454                 dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg"));
7455                 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7456                 goto fail;
7457             }
7458         }
7459         else
7460         {
7461             clear_commit_flag_global();
7462         }
7463 #endif //BACKGROUND_GC
7464
7465 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7466         if (gc_can_use_concurrent)
7467         {
7468             // The current design of software write watch requires that the runtime is suspended during resize. Suspending
7469             // on resize is preferred because it is a far less frequent operation than GetWriteWatch() / ResetWriteWatch().
7470             // Suspending here allows copying dirty state from the old table into the new table, and not have to merge old
7471             // table info lazily as done for card tables.
7472
7473             // Either this thread was the thread that did the suspension which means we are suspended; or this is called
7474             // from a GC thread which means we are in a blocking GC and also suspended.
7475             bool is_runtime_suspended = GCToEEInterface::IsGCThread();
7476             if (!is_runtime_suspended)
7477             {
7478                 // Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the
7479                 // runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call.
7480                 // So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state
7481                 // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and
7482                 // g_gc_highest_address.
7483                 suspend_EE();
7484             }
7485
7486             g_gc_card_table = translated_ct;
7487
7488 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7489             g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
7490 #endif
7491
7492             SoftwareWriteWatch::SetResizedUntranslatedTable(
7493                 mem + sw_ww_table_offset,
7494                 saved_g_lowest_address,
7495                 saved_g_highest_address);
7496
7497             seg_mapping_table = new_seg_mapping_table;
7498
7499             // Since the runtime is already suspended, update the write barrier here as well.
7500             // This passes a bool telling whether we need to switch to the post
7501             // grow version of the write barrier.  This test tells us if the new
7502             // segment was allocated at a lower address than the old, requiring
7503             // that we start doing an upper bounds check in the write barrier.
7504             g_gc_lowest_address = saved_g_lowest_address;
7505             g_gc_highest_address = saved_g_highest_address;
7506             stomp_write_barrier_resize(true, la != saved_g_lowest_address);
7507             write_barrier_updated = true;
7508
7509             if (!is_runtime_suspended)
7510             {
7511                 restart_EE();
7512             }
7513         }
7514         else
7515 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7516         {
7517             g_gc_card_table = translated_ct;
7518
7519 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7520             g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
7521 #endif
7522         }
7523
7524         if (!write_barrier_updated)
7525         {
7526             seg_mapping_table = new_seg_mapping_table;
7527             GCToOSInterface::FlushProcessWriteBuffers();
7528             g_gc_lowest_address = saved_g_lowest_address;
7529             g_gc_highest_address = saved_g_highest_address;
7530
7531             // This passes a bool telling whether we need to switch to the post
7532             // grow version of the write barrier.  This test tells us if the new
7533             // segment was allocated at a lower address than the old, requiring
7534             // that we start doing an upper bounds check in the write barrier.
7535             // This will also suspend the runtime if the write barrier type needs
7536             // to be changed, so we are doing this after all global state has
7537             // been updated. See the comment above suspend_EE() for more
7538             // info.
7539             stomp_write_barrier_resize(GCToEEInterface::IsGCThread(), la != saved_g_lowest_address);
7540         }
7541
7542         return 0;
7543         
7544 fail:
7545         //cleanup mess and return -1;
7546
7547         if (mem)
7548         {
7549             assert(g_gc_card_table == saved_g_card_table);
7550
7551 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7552             assert(g_gc_card_bundle_table  == saved_g_card_bundle_table);
7553 #endif
7554
7555             //delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
7556             if (!GCToOSInterface::VirtualRelease (mem, alloc_size))
7557             {
7558                 dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed"));
7559                 assert (!"release failed");
7560             }
7561         }
7562
7563         return -1;
7564     }
7565     else
7566     {
7567 #ifdef BACKGROUND_GC
7568         if (hp->should_commit_mark_array())
7569         {
7570             dprintf (GC_TABLE_LOG, ("in range new seg %Ix, mark_array is %Ix", new_seg, hp->mark_array));
7571             if (!commit_mark_array_new_seg (hp, new_seg))
7572             {
7573                 dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg in range"));
7574                 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7575                 return -1;
7576             }
7577         }
7578 #endif //BACKGROUND_GC
7579     }
7580
7581     return 0;
7582 }
7583
7584 //copy all of the arrays managed by the card table for a page aligned range
7585 void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
7586                                      short* old_brick_table,
7587                                      heap_segment* seg,
7588                                      uint8_t* start, uint8_t* end)
7589 {
7590     ptrdiff_t brick_offset = brick_of (start) - brick_of (la);
7591
7592
7593     dprintf (2, ("copying tables for range [%Ix %Ix[", (size_t)start, (size_t)end));
7594
7595     // copy brick table
7596     short* brick_start = &brick_table [brick_of (start)];
7597     if (old_brick_table)
7598     {
7599         // segments are always on page boundaries
7600         memcpy (brick_start, &old_brick_table[brick_offset],
7601                 size_brick_of (start, end));
7602
7603     }
7604     else
7605     {
7606         // This is a large heap, just clear the brick table
7607     }
7608
7609     uint32_t* old_ct = &old_card_table[card_word (card_of (la))];
7610 #ifdef MARK_ARRAY
7611 #ifdef BACKGROUND_GC
7612     UNREFERENCED_PARAMETER(seg);
7613     if (recursive_gc_sync::background_running_p())
7614     {
7615         uint32_t* old_mark_array = card_table_mark_array (old_ct);
7616
7617         // We don't need to go through all the card tables here because 
7618         // we only need to copy from the GC version of the mark array - when we
7619         // mark (even in allocate_large_object) we always use that mark array.
7620         if ((card_table_highest_address (old_ct) >= start) &&
7621             (card_table_lowest_address (old_ct) <= end))
7622         {
7623             if ((background_saved_highest_address >= start) &&
7624                 (background_saved_lowest_address <= end))
7625             {
7626                 //copy the mark bits
7627                 // segments are always on page boundaries
7628                 uint8_t* m_start = max (background_saved_lowest_address, start);
7629                 uint8_t* m_end = min (background_saved_highest_address, end);
7630                 memcpy (&mark_array[mark_word_of (m_start)],
7631                         &old_mark_array[mark_word_of (m_start) - mark_word_of (la)],
7632                         size_mark_array_of (m_start, m_end));
7633             }
7634         }
7635         else
7636         {
7637             //only large segments can be out of range
7638             assert (old_brick_table == 0);
7639         }
7640     }
7641 #else //BACKGROUND_GC
7642     assert (seg != 0);
7643     clear_mark_array (start, heap_segment_committed(seg));
7644 #endif //BACKGROUND_GC
7645 #endif //MARK_ARRAY
7646
7647     // n-way merge with all of the card tables ever used in between
7648     uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]);
7649
7650     assert (ct);
7651     while (card_table_next (old_ct) != ct)
7652     {
7653         //copy if old card table contained [start, end[
7654         if ((card_table_highest_address (ct) >= end) &&
7655             (card_table_lowest_address (ct) <= start))
7656         {
7657             // OR the card tables together
7658
7659             size_t start_word = card_word (card_of (start));
7660
7661             uint32_t* dest = &card_table[start_word];
7662             uint32_t* src = &((translate_card_table (ct))[start_word]);
7663             ptrdiff_t count = count_card_of (start, end);
7664             for (int x = 0; x < count; x++)
7665             {
7666                 *dest |= *src;
7667
7668 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7669                 if (*src != 0)
7670                 {
7671                     card_bundle_set(cardw_card_bundle(start_word+x));
7672                 }
7673 #endif
7674
7675                 dest++;
7676                 src++;
7677             }
7678         }
7679         ct = card_table_next (ct);
7680     }
7681 }
7682
7683 //initialize all of the arrays managed by the card table for a page aligned range when an existing ro segment becomes in range
7684 void gc_heap::init_brick_card_range (heap_segment* seg)
7685 {
7686     dprintf (2, ("initialising tables for range [%Ix %Ix[",
7687                  (size_t)heap_segment_mem (seg),
7688                  (size_t)heap_segment_allocated (seg)));
7689
7690     // initialize the brick table
7691     for (size_t b = brick_of (heap_segment_mem (seg));
7692          b < brick_of (align_on_brick (heap_segment_allocated (seg)));
7693          b++)
7694     {
7695         set_brick (b, -1);
7696     }
7697
7698 #ifdef MARK_ARRAY
7699     if (recursive_gc_sync::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
7700     {
7701         assert (seg != 0);
7702         clear_mark_array (heap_segment_mem (seg), heap_segment_committed(seg));
7703     }
7704 #endif //MARK_ARRAY
7705
7706     clear_card_for_addresses (heap_segment_mem (seg),
7707                               heap_segment_allocated (seg));
7708 }
7709
7710 void gc_heap::copy_brick_card_table()
7711 {
7712     uint8_t* la = lowest_address;
7713     uint8_t* ha = highest_address;
7714     MAYBE_UNUSED_VAR(ha);
7715     uint32_t* old_card_table = card_table;
7716     short* old_brick_table = brick_table;
7717
7718     assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))]));
7719     assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));
7720
7721     /* todo: Need a global lock for this */
7722     uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
7723     own_card_table (ct);
7724     card_table = translate_card_table (ct);
7725     /* End of global lock */
7726     highest_address = card_table_highest_address (ct);
7727     lowest_address = card_table_lowest_address (ct);
7728
7729     brick_table = card_table_brick_table (ct);
7730
7731 #ifdef MARK_ARRAY
7732     if (gc_can_use_concurrent)
7733     {
7734         mark_array = translate_mark_array (card_table_mark_array (ct));
7735         assert (mark_word_of (g_gc_highest_address) ==
7736             mark_word_of (align_on_mark_word (g_gc_highest_address)));
7737     }
7738     else
7739         mark_array = NULL;
7740 #endif //MARK_ARRAY
7741
7742 #ifdef CARD_BUNDLE
7743 #if defined(MARK_ARRAY) && defined(_DEBUG)
7744     size_t cb_end = (size_t)((uint8_t*)card_table_card_bundle_table (ct) + size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address));
7745 #ifdef GROWABLE_SEG_MAPPING_TABLE
7746     size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
7747     size_t cb_end_aligned = align_for_seg_mapping_table (cb_end);
7748     st += (cb_end_aligned - cb_end);
7749 #else  //GROWABLE_SEG_MAPPING_TABLE
7750     size_t st = 0;
7751 #endif //GROWABLE_SEG_MAPPING_TABLE
7752 #endif //MARK_ARRAY && _DEBUG
7753     card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
7754
7755     // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the
7756     // start of the untranslated table.
7757     assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
7758             card_table_card_bundle_table (ct));
7759
7760     //set the card bundles if we are in a heap growth scenario
7761     if (card_bundles_enabled())
7762     {
7763         card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
7764                           cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
7765     }
7766     //check if we need to turn on card_bundles.
7767 #ifdef MULTIPLE_HEAPS
7768     // use 64-bit arithmetic here because of possible overflow on 32-bit platforms
7769     uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*gc_heap::n_heaps;
7770 #else
7771     // use 64-bit arithmetic here because of possible overflow on 32-bit platforms
7772     uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
7773 #endif //MULTIPLE_HEAPS
7774     if (reserved_memory >= th)
7775     {
7776         enable_card_bundles();
7777     }
7778
7779 #endif //CARD_BUNDLE
7780
7781     // for each of the segments and heaps, copy the brick table and
7782     // OR in the card table
7783     heap_segment* seg = generation_start_segment (generation_of (max_generation));
7784     while (seg)
7785     {
7786         if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
7787         {
7788             //check if it became in range
7789             if ((heap_segment_reserved (seg) > lowest_address) &&
7790                 (heap_segment_mem (seg) < highest_address))
7791             {
7792                 set_ro_segment_in_range (seg);
7793             }
7794         }
7795         else
7796         {
7797
7798             uint8_t* end = align_on_page (heap_segment_allocated (seg));
7799             copy_brick_card_range (la, old_card_table,
7800                                    old_brick_table,
7801                                    seg,
7802                                    align_lower_page (heap_segment_mem (seg)),
7803                                    end);
7804         }
7805         seg = heap_segment_next (seg);
7806     }
7807
7808     seg = generation_start_segment (large_object_generation);
7809     while (seg)
7810     {
7811         if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
7812         {
7813             //check if it became in range
7814             if ((heap_segment_reserved (seg) > lowest_address) &&
7815                 (heap_segment_mem (seg) < highest_address))
7816             {
7817                 set_ro_segment_in_range (seg);
7818             }
7819         }
7820         else
7821         {
7822             uint8_t* end = align_on_page (heap_segment_allocated (seg));
7823             copy_brick_card_range (la, old_card_table,
7824                                    0,
7825                                    seg,
7826                                    align_lower_page (heap_segment_mem (seg)),
7827                                    end);
7828         }
7829         seg = heap_segment_next (seg);
7830     }
7831
7832     release_card_table (&old_card_table[card_word (card_of(la))]);
7833 }
7834
7835 #ifdef FEATURE_BASICFREEZE
7836 BOOL gc_heap::insert_ro_segment (heap_segment* seg)
7837 {
7838     enter_spin_lock (&gc_heap::gc_lock);
7839
7840     if (!gc_heap::seg_table->ensure_space_for_insert ()
7841         || (should_commit_mark_array() && !commit_mark_array_new_seg(__this, seg)))
7842     {
7843         leave_spin_lock(&gc_heap::gc_lock);
7844         return FALSE;
7845     }
7846
7847     //insert at the head of the segment list
7848     generation* gen2 = generation_of (max_generation);
7849     heap_segment* oldhead = generation_start_segment (gen2);
7850     heap_segment_next (seg) = oldhead;
7851     generation_start_segment (gen2) = seg;
7852
7853     seg_table->insert (heap_segment_mem(seg), (size_t)seg);
7854
7855 #ifdef SEG_MAPPING_TABLE
7856     seg_mapping_table_add_ro_segment (seg);
7857 #endif //SEG_MAPPING_TABLE
7858
7859     //test if in range
7860     if ((heap_segment_reserved (seg) > lowest_address) &&
7861         (heap_segment_mem (seg) < highest_address))
7862     {
7863         set_ro_segment_in_range (seg);
7864     }
7865
7866     FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_read_only_heap);
7867
7868     leave_spin_lock (&gc_heap::gc_lock);
7869     return TRUE;
7870 }
7871
7872 // No one calls this function right now. If it does start getting called, we need
7873 // to take care of decommitting the mark array for it - we will need to remember
7874 // which portion of the mark array was committed and only decommit that.
7875 void gc_heap::remove_ro_segment (heap_segment* seg)
7876 {
7877 //clear the mark bits so a new segment allocated in its place will have a clear mark bits
7878 #ifdef MARK_ARRAY
7879     if (gc_can_use_concurrent)
7880     {
7881         clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)),
7882                       align_on_card_word (min (heap_segment_allocated (seg), highest_address)),
7883                       false); // read_only segments need the mark clear
7884     }
7885 #endif //MARK_ARRAY
7886
7887     enter_spin_lock (&gc_heap::gc_lock);
7888
7889     seg_table->remove ((uint8_t*)seg);
7890
7891 #ifdef SEG_MAPPING_TABLE
7892     seg_mapping_table_remove_ro_segment (seg);
7893 #endif //SEG_MAPPING_TABLE
7894
7895     // Locate segment (and previous segment) in the list.
7896     generation* gen2 = generation_of (max_generation);
7897     heap_segment* curr_seg = generation_start_segment (gen2);
7898     heap_segment* prev_seg = NULL;
7899
7900     while (curr_seg && curr_seg != seg)
7901     {
7902         prev_seg = curr_seg;
7903         curr_seg = heap_segment_next (curr_seg);
7904     }
7905     assert (curr_seg == seg);
7906
7907     // Patch previous segment (or list head if there is none) to skip the removed segment.
7908     if (prev_seg)
7909         heap_segment_next (prev_seg) = heap_segment_next (curr_seg);
7910     else
7911         generation_start_segment (gen2) = heap_segment_next (curr_seg);
7912
7913     leave_spin_lock (&gc_heap::gc_lock);
7914 }
7915 #endif //FEATURE_BASICFREEZE
7916
7917 BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg)
7918 {
7919     //set it in range
7920     seg->flags |= heap_segment_flags_inrange;
7921 //    init_brick_card_range (seg);
7922     ro_segments_in_range = TRUE;
7923     //right now, segments aren't protected
7924     //unprotect_segment (seg);
7925     return TRUE;
7926 }
7927
7928 #ifdef MARK_LIST
7929
7930 uint8_t** make_mark_list (size_t size)
7931 {
7932     uint8_t** mark_list = new (nothrow) uint8_t* [size];
7933     return mark_list;
7934 }
7935
7936 #define swap(a,b) do {uint8_t* t; t = a; a = b; b = t;} while (0) // expands to a single statement, composes safely with if/else
7937
7938 void verify_qsort_array (uint8_t* *low, uint8_t* *high)
7939 {
7940     uint8_t **i = 0;
7941
7942     for (i = low+1; i <= high; i++)
7943     {
7944         if (*i < *(i-1))
7945         {
7946             FATAL_GC_ERROR();
7947         }
7948     }
7949 }
7950
7951 #ifndef USE_INTROSORT
7952 void qsort1( uint8_t* *low, uint8_t* *high, unsigned int depth)
7953 {
7954     if (((low + 16) >= high) || (depth > 100))
7955     {
7956         //insertion sort
7957         uint8_t **i, **j;
7958         for (i = low+1; i <= high; i++)
7959         {
7960             uint8_t* val = *i;
7961             for (j=i;j >low && val<*(j-1);j--)
7962             {
7963                 *j=*(j-1);
7964             }
7965             *j=val;
7966         }
7967     }
7968     else
7969     {
7970         uint8_t *pivot, **left, **right;
7971
7972         //sort low middle and high
7973         if (*(low+((high-low)/2)) < *low)
7974             swap (*(low+((high-low)/2)), *low);
7975         if (*high < *low)
7976             swap (*low, *high);
7977         if (*high < *(low+((high-low)/2)))
7978             swap (*(low+((high-low)/2)), *high);
7979
7980         swap (*(low+((high-low)/2)), *(high-1));
7981         pivot =  *(high-1);
7982         left = low; right = high-1;
7983         while (1) {
7984             while (*(--right) > pivot);
7985             while (*(++left)  < pivot);
7986             if (left < right)
7987             {
7988                 swap(*left, *right);
7989             }
7990             else
7991                 break;
7992         }
7993         swap (*left, *(high-1));
7994         qsort1(low, left-1, depth+1);
7995         qsort1(left+1, high, depth+1);
7996     }
7997 }
7998 #endif //USE_INTROSORT
7999 void rqsort1( uint8_t* *low, uint8_t* *high)
8000 {
8001     if ((low + 16) >= high)
8002     {
8003         //insertion sort
8004         uint8_t **i, **j;
8005         for (i = low+1; i <= high; i++)
8006         {
8007             uint8_t* val = *i;
8008             for (j=i;j >low && val>*(j-1);j--)
8009             {
8010                 *j=*(j-1);
8011             }
8012             *j=val;
8013         }
8014     }
8015     else
8016     {
8017         uint8_t *pivot, **left, **right;
8018
8019         //sort low middle and high
8020         if (*(low+((high-low)/2)) > *low)
8021             swap (*(low+((high-low)/2)), *low);
8022         if (*high > *low)
8023             swap (*low, *high);
8024         if (*high > *(low+((high-low)/2)))
8025             swap (*(low+((high-low)/2)), *high);
8026
8027         swap (*(low+((high-low)/2)), *(high-1));
8028         pivot =  *(high-1);
8029         left = low; right = high-1;
8030         while (1) {
8031             while (*(--right) < pivot);
8032             while (*(++left)  > pivot);
8033             if (left < right)
8034             {
8035                 swap(*left, *right);
8036             }
8037             else
8038                 break;
8039         }
8040         swap (*left, *(high-1));
8041         rqsort1(low, left-1);
8042         rqsort1(left+1, high);
8043     }
8044 }
8045
8046 #ifdef USE_INTROSORT
8047 class introsort 
8048 {
8049
8050 private: 
8051     static const int size_threshold = 64;
8052     static const int max_depth = 100;
8053
8054
8055 inline static void swap_elements(uint8_t** i,uint8_t** j)
8056     {
8057         uint8_t* t=*i;
8058         *i=*j; 
8059         *j=t;
8060     }
8061
8062 public:
8063     static void sort (uint8_t** begin, uint8_t** end, int ignored)
8064     {
8065         ignored = 0;
8066         introsort_loop (begin, end, max_depth);
8067         insertionsort (begin, end);
8068     }
8069
8070 private: 
8071
8072     static void introsort_loop (uint8_t** lo, uint8_t** hi, int depth_limit)
8073     {
8074         while (hi-lo >= size_threshold)
8075         {
8076             if (depth_limit == 0)
8077             {
8078                 heapsort (lo, hi);
8079                 return;
8080             }
8081             uint8_t** p=median_partition (lo, hi);
8082             depth_limit=depth_limit-1;
8083             introsort_loop (p, hi, depth_limit);
8084             hi=p-1;
8085         }        
8086     }
8087
8088     static uint8_t** median_partition (uint8_t** low, uint8_t** high)
8089     {
8090         uint8_t *pivot, **left, **right;
8091
8092         //sort low middle and high
8093         if (*(low+((high-low)/2)) < *low)
8094             swap_elements ((low+((high-low)/2)), low);
8095         if (*high < *low)
8096             swap_elements (low, high);
8097         if (*high < *(low+((high-low)/2)))
8098             swap_elements ((low+((high-low)/2)), high);
8099
8100         swap_elements ((low+((high-low)/2)), (high-1));
8101         pivot =  *(high-1);
8102         left = low; right = high-1;
8103         while (1) {
8104             while (*(--right) > pivot);
8105             while (*(++left)  < pivot);
8106             if (left < right)
8107             {
8108                 swap_elements(left, right);
8109             }
8110             else
8111                 break;
8112         }
8113         swap_elements (left, (high-1));
8114         return left;
8115     }
8116
8117
8118     static void insertionsort (uint8_t** lo, uint8_t** hi)
8119     {
8120         for (uint8_t** i=lo+1; i <= hi; i++)
8121         {
8122             uint8_t** j = i;
8123             uint8_t* t = *i;
8124             while((j > lo) && (t <*(j-1)))
8125             {
8126                 *j = *(j-1);
8127                 j--;
8128             }
8129             *j = t;
8130         }
8131     }
8132
8133     static void heapsort (uint8_t** lo, uint8_t** hi)
8134     { 
8135         size_t n = hi - lo + 1;
8136         for (size_t i=n / 2; i >= 1; i--)
8137         {
8138             downheap (i,n,lo);
8139         }
8140         for (size_t i = n; i > 1; i--)
8141         {
8142             swap_elements (lo, lo + i - 1);
8143             downheap(1, i - 1,  lo);
8144         }
8145     }
8146
8147     static void downheap (size_t i, size_t n, uint8_t** lo)
8148     {
8149         uint8_t* d = *(lo + i - 1);
8150         size_t child;
8151         while (i <= n / 2)
8152         {
8153             child = 2*i;
8154             if (child < n && *(lo + child - 1)<(*(lo + child)))
8155             {
8156                 child++;
8157             }
8158             if (!(d<*(lo + child - 1))) 
8159             {
8160                 break;
8161             }
8162             *(lo + i - 1) = *(lo + child - 1);
8163             i = child;
8164         }
8165         *(lo + i - 1) = d;
8166     }
8167
8168 };
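
// A minimal usage sketch (hypothetical names), kept under '#if 0' so it is
// illustrative only. Note that sort() takes an INCLUSIVE pointer to the last
// element rather than a past-the-end pointer - callers such as sort_mark_list
// below pass mark_list_index - 1.
#if 0
uint8_t* addrs[256];                    // hypothetical array of addresses
size_t count = gather_addrs (addrs);    // hypothetical helper that fills it
if (count > 1)
    introsort::sort (addrs, addrs + count - 1, 0);  // third argument is ignored
#endif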
8169
8170 #endif //USE_INTROSORT    
8171
8172 #ifdef MULTIPLE_HEAPS
8173 #ifdef PARALLEL_MARK_LIST_SORT
8174 void gc_heap::sort_mark_list()
8175 {
8176     // if this heap had a mark list overflow, we don't do anything
8177     if (mark_list_index > mark_list_end)
8178     {
8179 //        printf("sort_mark_list: overflow on heap %d\n", heap_number);
8180         return;
8181     }
8182
8183     // if any other heap had a mark list overflow, we fake one too,
8184     // so we don't use an incomplete mark list by mistake
8185     for (int i = 0; i < n_heaps; i++)
8186     {
8187         if (g_heaps[i]->mark_list_index > g_heaps[i]->mark_list_end)
8188         {
8189             mark_list_index = mark_list_end + 1;
8190 //            printf("sort_mark_list: overflow on heap %d\n", i);
8191             return;
8192         }
8193     }
8194
8195 //    unsigned long start = GetCycleCount32();
8196
8197     dprintf (3, ("Sorting mark lists"));
8198     if (mark_list_index > mark_list)
8199         _sort (mark_list, mark_list_index - 1, 0);
8200
8201 //    printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list);
8202 //    start = GetCycleCount32();
8203
8204     // first set the pieces for all heaps to empty
8205     int heap_num;
8206     for (heap_num = 0; heap_num < n_heaps; heap_num++)
8207     {
8208         mark_list_piece_start[heap_num] = NULL;
8209         mark_list_piece_end[heap_num] = NULL;
8210     }
8211
8212     uint8_t** x = mark_list;
8213
8214 // predicate means: x is still within the mark list, and within the bounds of this heap
8215 #define predicate(x) (((x) < mark_list_index) && (*(x) < heap->ephemeral_high))
8216
8217     heap_num = -1;
8218     while (x < mark_list_index)
8219     {
8220         gc_heap* heap;
8221         // find the heap x points into - searching cyclically from the last heap,
8222         // because in many cases the right heap is the next one or comes soon after
8223         int last_heap_num = heap_num;
8224         MAYBE_UNUSED_VAR(last_heap_num);
8225         do
8226         {
8227             heap_num++;
8228             if (heap_num >= n_heaps)
8229                 heap_num = 0;
8230             assert(heap_num != last_heap_num); // we should always find the heap - infinite loop if not!
8231             heap = g_heaps[heap_num];
8232         }
8233         while (!(*x >= heap->ephemeral_low && *x < heap->ephemeral_high));
8234
8235         // x is the start of the mark list piece for this heap
8236         mark_list_piece_start[heap_num] = x;
8237
8238         // to find the end of the mark list piece for this heap, find the first x
8239         // that has !predicate(x), i.e. that is either not in this heap, or beyond the end of the list
8240         if (predicate(x))
8241         {
8242             // let's see if we get lucky and the whole rest belongs to this piece
8243             if (predicate(mark_list_index-1))
8244             {
8245                 x = mark_list_index;
8246                 mark_list_piece_end[heap_num] = x;
8247                 break;
8248             }
8249
8250             // we play a variant of binary search to find the point sooner.
8251             // the first loop advances by increasing steps until the predicate turns false.
8252             // then we retreat the last step, and the second loop advances by decreasing steps, keeping the predicate true.
8253             unsigned inc = 1;
8254             do
8255             {
8256                 inc *= 2;
8257                 uint8_t** temp_x = x;
8258                 x += inc;
8259                 if (temp_x > x)
8260                 {
8261                     break;
8262                 }
8263             }
8264             while (predicate(x));
8265             // we know that only the last step was wrong, so we undo it
8266             x -= inc;
8267             do
8268             {
8269                 // loop invariant - predicate holds at x, but not x + inc
8270                 assert (predicate(x) && !(((x + inc) > x) && predicate(x + inc)));
8271                 inc /= 2;
8272                 if (((x + inc) > x) && predicate(x + inc))
8273                 {
8274                     x += inc;
8275                 }
8276             }
8277             while (inc > 1);
8278             // the termination condition and the loop invariant together imply this:
8279             assert(predicate(x) && !predicate(x + inc) && (inc == 1));
8280             // so the spot we're looking for is one further
8281             x += 1;
8282         }
8283         mark_list_piece_end[heap_num] = x;
8284     }
8285
8286 #undef predicate
8287
8288 //    printf("second phase of sort_mark_list for heap %d took %u cycles\n", this->heap_number, GetCycleCount32() - start);
8289 }
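
// A standalone sketch of the galloping search used above, under simplified
// assumptions: index arithmetic instead of pointers, the bounds check folded
// into pred, and no overflow guard. Given that pred holds at 'begin' and is
// monotone (a true prefix followed by false), it returns the first index
// where pred is false. Illustrative only.
#if 0
template <typename Pred>
size_t gallop_first_false (size_t begin, Pred pred)
{
    size_t x = begin;
    size_t inc = 1;
    // advance by doubling steps until the predicate turns false
    do
    {
        inc *= 2;
        x += inc;
    }
    while (pred (x));
    // only the last step overshot - undo it, then advance by halving steps,
    // keeping the predicate true
    x -= inc;
    do
    {
        inc /= 2;
        if (pred (x + inc))
            x += inc;
    }
    while (inc > 1);
    // pred holds at x but not at x + 1, so the boundary is one further
    return x + 1;
}
#endif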
8290
8291 void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end)
8292 {
8293     size_t slots_needed = end - start;
8294     size_t slots_available = mark_list_end + 1 - mark_list_index;
8295     size_t slots_to_copy = min(slots_needed, slots_available);
8296     memcpy(mark_list_index, start, slots_to_copy*sizeof(*start));
8297     mark_list_index += slots_to_copy;
8298 //    printf("heap %d: appended %Id slots to mark_list\n", heap_number, slots_to_copy);
8299 }
8300
8301 void gc_heap::merge_mark_lists()
8302 {
8303     uint8_t** source[MAX_SUPPORTED_CPUS];
8304     uint8_t** source_end[MAX_SUPPORTED_CPUS];
8305     int source_heap[MAX_SUPPORTED_CPUS];
8306     int source_count = 0;
8307
8308     // in case of mark list overflow, don't bother
8309     if (mark_list_index >  mark_list_end)
8310     {
8311 //        printf("merge_mark_lists: overflow\n");
8312         return;
8313     }
8314
8315     dprintf(3, ("merge_mark_lists: heap_number = %d  starts out with %Id entries", heap_number, mark_list_index - mark_list));
8316 //    unsigned long start = GetCycleCount32();
8317     for (int i = 0; i < n_heaps; i++)
8318     {
8319         gc_heap* heap = g_heaps[i];
8320         if ((heap->mark_list_piece_start[heap_number] < heap->mark_list_piece_end[heap_number]) &&
8321             (source_count < MAX_SUPPORTED_CPUS)) // the bound check must guard the stores below
8322         {
8323             source[source_count] = heap->mark_list_piece_start[heap_number];
8324             source_end[source_count] = heap->mark_list_piece_end[heap_number];
8325             source_heap[source_count] = i;
8326             source_count++;
8327         }
8328     }
8329 //    printf("first phase of merge_mark_lists for heap %d took %u cycles\n", heap_number, GetCycleCount32() - start);
8330
8331     dprintf(3, ("heap_number = %d  has %d sources\n", heap_number, source_count));
8332 #if defined(_DEBUG) || defined(TRACE_GC)
8333     for (int j = 0; j < source_count; j++)
8334     {
8335         dprintf(3, ("heap_number = %d  ", heap_number));
8336         dprintf(3, (" source from heap %d = %Ix .. %Ix (%Id entries)",
8337             (size_t)(source_heap[j]), (size_t)(source[j][0]), (size_t)(source_end[j][-1]), (size_t)(source_end[j] - source[j])));
8338        // the sources should all be sorted
8339         for (uint8_t **x = source[j]; x < source_end[j] - 1; x++)
8340         {
8341             if (x[0] > x[1])
8342             {
8343                 dprintf(3, ("oops, mark_list from source %d for heap %d isn't sorted\n", j, heap_number));
8344                 assert (0);
8345             }
8346         }
8347     }
8348 #endif //_DEBUG || TRACE_GC
8349
8350 //    start = GetCycleCount32();
8351
8352     mark_list = &g_mark_list_copy [heap_number*mark_list_size];
8353     mark_list_index = mark_list;
8354     mark_list_end = &mark_list [mark_list_size-1];
8355     int piece_count = 0;
8356     if (source_count == 0)
8357     {
8358         ; // nothing to do
8359     }
8360     else if (source_count == 1)
8361     {
8362         mark_list = source[0];
8363         mark_list_index = source_end[0];
8364         mark_list_end = mark_list_index;
8365         piece_count++;
8366     }
8367     else
8368     {
8369         while (source_count > 1)
8370         {
8371             // find the lowest and second lowest value in the sources we're merging from
8372             int lowest_source = 0;
8373             uint8_t *lowest = *source[0];
8374             uint8_t *second_lowest = *source[1];
8375             for (int i = 1; i < source_count; i++)
8376             {
8377                 if (lowest > *source[i])
8378                 {
8379                     second_lowest = lowest;
8380                     lowest = *source[i];
8381                     lowest_source = i;
8382                 }
8383                 else if (second_lowest > *source[i])
8384                 {
8385                     second_lowest = *source[i];
8386                 }
8387             }
8388
8389             // find the point in the lowest source where it either runs out or is not <= second_lowest anymore
8390
8391             // let's first try to get lucky and see if the whole source is <= second_lowest -- this is actually quite common
8392             uint8_t **x;
8393             if (source_end[lowest_source][-1] <= second_lowest)
8394                 x = source_end[lowest_source];
8395             else
8396             {
8397                 // use linear search to find the end -- could also use binary search as in sort_mark_list,
8398                 // but saw no improvement doing that
8399                 for (x = source[lowest_source]; x < source_end[lowest_source] && *x <= second_lowest; x++)
8400                     ;
8401             }
8402
8403             // blast this piece to the mark list
8404             append_to_mark_list(source[lowest_source], x);
8405             piece_count++;
8406
8407             source[lowest_source] = x;
8408
8409             // check whether this source is now exhausted
8410             if (x >= source_end[lowest_source])
8411             {
8412                 // if it's not the source with the highest index, copy the source with the highest index
8413                 // over it so the non-empty sources are always at the beginning
8414                 if (lowest_source < source_count-1)
8415                 {
8416                     source[lowest_source] = source[source_count-1];
8417                     source_end[lowest_source] = source_end[source_count-1];
8418                 }
8419                 source_count--;
8420             }
8421         }
8422         // we're left with just one source that we copy
8423         append_to_mark_list(source[0], source_end[0]);
8424         piece_count++;
8425     }
8426
8427 //    printf("second phase of merge_mark_lists for heap %d took %u cycles to merge %d pieces\n", heap_number, GetCycleCount32() - start, piece_count);
8428
8429 #if defined(_DEBUG) || defined(TRACE_GC)
8430     // the final mark list must be sorted
8431     for (uint8_t **x = mark_list; x < mark_list_index - 1; x++)
8432     {
8433         if (x[0] > x[1])
8434         {
8435             dprintf(3, ("oops, mark_list for heap %d isn't sorted at the end of merge_mark_lists", heap_number));
8436             assert (0);
8437         }
8438     }
8439 #endif //defined(_DEBUG) || defined(TRACE_GC)
8440 }
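
// A standalone sketch of the merge strategy above, with hypothetical
// parameters and no output bounds check (append_to_mark_list handles that in
// the real code; here dest is assumed to have room). Rather than popping one
// element at a time, it finds the sources with the lowest and second lowest
// heads, then copies the whole run of elements <= second_lowest from the
// lowest source at once. Illustrative only.
#if 0
void merge_sorted_sources (uint8_t** source[], uint8_t** source_end[],
                           int source_count, uint8_t** dest)
{
    while (source_count > 1)
    {
        // find the lowest and second lowest heads among all sources
        int lowest_source = 0;
        uint8_t* lowest = *source[0];
        uint8_t* second_lowest = *source[1];
        for (int i = 1; i < source_count; i++)
        {
            if (lowest > *source[i])
            {
                second_lowest = lowest;
                lowest = *source[i];
                lowest_source = i;
            }
            else if (second_lowest > *source[i])
            {
                second_lowest = *source[i];
            }
        }

        // copy the run of elements <= second_lowest from the lowest source
        uint8_t** x = source[lowest_source];
        while ((x < source_end[lowest_source]) && (*x <= second_lowest))
        {
            *dest++ = *x++;
        }
        source[lowest_source] = x;

        // if this source is exhausted, swap the last source into its slot
        // so the live sources stay at the front of the array
        if (x >= source_end[lowest_source])
        {
            source_count--;
            source[lowest_source] = source[source_count];
            source_end[lowest_source] = source_end[source_count];
        }
    }
    // one source left - copy its remainder
    while (source[0] < source_end[0])
    {
        *dest++ = *source[0]++;
    }
}
#endif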
8441 #else //PARALLEL_MARK_LIST_SORT
8442 void gc_heap::combine_mark_lists()
8443 {
8444     dprintf (3, ("Combining mark lists"));
8445     //verify if a heap has overflowed its mark list
8446     BOOL use_mark_list = TRUE;
8447     for (int i = 0; i < n_heaps; i++)
8448     {
8449         if (g_heaps [i]->mark_list_index >  g_heaps [i]->mark_list_end)
8450         {
8451             use_mark_list = FALSE;
8452             break;
8453         }
8454     }
8455
8456     if (use_mark_list)
8457     {
8458         dprintf (3, ("Using mark list"));
8459         //compact the gaps out of the mark list
8460         int gn = 0;
8461         uint8_t** current_gap = g_heaps [gn]->mark_list_index;
8462         uint8_t** current_gap_end = g_heaps[gn]->mark_list_end + 1;
8463         uint8_t** dst_last = current_gap-1;
8464
8465         int srcn = n_heaps-1;
8466         gc_heap* srch = g_heaps [srcn];
8467         uint8_t** src = srch->mark_list_index - 1;
8468         uint8_t** src_beg = srch->mark_list;
8469
8470         while (current_gap <= src)
8471         {
8472             while ((gn < n_heaps-1) && (current_gap >= current_gap_end))
8473             {
8474                 //go to the next gap
8475                 gn++;
8476                 dprintf (3, ("Going to the next gap %d", gn));
8477                 assert (gn < n_heaps);
8478                 current_gap = g_heaps [gn]->mark_list_index;
8479                 current_gap_end = g_heaps[gn]->mark_list_end + 1;
8480                 assert ((gn == (n_heaps-1)) || (current_gap_end == g_heaps[gn+1]->mark_list));
8481             }
8482             while ((srcn > 0) && (src < src_beg))
8483             {
8484                 //go to the previous source
8485                 srcn--;
8486                 dprintf (3, ("going to the previous source %d", srcn));
8487                 assert (srcn>=0);
8488                 gc_heap* srch = g_heaps [srcn];
8489                 src = srch->mark_list_index - 1;
8490                 src_beg = srch->mark_list;
8491             }
8492             if (current_gap < src)
8493             {
8494                 dst_last = current_gap;
8495                 *current_gap++ = *src--;
8496             }
8497         }
8498         dprintf (3, ("src: %Ix dst_last: %Ix", (size_t)src, (size_t)dst_last));
8499
8500         uint8_t** end_of_list = max (src, dst_last);
8501
8502         //sort the resulting compacted list
8503         assert (end_of_list < &g_mark_list [n_heaps*mark_list_size]);
8504         if (end_of_list > &g_mark_list[0])
8505             _sort (&g_mark_list[0], end_of_list, 0);
8506         //adjust the mark_list to the beginning of the resulting mark list.
8507         for (int i = 0; i < n_heaps; i++)
8508         {
8509             g_heaps [i]->mark_list = g_mark_list;
8510             g_heaps [i]->mark_list_index = end_of_list + 1;
8511             g_heaps [i]->mark_list_end = end_of_list + 1;
8512         }
8513     }
8514     else
8515     {
8516         uint8_t** end_of_list = g_mark_list;
8517         //adjust the mark_list to the beginning of the resulting mark list.
8518         //put the index beyond the end to turn off mark list processing
8519         for (int i = 0; i < n_heaps; i++)
8520         {
8521             g_heaps [i]->mark_list = g_mark_list;
8522             g_heaps [i]->mark_list_index = end_of_list + 1;
8523             g_heaps [i]->mark_list_end = end_of_list;
8524         }
8525     }
8526 }
8527 #endif // PARALLEL_MARK_LIST_SORT
8528 #endif //MULTIPLE_HEAPS
8529 #endif //MARK_LIST
8530
8531 class seg_free_spaces
8532 {
8533     struct seg_free_space
8534     {
8535         BOOL is_plug;
8536         void* start;
8537     };
8538
8539     struct free_space_bucket
8540     {
8541         seg_free_space* free_space;
8542         ptrdiff_t count_add; // Assigned when we first construct the array.
8543         ptrdiff_t count_fit; // How many items left when we are fitting plugs.
8544     };
8545
8546     void move_bucket (int old_power2, int new_power2)
8547     {
8548         // PREFAST warning 22015: old_power2 could be negative
8549         assert (old_power2 >= 0);
8550         assert (old_power2 >= new_power2);
8551
8552         if (old_power2 == new_power2)
8553         {
8554             return;
8555         }
8556
8557         seg_free_space* src_index = free_space_buckets[old_power2].free_space;
8558         for (int i = old_power2; i > new_power2; i--)
8559         {
8560             seg_free_space** dest = &(free_space_buckets[i].free_space);
8561             (*dest)++;
8562
8563             seg_free_space* dest_index = free_space_buckets[i - 1].free_space;
8564             if (i > (new_power2 + 1))
8565             {
8566                 seg_free_space temp = *src_index;
8567                 *src_index = *dest_index;
8568                 *dest_index = temp;
8569             }
8570             src_index = dest_index;
8571         }
8572
8573         free_space_buckets[old_power2].count_fit--;
8574         free_space_buckets[new_power2].count_fit++;
8575     }
8576
8577 #ifdef _DEBUG
8578
8579     void dump_free_space (seg_free_space* item)
8580     {
8581         uint8_t* addr = 0;
8582         size_t len = 0;
8583
8584         if (item->is_plug)
8585         {
8586             mark* m = (mark*)(item->start);
8587             len = pinned_len (m);
8588             addr = pinned_plug (m) - len;
8589         }
8590         else
8591         {
8592             heap_segment* seg = (heap_segment*)(item->start);
8593             addr = heap_segment_plan_allocated (seg);
8594             len = heap_segment_committed (seg) - addr;
8595         }
8596
8597         dprintf (SEG_REUSE_LOG_1, ("[%d]0x%Ix %Id", heap_num, addr, len));
8598     }
8599
8600     void dump()
8601     {
8602         seg_free_space* item = NULL;
8603         int i = 0;
8604
8605         dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------\nnow the free spaces look like:", heap_num));
8606         for (i = 0; i < (free_space_bucket_count - 1); i++)
8607         {
8608             dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
8609             dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
8610             item = free_space_buckets[i].free_space;
8611             while (item < free_space_buckets[i + 1].free_space)
8612             {
8613                 dump_free_space (item);
8614                 item++;
8615             }
8616             dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
8617         }
8618
8619         dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
8620         dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
8621         item = free_space_buckets[i].free_space;
8622
8623         while (item <= &seg_free_space_array[free_space_item_count - 1])
8624         {
8625             dump_free_space (item);
8626             item++;
8627         }
8628         dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
8629     }
8630
8631 #endif //_DEBUG
8632
8633     free_space_bucket* free_space_buckets;
8634     seg_free_space* seg_free_space_array;
8635     ptrdiff_t free_space_bucket_count;
8636     ptrdiff_t free_space_item_count;
8637     int base_power2;
8638     int heap_num;
8639 #ifdef _DEBUG
8640     BOOL has_end_of_seg;
8641 #endif //_DEBUG
8642
8643 public:
8644
8645     seg_free_spaces (int h_number)
8646     {
8647         heap_num = h_number;
8648     }
8649
8650     BOOL alloc ()
8651     {
8652         size_t total_prealloc_size = 
8653             MAX_NUM_BUCKETS * sizeof (free_space_bucket) +
8654             MAX_NUM_FREE_SPACES * sizeof (seg_free_space);
8655
8656         free_space_buckets = (free_space_bucket*) new (nothrow) uint8_t[total_prealloc_size];
8657
8658         return (!!free_space_buckets);
8659     }
8660
8661     // We take the ordered free space array we got from the 1st pass,
8662     // and feed the portion that we decided to use to this method, i.e.,
8663     // the largest item_count free spaces.
8664     void add_buckets (int base, size_t* ordered_free_spaces, int bucket_count, size_t item_count)
8665     {
8666         assert (free_space_buckets);
8667         assert (item_count <= (size_t)MAX_PTR);
8668
8669         free_space_bucket_count = bucket_count;
8670         free_space_item_count = item_count;
8671         base_power2 = base;
8672 #ifdef _DEBUG
8673         has_end_of_seg = FALSE;
8674 #endif //_DEBUG
8675
8676         ptrdiff_t total_item_count = 0;
8677         ptrdiff_t i = 0;
8678
8679         seg_free_space_array = (seg_free_space*)(free_space_buckets + free_space_bucket_count);
8680
8681         for (i = 0; i < (ptrdiff_t)item_count; i++)
8682         {
8683             seg_free_space_array[i].start = 0;
8684             seg_free_space_array[i].is_plug = FALSE;
8685         }
8686
8687         for (i = 0; i < bucket_count; i++)
8688         {
8689             free_space_buckets[i].count_add = ordered_free_spaces[i];
8690             free_space_buckets[i].count_fit = ordered_free_spaces[i];
8691             free_space_buckets[i].free_space = &seg_free_space_array[total_item_count];
8692             total_item_count += free_space_buckets[i].count_add;
8693         }
8694
8695         assert (total_item_count == (ptrdiff_t)item_count);
8696     }
8697
8698     // If we are adding a free space before a plug we pass the
8699     // mark stack position so we can update the length; we could
8700     // also be adding the free space after the last plug, in which
8701     // case start is the segment whose heap_segment_plan_allocated
8702     // we'll need to update.
8703     void add (void* start, BOOL plug_p, BOOL first_p)
8704     {
8705         size_t size = (plug_p ? 
8706                        pinned_len ((mark*)start) : 
8707                        (heap_segment_committed ((heap_segment*)start) - 
8708                            heap_segment_plan_allocated ((heap_segment*)start)));
8709         
8710         if (plug_p)
8711         {
8712             dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space before plug: %Id", heap_num, size));
8713         }
8714         else
8715         {
8716             dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space at end of seg: %Id", heap_num, size));
8717 #ifdef _DEBUG
8718             has_end_of_seg = TRUE;
8719 #endif //_DEBUG
8720         }
8721                   
8722         if (first_p)
8723         {
8724             size_t eph_gen_starts = gc_heap::eph_gen_starts_size;
8725             size -= eph_gen_starts;
8726             if (plug_p)
8727             {
8728                 mark* m = (mark*)(start);
8729                 pinned_len (m) -= eph_gen_starts;
8730             }
8731             else
8732             {
8733                 heap_segment* seg = (heap_segment*)start;
8734                 heap_segment_plan_allocated (seg) += eph_gen_starts;
8735             }
8736         }
8737
8738         int bucket_power2 = index_of_highest_set_bit (size);
8739         if (bucket_power2 < base_power2)
8740         {
8741             return;
8742         }
8743
8744         free_space_bucket* bucket = &free_space_buckets[bucket_power2 - base_power2];
8745
8746         seg_free_space* bucket_free_space = bucket->free_space;
8747         assert (plug_p || (!plug_p && bucket->count_add));
8748
8749         if (bucket->count_add == 0)
8750         {
8751             dprintf (SEG_REUSE_LOG_1, ("[%d]Already have enough of 2^%d", heap_num, bucket_power2));
8752             return;
8753         }
8754
8755         ptrdiff_t index = bucket->count_add - 1;
8756
8757         dprintf (SEG_REUSE_LOG_1, ("[%d]Building free spaces: adding %Ix; len: %Id (2^%d)", 
8758                     heap_num, 
8759                     (plug_p ? 
8760                         (pinned_plug ((mark*)start) - pinned_len ((mark*)start)) : 
8761                         heap_segment_plan_allocated ((heap_segment*)start)),
8762                     size,
8763                     bucket_power2));
8764
8765         if (plug_p)
8766         {
8767             bucket_free_space[index].is_plug = TRUE;
8768         }
8769
8770         bucket_free_space[index].start = start;
8771         bucket->count_add--;
8772     }
8773
8774 #ifdef _DEBUG
8775
8776     // Do a consistency check after all free spaces are added.
8777     void check()
8778     {
8779         ptrdiff_t i = 0;
8780         int end_of_seg_count = 0;
8781
8782         for (i = 0; i < free_space_item_count; i++)
8783         {
8784             assert (seg_free_space_array[i].start);
8785             if (!(seg_free_space_array[i].is_plug))
8786             {
8787                 end_of_seg_count++;
8788             }
8789         }
8790         
8791         if (has_end_of_seg)
8792         {
8793             assert (end_of_seg_count == 1);
8794         }
8795         else
8796         {
8797             assert (end_of_seg_count == 0);
8798         }
8799
8800         for (i = 0; i < free_space_bucket_count; i++)
8801         {
8802             assert (free_space_buckets[i].count_add == 0);
8803         }
8804     }
8805
8806 #endif //_DEBUG
8807
8808     uint8_t* fit (uint8_t* old_loc,
8809 #ifdef SHORT_PLUGS
8810                BOOL set_padding_on_saved_p,
8811                mark* pinned_plug_entry,
8812 #endif //SHORT_PLUGS
8813                size_t plug_size
8814                REQD_ALIGN_AND_OFFSET_DCL)
8815     {
8816         if (old_loc)
8817         {
8818 #ifdef SHORT_PLUGS
8819             assert (!is_plug_padded (old_loc));
8820 #endif //SHORT_PLUGS
8821             assert (!node_realigned (old_loc));
8822         }
8823
8824         size_t saved_plug_size = plug_size;
8825
8826 #ifdef FEATURE_STRUCTALIGN
8827         // BARTOKTODO (4841): this code path is disabled (see can_fit_all_blocks_p) until we take alignment requirements into account
8828         _ASSERTE(requiredAlignment == DATA_ALIGNMENT && false);
8829 #endif // FEATURE_STRUCTALIGN
8830         // TODO: this is also not large alignment ready. We would need to consider alignment when
8831         // choosing the bucket.
8832
8833         size_t plug_size_to_fit = plug_size;
8834
8835         // best fit is only done for gen1 to gen2 and we do not pad in gen2.
8836         int pad_in_front = 0;
8837
8838 #ifdef SHORT_PLUGS
8839         plug_size_to_fit += (pad_in_front ? Align(min_obj_size) : 0);
8840 #endif //SHORT_PLUGS
8841
8842         int plug_power2 = index_of_highest_set_bit (round_up_power2 (plug_size_to_fit + Align(min_obj_size)));
8843         ptrdiff_t i;
8844         uint8_t* new_address = 0;
8845
8846         if (plug_power2 < base_power2)
8847         {
8848             plug_power2 = base_power2;
8849         }
8850
8851         int chosen_power2 = plug_power2 - base_power2;
8852 retry:
8853         for (i = chosen_power2; i < free_space_bucket_count; i++)
8854         {
8855             if (free_space_buckets[i].count_fit != 0)
8856             {
8857                 break;
8858             }
8859             chosen_power2++;
8860         }
8861
8862         dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting plug len %Id (2^%d) using 2^%d free space", 
8863             heap_num, 
8864             plug_size, 
8865             plug_power2, 
8866             (chosen_power2 + base_power2)));
8867
8868         assert (i < free_space_bucket_count);
8869         
8870         seg_free_space* bucket_free_space = free_space_buckets[chosen_power2].free_space;
8871         ptrdiff_t free_space_count = free_space_buckets[chosen_power2].count_fit;
8872         size_t new_free_space_size = 0;
8873         BOOL can_fit = FALSE;
8874         size_t pad = 0;
8875
8876         for (i = 0; i < free_space_count; i++)
8877         {
8878             size_t free_space_size = 0;
8879             pad = 0;
8880 #ifdef SHORT_PLUGS
8881             BOOL short_plugs_padding_p = FALSE;
8882 #endif //SHORT_PLUGS
8883             BOOL realign_padding_p = FALSE;
8884
8885             if (bucket_free_space[i].is_plug)
8886             {
8887                 mark* m = (mark*)(bucket_free_space[i].start);
8888                 uint8_t* plug_free_space_start = pinned_plug (m) - pinned_len (m);
8889                 
8890 #ifdef SHORT_PLUGS
8891                 if ((pad_in_front & USE_PADDING_FRONT) &&
8892                     (((plug_free_space_start - pin_allocation_context_start_region (m))==0) ||
8893                     ((plug_free_space_start - pin_allocation_context_start_region (m))>=DESIRED_PLUG_LENGTH)))
8894                 {
8895                     pad = Align (min_obj_size);
8896                     short_plugs_padding_p = TRUE;
8897                 }
8898 #endif //SHORT_PLUGS
8899
8900                 if (!((old_loc == 0) || same_large_alignment_p (old_loc, plug_free_space_start+pad)))
8901                 {
8902                     pad += switch_alignment_size (pad != 0);
8903                     realign_padding_p = TRUE;
8904                 }
8905
8906                 plug_size = saved_plug_size + pad;
8907
8908                 free_space_size = pinned_len (m);
8909                 new_address = pinned_plug (m) - pinned_len (m);
8910
8911                 if (free_space_size >= (plug_size + Align (min_obj_size)) ||
8912                     free_space_size == plug_size)
8913                 {
8914                     new_free_space_size = free_space_size - plug_size;
8915                     pinned_len (m) = new_free_space_size;
8916 #ifdef SIMPLE_DPRINTF
8917                     dprintf (SEG_REUSE_LOG_0, ("[%d]FP: 0x%Ix->0x%Ix(%Ix)(%Ix), [0x%Ix (2^%d) -> [0x%Ix (2^%d)",
8918                                 heap_num, 
8919                                 old_loc,
8920                                 new_address, 
8921                                 (plug_size - pad),
8922                                 pad,
8923                                 pinned_plug (m), 
8924                                 index_of_highest_set_bit (free_space_size),
8925                                 (pinned_plug (m) - pinned_len (m)), 
8926                                 index_of_highest_set_bit (new_free_space_size)));
8927 #endif //SIMPLE_DPRINTF
8928
8929 #ifdef SHORT_PLUGS
8930                     if (short_plugs_padding_p)
8931                     {
8932                         pin_allocation_context_start_region (m) = plug_free_space_start;
8933                         set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry);
8934                     }
8935 #endif //SHORT_PLUGS
8936
8937                     if (realign_padding_p)
8938                     {
8939                         set_node_realigned (old_loc);
8940                     }
8941
8942                     can_fit = TRUE;
8943                 }
8944             }
8945             else
8946             {
8947                 heap_segment* seg = (heap_segment*)(bucket_free_space[i].start);
8948                 free_space_size = heap_segment_committed (seg) - heap_segment_plan_allocated (seg);
8949
8950                 if (!((old_loc == 0) || same_large_alignment_p (old_loc, heap_segment_plan_allocated (seg))))
8951                 {
8952                     pad = switch_alignment_size (FALSE);
8953                     realign_padding_p = TRUE;
8954                 }
8955
8956                 plug_size = saved_plug_size + pad;
8957
8958                 if (free_space_size >= (plug_size + Align (min_obj_size)) ||
8959                     free_space_size == plug_size)
8960                 {
8961                     new_address = heap_segment_plan_allocated (seg);
8962                     new_free_space_size = free_space_size - plug_size;
8963                     heap_segment_plan_allocated (seg) = new_address + plug_size;
8964 #ifdef SIMPLE_DPRINTF
8965                     dprintf (SEG_REUSE_LOG_0, ("[%d]FS: 0x%Ix-> 0x%Ix(%Ix) (2^%d) -> 0x%Ix (2^%d)",
8966                                 heap_num, 
8967                                 old_loc,
8968                                 new_address, 
8969                                 (plug_size - pad),
8970                                 index_of_highest_set_bit (free_space_size),
8971                                 heap_segment_plan_allocated (seg), 
8972                                 index_of_highest_set_bit (new_free_space_size)));
8973 #endif //SIMPLE_DPRINTF
8974
8975                     if (realign_padding_p)
8976                         set_node_realigned (old_loc);
8977
8978                     can_fit = TRUE;
8979                 }
8980             }
8981
8982             if (can_fit)
8983             {
8984                 break;
8985             }
8986         }
8987
8988         if (!can_fit)
8989         {
8990             assert (chosen_power2 == 0);
8991             chosen_power2 = 1;
8992             goto retry;
8993         }
8994         else
8995         {
8996             if (pad)
8997             {
8998                 new_address += pad;
8999             }
9000             assert ((chosen_power2 && (i == 0)) ||
9001                 ((!chosen_power2) && (i < free_space_count)));
9002         }
9003
9004         int new_bucket_power2 = index_of_highest_set_bit (new_free_space_size);
9005
9006         if (new_bucket_power2 < base_power2)
9007         {
9008             new_bucket_power2 = base_power2;
9009         }
9010
9011         move_bucket (chosen_power2, new_bucket_power2 - base_power2);
9012
9013         //dump();
9014
9015         return new_address;
9016     }
9017
9018     void cleanup ()
9019     {
9020         // seg_free_space_array points into the free_space_buckets
9021         // allocation (see add_buckets), so free only that one allocation -
9022         // deleting seg_free_space_array would pass an interior pointer to delete[].
9023         if (free_space_buckets)
9024         {
9025             delete [] free_space_buckets;
9026             free_space_buckets = 0;
9027         }
9028         seg_free_space_array = 0;
9029 };
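
// A sketch of the power-of-2 bucketing scheme seg_free_spaces uses, assuming
// index_of_highest_set_bit behaves like floor(log2) (it is defined elsewhere
// in this file). Free spaces are binned by floor(log2(size)); fit() starts
// probing at the bucket for round_up_power2(plug_size + Align(min_obj_size)),
// so any space found in that or a higher bucket is big enough up front
// (alignment padding can still force the retry path above). Illustrative only.
#if 0
// which bucket a free space of this size is recorded in (-1: too small to track)
int record_bucket (size_t free_space_size, int base_power2)
{
    int power2 = index_of_highest_set_bit (free_space_size);
    return (power2 < base_power2) ? -1 : (power2 - base_power2);
}

// the first bucket fit() probes for a plug of this size
int search_bucket (size_t plug_size, int base_power2)
{
    int power2 = index_of_highest_set_bit (round_up_power2 (plug_size + Align (min_obj_size)));
    return (power2 < base_power2) ? 0 : (power2 - base_power2);
}
#endif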
9030
9031
9032 #define marked(i) header(i)->IsMarked()
9033 #define set_marked(i) header(i)->SetMarked()
9034 #define clear_marked(i) header(i)->ClearMarked()
9035 #define pinned(i) header(i)->IsPinned()
9036 #define set_pinned(i) header(i)->SetPinned()
9037 #define clear_pinned(i) header(i)->GetHeader()->ClrGCBit();
9038
9039 inline size_t my_get_size (Object* ob)
9040 {
9041     MethodTable* mT = header(ob)->GetMethodTable();
9042     return (mT->GetBaseSize() +
9043             (mT->HasComponentSize() ?
9044              ((size_t)((CObjectHeader*)ob)->GetNumComponents() * mT->RawGetComponentSize()) : 0));
9045 }
9046
9047 //#define size(i) header(i)->GetSize()
9048 #define size(i) my_get_size (header(i))
9049
9050 #define contain_pointers(i) header(i)->ContainsPointers()
9051 #ifdef COLLECTIBLE_CLASS
9052 #define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible()
9053
9054 #define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i)
9055 #define is_collectible(i) method_table(i)->Collectible()
9056 #else //COLLECTIBLE_CLASS
9057 #define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
9058 #endif //COLLECTIBLE_CLASS
9059
9060 #if defined (MARK_ARRAY) && defined (BACKGROUND_GC)
9061 inline
9062 void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg)
9063 {
9064     uint8_t* range_beg = 0;
9065     uint8_t* range_end = 0;
9066     if (bgc_mark_array_range (seg, FALSE, &range_beg, &range_end))
9067     {
9068         clear_mark_array (range_beg, align_on_mark_word (range_end), FALSE
9069 #ifdef FEATURE_BASICFREEZE
9070             , TRUE
9071 #endif // FEATURE_BASICFREEZE
9072             );
9073     }
9074 }
9075
9076 void gc_heap::clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
9077 {
9078     if ((start < background_saved_highest_address) &&
9079         (end > background_saved_lowest_address))
9080     {
9081         start = max (start, background_saved_lowest_address);
9082         end = min (end, background_saved_highest_address);
9083
9084         size_t start_mark_bit = mark_bit_of (start);
9085         size_t end_mark_bit = mark_bit_of (end);
9086         unsigned int startbit = mark_bit_bit (start_mark_bit);
9087         unsigned int endbit = mark_bit_bit (end_mark_bit);
9088         size_t startwrd = mark_bit_word (start_mark_bit);
9089         size_t endwrd = mark_bit_word (end_mark_bit);
9090
9091         dprintf (3, ("Clearing all mark array bits between [%Ix:%Ix-[%Ix:%Ix", 
9092             (size_t)start, (size_t)start_mark_bit, 
9093             (size_t)end, (size_t)end_mark_bit));
9094
9095         unsigned int firstwrd = lowbits (~0, startbit);
9096         unsigned int lastwrd = highbits (~0, endbit);
9097
9098         if (startwrd == endwrd)
9099         {
9100             unsigned int wrd = firstwrd | lastwrd;
9101             mark_array[startwrd] &= wrd;
9102             return;
9103         }
9104
9105         // clear the first mark word.
9106         if (startbit)
9107         {
9108             mark_array[startwrd] &= firstwrd;
9109             startwrd++;
9110         }
9111
9112         for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
9113         {
9114             mark_array[wrdtmp] = 0;
9115         }
9116
9117         // clear the last mark word.
9118         if (endbit)
9119         {
9120             mark_array[endwrd] &= lastwrd;
9121         }
9122     }
9123 }
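
// A standalone sketch of the mask-based range clear above, using plain
// uint32_t words and bit indices instead of the mark-word macros. In the real
// code, lowbits (~0, n) keeps the bits below n and highbits (~0, n) keeps the
// bits at or above n; the masks below spell that out directly. Illustrative only.
#if 0
void clear_bit_range (uint32_t* bitmap, size_t start_bit, size_t end_bit)
{
    size_t startwrd = start_bit / 32;
    size_t endwrd   = end_bit / 32;
    unsigned startbit = (unsigned)(start_bit % 32);
    unsigned endbit   = (unsigned)(end_bit % 32);
    uint32_t firstwrd = ~(~0u << startbit);  // bits below startbit survive
    uint32_t lastwrd  = ~0u << endbit;       // bits at/above endbit survive

    if (startwrd == endwrd)
    {
        bitmap[startwrd] &= (firstwrd | lastwrd);
        return;
    }
    // clear the tail of the first word
    if (startbit)
    {
        bitmap[startwrd] &= firstwrd;
        startwrd++;
    }
    // clear the whole words in the middle
    for (size_t wrd = startwrd; wrd < endwrd; wrd++)
    {
        bitmap[wrd] = 0;
    }
    // clear the head of the last word
    if (endbit)
    {
        bitmap[endwrd] &= lastwrd;
    }
}
#endif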
9124
9125 void gc_heap::bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
9126 {
9127     if ((start < background_saved_highest_address) &&
9128         (end > background_saved_lowest_address))
9129     {
9130         start = max (start, background_saved_lowest_address);
9131         end = min (end, background_saved_highest_address);
9132
9133         clear_batch_mark_array_bits (start, end);
9134     }
9135 }
9136
9137 void gc_heap::clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p)
9138 {
9139     dprintf (3, ("clearing mark array bits by objects for addr [%Ix,[%Ix", 
9140                   from, end));
9141     int align_const = get_alignment_constant (!loh_p);
9142
9143     uint8_t* o = from;
9144
9145     while (o < end)
9146     {
9147         uint8_t*  next_o = o + Align (size (o), align_const);
9148
9149         if (background_object_marked (o, TRUE))
9150         {
9151             dprintf (3, ("%Ix was marked by bgc, is now cleared", o));
9152         }
9153
9154         o = next_o;
9155     }
9156 }
9157 #endif //MARK_ARRAY && BACKGROUND_GC
9158
9159 inline
9160 BOOL gc_heap::is_mark_set (uint8_t* o)
9161 {
9162     return marked (o);
9163 }
9164
9165 #if defined (_MSC_VER) && defined (_TARGET_X86_)
9166 #pragma optimize("y", on)        // Small critical routines, don't put in EBP frame 
9167 #endif //_MSC_VER && _TARGET_X86_
9168
9169 // return the generation number of an object.
9170 // It is assumed that the object is valid.
9171 //Note that this will return max_generation for a LOH object
9172 int gc_heap::object_gennum (uint8_t* o)
9173 {
9174     if (in_range_for_segment (o, ephemeral_heap_segment) &&
9175         (o >= generation_allocation_start (generation_of (max_generation-1))))
9176     {
9177         // in an ephemeral generation.
9178         for ( int i = 0; i < max_generation-1; i++)
9179         {
9180             if ((o >= generation_allocation_start (generation_of (i))))
9181                 return i;
9182         }
9183         return max_generation-1;
9184     }
9185     else
9186     {
9187         return max_generation;
9188     }
9189 }
9190
9191 int gc_heap::object_gennum_plan (uint8_t* o)
9192 {
9193     if (in_range_for_segment (o, ephemeral_heap_segment))
9194     {
9195         for (int i = 0; i <= max_generation-1; i++)
9196         {
9197             uint8_t* plan_start = generation_plan_allocation_start (generation_of (i));
9198             if (plan_start && (o >= plan_start))
9199             {
9200                 return i;
9201             }
9202         }
9203     }
9204     return max_generation;
9205 }
9206
9207 #if defined(_MSC_VER) && defined(_TARGET_X86_)
9208 #pragma optimize("", on)        // Go back to command line default optimizations
9209 #endif //_MSC_VER && _TARGET_X86_
9210
9211 heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h_number)
9212 {
9213     size_t initial_commit = SEGMENT_INITIAL_COMMIT;
9214
9215     //Commit the first page
9216     if (!virtual_alloc_commit_for_heap (new_pages, initial_commit, h_number))
9217     {
9218         return 0;
9219     }
9220
9221     //overlay the heap_segment
9222     heap_segment* new_segment = (heap_segment*)new_pages;
9223
9224     uint8_t* start = new_pages + segment_info_size;
9225     heap_segment_mem (new_segment) = start;
9226     heap_segment_used (new_segment) = start;
9227     heap_segment_reserved (new_segment) = new_pages + size;
9228     heap_segment_committed (new_segment) = new_pages + initial_commit;
9229     init_heap_segment (new_segment);
9230     dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment));
9231     return new_segment;
9232 }
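
// A sketch of the layout this function establishes: the heap_segment struct
// is overlaid at the start of the reservation, with the allocatable region
// beginning segment_info_size bytes in. The '#if 0' block lists the
// invariants that hold just before the return above (illustrative only).
//
//   new_pages                                           new_pages + size
//   | heap_segment struct + info | allocatable memory ...             |
//   ^                            ^                  ^                 ^
//   (heap_segment*)new_pages     mem == used        committed         reserved
//
#if 0
assert ((uint8_t*)new_segment == new_pages);
assert (heap_segment_mem (new_segment) == new_pages + segment_info_size);
assert (heap_segment_used (new_segment) == heap_segment_mem (new_segment));
assert (heap_segment_committed (new_segment) == new_pages + initial_commit);
assert (heap_segment_committed (new_segment) <= heap_segment_reserved (new_segment));
#endif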
9233
9234 void gc_heap::init_heap_segment (heap_segment* seg)
9235 {
9236     seg->flags = 0;
9237     heap_segment_next (seg) = 0;
9238     heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
9239     heap_segment_allocated (seg) = heap_segment_mem (seg);
9240 #ifdef BACKGROUND_GC
9241     heap_segment_background_allocated (seg) = 0;
9242     heap_segment_saved_bg_allocated (seg) = 0;
9243 #endif //BACKGROUND_GC
9244 }
9245
9246 //Releases the segment to the OS.
9247 // This is always called on one thread only, so calling seg_table->remove is fine.
9248 void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding)
9249 {
9250     if (!heap_segment_loh_p (seg))
9251     {
9252         //cleanup the brick table back to the empty value
9253         clear_brick_table (heap_segment_mem (seg), heap_segment_reserved (seg));
9254     }
9255
9256     if (consider_hoarding)
9257     {
9258         assert ((heap_segment_mem (seg) - (uint8_t*)seg) <= ptrdiff_t(2*OS_PAGE_SIZE));
9259         size_t ss = (size_t) (heap_segment_reserved (seg) - (uint8_t*)seg);
9260         //Don't keep the big ones.
9261         if (ss <= INITIAL_ALLOC)
9262         {
9263             dprintf (2, ("Hoarding segment %Ix", (size_t)seg));
9264 #ifdef BACKGROUND_GC
9265             // We don't need to clear the decommitted flag because when this segment is used
9266             // for a new segment the flags will be cleared.
9267             if (!heap_segment_decommitted_p (seg))
9268 #endif //BACKGROUND_GC
9269             {
9270                 decommit_heap_segment (seg);
9271             }
9272
9273 #ifdef SEG_MAPPING_TABLE
9274             seg_mapping_table_remove_segment (seg);
9275 #endif //SEG_MAPPING_TABLE
9276
9277             heap_segment_next (seg) = segment_standby_list;
9278             segment_standby_list = seg;
9279             seg = 0;
9280         }
9281     }
9282
9283     if (seg != 0)
9284     {
9285         dprintf (2, ("h%d: del seg: [%Ix, %Ix[", 
9286                      heap_number, (size_t)seg,
9287                      (size_t)(heap_segment_reserved (seg))));
9288
9289 #ifdef BACKGROUND_GC
9290         ::record_changed_seg ((uint8_t*)seg, heap_segment_reserved (seg), 
9291                             settings.gc_index, current_bgc_state,
9292                             seg_deleted);
9293         decommit_mark_array_by_seg (seg);
9294 #endif //BACKGROUND_GC
9295
9296 #ifdef SEG_MAPPING_TABLE
9297         seg_mapping_table_remove_segment (seg);
9298 #else //SEG_MAPPING_TABLE
9299         seg_table->remove ((uint8_t*)seg);
9300 #endif //SEG_MAPPING_TABLE
9301
9302         release_segment (seg);
9303     }
9304 }
9305
9306 //resets the pages beyond the allocated size so they won't be swapped out and back in
9307
9308 void gc_heap::reset_heap_segment_pages (heap_segment* seg)
9309 {
9310     size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg));
9311     size_t size = (size_t)heap_segment_committed (seg) - page_start;
9312     if (size != 0)
9313         GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */);
9314 }
9315
9316 void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
9317                                            size_t extra_space)
9318 {
9319     uint8_t*  page_start = align_on_page (heap_segment_allocated(seg));
9320     size_t size = heap_segment_committed (seg) - page_start;
9321     extra_space = align_on_page (extra_space);
9322     if (size >= max ((extra_space + 2*OS_PAGE_SIZE), 100*OS_PAGE_SIZE))
9323     {
9324         page_start += max(extra_space, 32*OS_PAGE_SIZE);
9325         size -= max (extra_space, 32*OS_PAGE_SIZE);
9326
9327         GCToOSInterface::VirtualDecommit (page_start, size);
9328         dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)", 
9329             (size_t)page_start, 
9330             (size_t)(page_start + size),
9331             size));
9332         heap_segment_committed (seg) = page_start;
9333         if (heap_segment_used (seg) > heap_segment_committed (seg))
9334         {
9335             heap_segment_used (seg) = heap_segment_committed (seg);
9336         }
9337     }
9338 }
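
// A worked example of the thresholds above, assuming 4 KB OS pages and
// extra_space == 0. With 256 pages (1 MB) committed beyond the aligned
// allocated mark: 256 >= max(0 + 2, 100), so we decommit; we keep
// max(0, 32) = 32 pages (128 KB) committed as a cushion and release the
// remaining 224 pages (896 KB). With only 64 pages of slack, the 100-page
// threshold fails and nothing is decommitted - small tails are not worth
// the OS round trip.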
9339
9340 //decommit all pages except one or two
9341 void gc_heap::decommit_heap_segment (heap_segment* seg)
9342 {
9343     uint8_t*  page_start = align_on_page (heap_segment_mem (seg));
9344
9345     dprintf (3, ("Decommitting heap segment %Ix", (size_t)seg));
9346
9347 #ifdef BACKGROUND_GC
9348     page_start += OS_PAGE_SIZE;
9349 #endif //BACKGROUND_GC
9350
9351     size_t size = heap_segment_committed (seg) - page_start;
9352     GCToOSInterface::VirtualDecommit (page_start, size);
9353
9354     //re-init the segment object
9355     heap_segment_committed (seg) = page_start;
9356     if (heap_segment_used (seg) > heap_segment_committed (seg))
9357     {
9358         heap_segment_used (seg) = heap_segment_committed (seg);
9359     }
9360 }
9361
9362 void gc_heap::clear_gen0_bricks()
9363 {
9364     if (!gen0_bricks_cleared)
9365     {
9366         gen0_bricks_cleared = TRUE;
9367         //initialize brick table for gen 0
9368         for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
9369                 b < brick_of (align_on_brick
9370                             (heap_segment_allocated (ephemeral_heap_segment)));
9371                 b++)
9372         {
9373             set_brick (b, -1);
9374         }
9375     }
9376 }
9377
9378 #ifdef BACKGROUND_GC
9379 void gc_heap::rearrange_small_heap_segments()
9380 {
9381     heap_segment* seg = freeable_small_heap_segment;
9382     while (seg)
9383     {
9384         heap_segment* next_seg = heap_segment_next (seg);
9385         // TODO: we need to consider hoarding here.
9386         delete_heap_segment (seg, FALSE);
9387         seg = next_seg;
9388     }
9389     freeable_small_heap_segment = 0;
9390 }
9391 #endif //BACKGROUND_GC
9392
9393 void gc_heap::rearrange_large_heap_segments()
9394 {
9395     dprintf (2, ("deleting empty large segments"));
9396     heap_segment* seg = freeable_large_heap_segment;
9397     while (seg)
9398     {
9399         heap_segment* next_seg = heap_segment_next (seg);
9400         delete_heap_segment (seg, GCConfig::GetRetainVM());
9401         seg = next_seg;
9402     }
9403     freeable_large_heap_segment = 0;
9404 }
9405
9406 void gc_heap::rearrange_heap_segments(BOOL compacting)
9407 {
9408     heap_segment* seg =
9409         generation_start_segment (generation_of (max_generation));
9410
9411     heap_segment* prev_seg = 0;
9412     heap_segment* next_seg = 0;
9413     while (seg)
9414     {
9415         next_seg = heap_segment_next (seg);
9416
9417         //link ephemeral segment when expanding
9418         if ((next_seg == 0) && (seg != ephemeral_heap_segment))
9419         {
9420             seg->next = ephemeral_heap_segment;
9421             next_seg = heap_segment_next (seg);
9422         }
9423
9424         //re-used expanded heap segment
9425         if ((seg == ephemeral_heap_segment) && next_seg)
9426         {
9427             heap_segment_next (prev_seg) = next_seg;
9428             heap_segment_next (seg) = 0;
9429         }
9430         else
9431         {
9432             uint8_t* end_segment = (compacting ?
9433                                  heap_segment_plan_allocated (seg) : 
9434                                  heap_segment_allocated (seg));
9435             // check if the segment was reached by allocation
9436             if ((end_segment == heap_segment_mem (seg))&&
9437                 !heap_segment_read_only_p (seg))
9438             {
9439                 //if not, unthread and delete
9440                 assert (prev_seg);
9441                 assert (seg != ephemeral_heap_segment);
9442                 heap_segment_next (prev_seg) = next_seg;
9443                 delete_heap_segment (seg, GCConfig::GetRetainVM());
9444
9445                 dprintf (2, ("Deleting heap segment %Ix", (size_t)seg));
9446             }
9447             else
9448             {
9449                 if (!heap_segment_read_only_p (seg))
9450                 {
9451                     if (compacting)
9452                     {
9453                         heap_segment_allocated (seg) =
9454                             heap_segment_plan_allocated (seg);
9455                     }
9456
9457                     // reset the pages between allocated and committed.
9458                     if (seg != ephemeral_heap_segment)
9459                     {
9460                         decommit_heap_segment_pages (seg, 0);
9461                     }
9462                 }
9463                 prev_seg = seg;
9464             }
9465         }
9466
9467         seg = next_seg;
9468     }
9469 }
9470
9471
9472 #ifdef WRITE_WATCH
9473
9474 uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch
9475
9476 #ifdef TIME_WRITE_WATCH
9477 static unsigned int tot_cycles = 0;
9478 #endif //TIME_WRITE_WATCH
9479
9480 #ifdef CARD_BUNDLE
9481
9482 inline void gc_heap::verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word)
9483 {
9484 #ifdef _DEBUG
9485     for (size_t x = cardw_card_bundle (first_card_word); x < cardw_card_bundle (last_card_word); x++)
9486     {
9487         if (!card_bundle_set_p (x))
9488         {
9489             dprintf (3, ("Card bundle %Ix not set", x));
9490             assert (!"Card bundle not set");
9491         }
9492     }
9493 #endif
9494 }
9495
9496 // Verifies that any bundles that are not set represent only cards that are not set.
9497 inline void gc_heap::verify_card_bundles()
9498 {
9499 #ifdef _DEBUG
9500     size_t lowest_card = card_word (card_of (lowest_address));
9501     size_t highest_card = card_word (card_of (highest_address));
9502     size_t cardb = cardw_card_bundle (lowest_card);
9503     size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card));
9504
9505     while (cardb < end_cardb)
9506     {
9507         uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)];
9508         uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)];
9509
9510         if (card_bundle_set_p (cardb) == 0)
9511         {
9512             // Verify that no card is set
9513             while (card_word < card_word_end)
9514             {
9515                 if (*card_word != 0)
9516                 {
9517                     dprintf  (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear",
9518                             dd_collection_count (dynamic_data_of (0)), 
9519                             (size_t)(card_word-&card_table[0]),
9520                             (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb));
9521                 }
9522
9523                 assert((*card_word)==0);
9524                 card_word++;
9525             }
9526         }
9527
9528         cardb++;
9529     }
9530 #endif
9531 }
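
// The invariant verify_card_bundles checks is: a clear bundle bit claims that
// every card word it covers is zero. A minimal standalone sketch of that claim
// (illustrative only, with assumed parameters; kept out of the build):
#if 0
static bool bundle_invariant_holds (bool bundle_set, const uint32_t* card_words, size_t word_count)
{
    // A set bundle makes no claim about its card words.
    if (bundle_set)
        return true;

    // A clear bundle asserts that every covered card word is zero.
    for (size_t i = 0; i < word_count; i++)
    {
        if (card_words[i] != 0)
            return false;
    }
    return true;
}
#endif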
9532
9533 // If card bundles are enabled, use write watch to find pages in the card table that have 
9534 // been dirtied, and set the corresponding card bundle bits.
9535 void gc_heap::update_card_table_bundle()
9536 {
9537     if (card_bundles_enabled())
9538     {
9539         // The address of the card word containing the card representing the lowest heap address
9540         uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);
9541
9542         // The address of the card word containing the card representing the highest heap address
9543         uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
9544         
9545         uint8_t* saved_base_address = base_address;
9546         uintptr_t bcount = array_size;
9547         size_t saved_region_size = align_on_page (high_address) - saved_base_address;
9548
9549         do
9550         {
9551             size_t region_size = align_on_page (high_address) - base_address;
9552
9553             dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)base_address+region_size));
9554             bool success = GCToOSInterface::GetWriteWatch(false /* resetState */,
9555                                                           base_address,
9556                                                           region_size,
9557                                                           (void**)g_addresses,
9558                                                           &bcount);
9559             assert (success && "GetWriteWatch failed!");
9560
9561             dprintf (3,("Found %Id pages written", bcount));
9562             for (unsigned i = 0; i < bcount; i++)
9563             {
9564                 // Offset of the dirty page from the start of the card table (clamped to base_address)
9565                 size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
9566
9567                 // Offset of the end of the page from the start of the card table (clamped to high addr)
9568                 size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
9569                 assert (bcardw >= card_word (card_of (g_gc_lowest_address)));
9570
9571                 // Set the card bundle bits representing the dirty card table page
9572                 card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw)));
9573                 dprintf (3,("Set Card bundle [%Ix, %Ix[", cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))));
9574
9575                 verify_card_bundle_bits_set(bcardw, ecardw);
9576             }
9577
9578             if (bcount >= array_size)
9579             {
9580                 base_address = g_addresses [array_size-1] + OS_PAGE_SIZE;
9581                 bcount = array_size;
9582             }
9583
9584         } while ((bcount >= array_size) && (base_address < high_address));
9585
9586         // Now that we've updated the card bundle bits, reset the write-tracking state. 
9587         GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size);
9588     }
9589 }
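
// Worked example of the page-to-bundle arithmetic above (figures are
// illustrative; the real constants come from the build): with 4096-byte OS
// pages and 32-bit card words, one dirty page of the card table covers
// 4096 / sizeof(uint32_t) = 1024 card words, so a single GetWriteWatch hit at
// card-word offset w turns into setting the bundle bits for
// [cardw_card_bundle (w), cardw_card_bundle (align_cardw_on_bundle (w + 1024))).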
9590 #endif //CARD_BUNDLE
9591
9592 // static
9593 void gc_heap::reset_write_watch_for_gc_heap(void* base_address, size_t region_size)
9594 {
9595 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9596     SoftwareWriteWatch::ClearDirty(base_address, region_size);
9597 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9598     GCToOSInterface::ResetWriteWatch(base_address, region_size);
9599 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9600 }
9601
9602 // static
9603 void gc_heap::get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended)
9604 {
9605 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9606     SoftwareWriteWatch::GetDirty(base_address, region_size, dirty_pages, dirty_page_count_ref, reset, is_runtime_suspended);
9607 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9608     UNREFERENCED_PARAMETER(is_runtime_suspended);
9609     bool success = GCToOSInterface::GetWriteWatch(reset, base_address, region_size, dirty_pages, dirty_page_count_ref);
9610     assert(success);
9611 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9612 }
9613
9614 const size_t ww_reset_quantum = 128*1024*1024;
9615
9616 inline
9617 void gc_heap::switch_one_quantum()
9618 {
9619     enable_preemptive ();
9620     GCToOSInterface::Sleep (1);
9621     disable_preemptive (true);
9622 }
9623
9624 void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size)
9625 {
9626     size_t reset_size = 0;
9627     size_t remaining_reset_size = 0;
9628     size_t next_reset_size = 0;
9629
9630     while (reset_size != total_reset_size)
9631     {
9632         remaining_reset_size = total_reset_size - reset_size;
9633         next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size);
9634         if (next_reset_size)
9635         {
9636             reset_write_watch_for_gc_heap(start_address, next_reset_size);
9637             reset_size += next_reset_size;
9638
9639             switch_one_quantum();
9640         }
9641     }
9642
9643     assert (reset_size == total_reset_size);
9644 }
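
// Worked example (hypothetical total): resetting 300MB against the 128MB
// ww_reset_quantum below proceeds in chunks of 128MB, 128MB and 44MB, with a
// switch_one_quantum() (Sleep(1) in preemptive mode) after each chunk, i.e.
// the loop is equivalent to this sketch:
#if 0
size_t total = 300 * 1024 * 1024;   // assumed amount to reset
size_t done = 0;
while (done != total)
{
    size_t chunk = min (total - done, ww_reset_quantum);  // 128MB, 128MB, 44MB
    done += chunk;                                        // reset + Sleep(1) per chunk
}
#endif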
9645
9646 // This does a Sleep(1) for every ww_reset_quantum bytes of write watch
9647 // that we reset concurrently.
9648 void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size)
9649 {
9650     if (concurrent_p)
9651     {
9652         *current_total_reset_size += last_reset_size;
9653
9654         dprintf (2, ("reset %Id bytes so far", *current_total_reset_size));
9655
9656         if (*current_total_reset_size > ww_reset_quantum)
9657         {
9658             switch_one_quantum();
9659
9660             *current_total_reset_size = 0;
9661         }
9662     }
9663 }
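
// Example of the accumulation (illustrative sizes): for concurrent resets of
// 100MB, 20MB and 50MB regions, the running total crosses ww_reset_quantum
// (128MB) only after the third call, so one Sleep(1) is taken there and the
// counter starts over from zero.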
9664
9665 void gc_heap::reset_write_watch (BOOL concurrent_p)
9666 {
9667 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9668     // Software write watch currently requires the runtime to be suspended during reset. See SoftwareWriteWatch::ClearDirty().
9669     assert(!concurrent_p);
9670 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9671
9672     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
9673
9674     PREFIX_ASSUME(seg != NULL);
9675
9676     size_t reset_size = 0;
9677     size_t region_size = 0;
9678
9679     dprintf (2, ("bgc lowest: %Ix, bgc highest: %Ix", background_saved_lowest_address, background_saved_highest_address));
9680
9681     while (seg)
9682     {
9683         uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
9684         base_address = max (base_address, background_saved_lowest_address);
9685
9686         uint8_t* high_address = 0;
9687         high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
9688         high_address = min (high_address, background_saved_highest_address);
9689         
9690         if (base_address < high_address)
9691         {
9692             region_size = high_address - base_address;
9693
9694 #ifdef TIME_WRITE_WATCH
9695             unsigned int time_start = GetCycleCount32();
9696 #endif //TIME_WRITE_WATCH
9697             dprintf (3, ("h%d: soh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
9698             //reset_ww_by_chunk (base_address, region_size);
9699             reset_write_watch_for_gc_heap(base_address, region_size);
9700
9701 #ifdef TIME_WRITE_WATCH
9702             unsigned int time_stop = GetCycleCount32();
9703             tot_cycles += time_stop - time_start;
9704             printf ("ResetWriteWatch Duration: %d, total: %d\n",
9705                     time_stop - time_start, tot_cycles);
9706 #endif //TIME_WRITE_WATCH
9707
9708             switch_on_reset (concurrent_p, &reset_size, region_size);
9709         }
9710
9711         seg = heap_segment_next_rw (seg);
9712
9713         concurrent_print_time_delta ("CRWW soh");
9714     }
9715
9716     //concurrent_print_time_delta ("CRW soh");
9717
9718     seg = heap_segment_rw (generation_start_segment (large_object_generation));
9719
9720     PREFIX_ASSUME(seg != NULL);
9721
9722     while (seg)
9723     {
9724         uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
9725         uint8_t* high_address =  heap_segment_allocated (seg);
9726
9727         base_address = max (base_address, background_saved_lowest_address);
9728         high_address = min (high_address, background_saved_highest_address);
9729
9730         if (base_address < high_address)
9731         {
9732             region_size = high_address - base_address;
9733             
9734 #ifdef TIME_WRITE_WATCH
9735             unsigned int time_start = GetCycleCount32();
9736 #endif //TIME_WRITE_WATCH
9737             dprintf (3, ("h%d: loh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
9738             //reset_ww_by_chunk (base_address, region_size);
9739             reset_write_watch_for_gc_heap(base_address, region_size);
9740
9741 #ifdef TIME_WRITE_WATCH
9742             unsigned int time_stop = GetCycleCount32();
9743             tot_cycles += time_stop - time_start;
9744             printf ("ResetWriteWatch Duration: %d, total: %d\n",
9745                     time_stop - time_start, tot_cycles);
9746 #endif //TIME_WRITE_WATCH
9747     
9748             switch_on_reset (concurrent_p, &reset_size, region_size);
9749         }
9750
9751         seg = heap_segment_next_rw (seg);
9752
9753         concurrent_print_time_delta ("CRWW loh");
9754     }
9755
9756 #ifdef DEBUG_WRITE_WATCH
9757     debug_write_watch = (uint8_t**)~0;
9758 #endif //DEBUG_WRITE_WATCH
9759 }
9760
9761 #endif //WRITE_WATCH
9762
9763 #ifdef BACKGROUND_GC
9764 void gc_heap::restart_vm()
9765 {
9766     //assert (generation_allocation_pointer (youngest_generation) == 0);
9767     dprintf (3, ("Restarting EE"));
9768     STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Restarting EE\n");
9769     ee_proceed_event.Set();
9770 }
9771
9772 inline
9773 void fire_alloc_wait_event (alloc_wait_reason awr, BOOL begin_p)
9774 {
9775     if (awr != awr_ignored)
9776     {
9777         if (begin_p)
9778         {
9779             FIRE_EVENT(BGCAllocWaitBegin, awr);
9780         }
9781         else
9782         {
9783             FIRE_EVENT(BGCAllocWaitEnd, awr);
9784         }
9785     }
9786 }
9787
9788
9789 void gc_heap::fire_alloc_wait_event_begin (alloc_wait_reason awr)
9790 {
9791     fire_alloc_wait_event (awr, TRUE);
9792 }
9793
9794
9795 void gc_heap::fire_alloc_wait_event_end (alloc_wait_reason awr)
9796 {
9797     fire_alloc_wait_event (awr, FALSE);
9798 }
9799 #endif //BACKGROUND_GC
9800 void gc_heap::make_generation (generation& gen, heap_segment* seg, uint8_t* start, uint8_t* pointer)
9801 {
9802     gen.allocation_start = start;
9803     gen.allocation_context.alloc_ptr = pointer;
9804     gen.allocation_context.alloc_limit = pointer;
9805     gen.allocation_context.alloc_bytes = 0;
9806     gen.allocation_context.alloc_bytes_loh = 0;
9807     gen.allocation_context_start_region = pointer;
9808     gen.start_segment = seg;
9809     gen.allocation_segment = seg;
9810     gen.plan_allocation_start = 0;
9811     gen.free_list_space = 0;
9812     gen.pinned_allocated = 0; 
9813     gen.free_list_allocated = 0; 
9814     gen.end_seg_allocated = 0;
9815     gen.condemned_allocated = 0; 
9816     gen.free_obj_space = 0;
9817     gen.allocation_size = 0;
9818     gen.pinned_allocation_sweep_size = 0;
9819     gen.pinned_allocation_compact_size = 0;
9820     gen.allocate_end_seg_p = FALSE;
9821     gen.free_list_allocator.clear();
9822
9823 #ifdef FREE_USAGE_STATS
9824     memset (gen.gen_free_spaces, 0, sizeof (gen.gen_free_spaces));
9825     memset (gen.gen_current_pinned_free_spaces, 0, sizeof (gen.gen_current_pinned_free_spaces));
9826     memset (gen.gen_plugs, 0, sizeof (gen.gen_plugs));
9827 #endif //FREE_USAGE_STATS
9828 }
9829
9830 void gc_heap::adjust_ephemeral_limits ()
9831 {
9832     ephemeral_low = generation_allocation_start (generation_of (max_generation - 1));
9833     ephemeral_high = heap_segment_reserved (ephemeral_heap_segment);
9834
9835     dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix",
9836                  (size_t)ephemeral_low, (size_t)ephemeral_high));
9837
9838 #ifndef MULTIPLE_HEAPS
9839     // This updates the write barrier helpers with the new info.
9840     stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high);
9841 #endif // MULTIPLE_HEAPS
9842 }
9843
9844 #if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
9845 FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config)
9846 {
9847     FILE* logFile;
9848
9849     if (!temp_logfile_name.Get())
9850     {
9851         return nullptr;
9852     }
9853
9854     char logfile_name[MAX_LONGPATH+1];
9855     uint32_t pid = GCToOSInterface::GetCurrentProcessId();
9856     const char* suffix = is_config ? ".config.log" : ".log";
9857     _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix);
9858     logFile = fopen(logfile_name, "wb");
9859     return logFile;
9860 }
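
// Usage example (hypothetical name and pid): with a configured base name of
// "gclog" and process id 1234, this produces "gclog.1234.log", or
// "gclog.1234.config.log" when is_config is true.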
9861 #endif //TRACE_GC || GC_CONFIG_DRIVEN
9862
9863 HRESULT gc_heap::initialize_gc (size_t segment_size,
9864                                 size_t heap_size
9865 #ifdef MULTIPLE_HEAPS
9866                                 ,unsigned number_of_heaps
9867 #endif //MULTIPLE_HEAPS
9868 )
9869 {
9870 #ifdef TRACE_GC
9871     if (GCConfig::GetLogEnabled())
9872     {
9873         gc_log = CreateLogFile(GCConfig::GetLogFile(), false);
9874
9875         if (gc_log == NULL)
9876             return E_FAIL;
9877
9878         // GCLogFileSize in MBs.
9879         gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize());
9880
9881         if (gc_log_file_size <= 0 || gc_log_file_size > 500)
9882         {
9883             fclose (gc_log);
9884             return E_FAIL;
9885         }
9886
9887         gc_log_lock.Initialize();
9888         gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size];
9889         if (!gc_log_buffer)
9890         {
9891             fclose(gc_log);
9892             return E_FAIL;
9893         }
9894
9895         memset (gc_log_buffer, '*', gc_log_buffer_size);
9896
9897         max_gc_buffers = gc_log_file_size * 1024 * 1024 / gc_log_buffer_size;
9898     }
9899 #endif // TRACE_GC
9900
9901 #ifdef GC_CONFIG_DRIVEN
9902     if (GCConfig::GetConfigLogEnabled())
9903     {
9904         gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true);
9905
9906         if (gc_config_log == NULL)
9907             return E_FAIL;
9908
9909         gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size];
9910         if (!gc_config_log_buffer)
9911         {
9912             fclose(gc_config_log);
9913             return E_FAIL;
9914         }
9915
9916         compact_ratio = static_cast<int>(GCConfig::GetCompactRatio());
9917
9918         //         h#  | GC  | gen | C   | EX   | NF  | BF  | ML  | DM  || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP | 
9919         cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |",
9920                 "h#", // heap index
9921                 "GC", // GC index
9922                 "g", // generation
9923                 "C",  // compaction (empty means sweeping), 'M' means it was mandatory, 'W' means it was not
9924                 "EX", // heap expansion
9925                 "NF", // normal fit
9926                 "BF", // best fit (if neither NF nor BF is indicated, it means a new seg had to be acquired)
9927                 "ML", // mark list
9928                 "DM", // demotion
9929                 "PreS", // short object before pinned plug
9930                 "PostS", // short object after pinned plug
9931                 "Merge", // merged pinned plugs
9932                 "Conv", // converted to pinned plug
9933                 "Pre", // plug before pinned plug but not after
9934                 "Post", // plug after pinned plug but not before
9935                 "PrPo", // plug both before and after pinned plug
9936                 "PreP", // pre short object padded
9937                 "PostP" // post short object padded
9938                 ));
9939     }
9940 #endif //GC_CONFIG_DRIVEN
9941
9942 #ifdef GC_STATS
9943     GCConfigStringHolder logFileName = GCConfig::GetMixLogFile();
9944     if (logFileName.Get() != nullptr)
9945     {
9946         GCStatistics::logFileName = _strdup(logFileName.Get());
9947         GCStatistics::logFile = fopen(GCStatistics::logFileName, "a");
9948         if (!GCStatistics::logFile)
9949         {
9950             return E_FAIL;
9951         }
9952     }
9953 #endif // GC_STATS
9954
9955     HRESULT hres = S_OK;
9956
9957 #ifdef WRITE_WATCH
9958     hardware_write_watch_api_supported();
9959 #ifdef BACKGROUND_GC
9960     if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC())
9961     {
9962         gc_can_use_concurrent = true;
9963 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9964         virtual_alloc_hardware_write_watch = true;
9965 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9966     }
9967     else
9968     {
9969         gc_can_use_concurrent = false;
9970     }
9971 #endif //BACKGROUND_GC
9972 #endif //WRITE_WATCH
9973
9974 #ifdef BACKGROUND_GC
9975     // leave the first page to contain only segment info
9976     // because otherwise we would need to revisit the first page frequently
9977     // during background GC.
9978     segment_info_size = OS_PAGE_SIZE;
9979 #else
9980     segment_info_size = Align (sizeof (heap_segment), get_alignment_constant (FALSE));
9981 #endif //BACKGROUND_GC
9982
9983     reserved_memory = 0;
9984     unsigned block_count;
9985 #ifdef MULTIPLE_HEAPS
9986     reserved_memory_limit = (segment_size + heap_size) * number_of_heaps;
9987     block_count = number_of_heaps;
9988     n_heaps = number_of_heaps;
9989 #else //MULTIPLE_HEAPS
9990     reserved_memory_limit = segment_size + heap_size;
9991     block_count = 1;
9992 #endif //MULTIPLE_HEAPS
9993
9994     if (!reserve_initial_memory(segment_size,heap_size,block_count))
9995         return E_OUTOFMEMORY;
9996
9997 #ifdef CARD_BUNDLE
9998     //check if we need to turn on card_bundles.
9999 #ifdef MULTIPLE_HEAPS
10000     // use INT64 arithmetic here because of possible overflow on 32-bit platforms
10001     uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*number_of_heaps;
10002 #else
10003     // use INT64 arithmetic here because of possible overflow on 32-bit platforms
10004     uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
10005 #endif //MULTIPLE_HEAPS
10006
10007     if (can_use_write_watch_for_card_table() && reserved_memory >= th)
10008     {
10009         settings.card_bundles = TRUE;
10010     }
10011     else
10012     {
10013         settings.card_bundles = FALSE;
10014     }
10015 #endif //CARD_BUNDLE
10016
10017     settings.first_init();
10018
10019     int latency_level_from_config = static_cast<int>(GCConfig::GetLatencyLevel());
10020     if (latency_level_from_config >= latency_level_first && latency_level_from_config <= latency_level_last)
10021     {
10022         gc_heap::latency_level = static_cast<gc_latency_level>(latency_level_from_config);
10023     }
10024
10025     init_static_data();
10026
10027     g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address);
10028
10029     if (!g_gc_card_table)
10030         return E_OUTOFMEMORY;
10031
10032     gc_started = FALSE;
10033
10034 #ifdef MULTIPLE_HEAPS
10035     g_heaps = new (nothrow) gc_heap* [number_of_heaps];
10036     if (!g_heaps)
10037         return E_OUTOFMEMORY;
10038
10039 #ifdef _PREFAST_ 
10040 #pragma warning(push)
10041 #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
10042 #endif // _PREFAST_
10043     g_promoted = new (nothrow) size_t [number_of_heaps*16];
10044     g_bpromoted = new (nothrow) size_t [number_of_heaps*16];
10045 #ifdef MH_SC_MARK
10046     g_mark_stack_busy = new (nothrow) int[(number_of_heaps+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
10047 #endif //MH_SC_MARK
10048 #ifdef _PREFAST_ 
10049 #pragma warning(pop)
10050 #endif // _PREFAST_
10051     if (!g_promoted || !g_bpromoted)
10052         return E_OUTOFMEMORY;
10053
10054 #ifdef MH_SC_MARK
10055     if (!g_mark_stack_busy)
10056         return E_OUTOFMEMORY;
10057 #endif //MH_SC_MARK
10058
10059     if (!create_thread_support (number_of_heaps))
10060         return E_OUTOFMEMORY;
10061
10062     if (!heap_select::init (number_of_heaps))
10063         return E_OUTOFMEMORY;
10064
10065 #endif //MULTIPLE_HEAPS
10066
10067 #ifdef MULTIPLE_HEAPS
10068     yp_spin_count_unit = 32 * number_of_heaps;
10069 #else
10070     yp_spin_count_unit = 32 * g_num_processors;
10071 #endif //MULTIPLE_HEAPS
10072
10073     if (!init_semi_shared())
10074     {
10075         hres = E_FAIL;
10076     }
10077
10078     return hres;
10079 }
10080
10081 //Initializes PER_HEAP_ISOLATED data members.
10082 int
10083 gc_heap::init_semi_shared()
10084 {
10085     int ret = 0;
10086
10087     // This is used for heap expansion - it fixes exactly the starts for gen 0
10088     // through (max_generation-1). When we expand the heap we allocate all these
10089     // gen starts at the beginning of the new ephemeral seg.
10090     eph_gen_starts_size = (Align (min_obj_size)) * max_generation;
10091
10092 #ifdef MARK_LIST
10093 #ifdef MULTIPLE_HEAPS
10094     mark_list_size = min (150*1024, max (8192, soh_segment_size/(2*10*32)));
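    // Worked example (illustrative segment size): with a 256MB soh_segment_size
    // this is min (150*1024, max (8192, 268435456/640)) = min (153600, 419430)
    // = 153600 mark list entries per heap.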
10095     g_mark_list = make_mark_list (mark_list_size*n_heaps);
10096
10097     min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2;
10098 #ifdef PARALLEL_MARK_LIST_SORT
10099     g_mark_list_copy = make_mark_list (mark_list_size*n_heaps);
10100     if (!g_mark_list_copy)
10101     {
10102         goto cleanup;
10103     }
10104 #endif //PARALLEL_MARK_LIST_SORT
10105
10106 #else //MULTIPLE_HEAPS
10107
10108     mark_list_size = max (8192, soh_segment_size/(64*32));
10109     g_mark_list = make_mark_list (mark_list_size);
10110
10111 #endif //MULTIPLE_HEAPS
10112
10113     dprintf (3, ("mark_list_size: %d", mark_list_size));
10114
10115     if (!g_mark_list)
10116     {
10117         goto cleanup;
10118     }
10119 #endif //MARK_LIST
10120
10121 #if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
10122     if (!seg_mapping_table_init())
10123         goto cleanup;
10124 #endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
10125
10126 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
10127     seg_table = sorted_table::make_sorted_table();
10128
10129     if (!seg_table)
10130         goto cleanup;
10131 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
10132
10133     segment_standby_list = 0;
10134
10135     if (!full_gc_approach_event.CreateManualEventNoThrow(FALSE))
10136     {
10137         goto cleanup;
10138     }
10139     if (!full_gc_end_event.CreateManualEventNoThrow(FALSE))
10140     {
10141         goto cleanup;
10142     }
10143
10144     fgn_maxgen_percent = 0;
10145     fgn_loh_percent = 0;
10146     full_gc_approach_event_set = false;
10147
10148     memset (full_gc_counts, 0, sizeof (full_gc_counts));
10149
10150     last_gc_index = 0;
10151     should_expand_in_full_gc = FALSE;
10152
10153 #ifdef FEATURE_LOH_COMPACTION
10154     loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0;
10155     loh_compaction_mode = loh_compaction_default;
10156 #endif //FEATURE_LOH_COMPACTION
10157
10158     loh_size_threshold = (size_t)GCConfig::GetLOHThreshold();
10159     assert (loh_size_threshold >= LARGE_OBJECT_SIZE);
10160
10161 #ifdef BACKGROUND_GC
10162     memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts));
10163     bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount());
10164     bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin());
10165
10166     {   
10167         int number_bgc_threads = 1;
10168 #ifdef MULTIPLE_HEAPS
10169         number_bgc_threads = n_heaps;
10170 #endif //MULTIPLE_HEAPS
10171         if (!create_bgc_threads_support (number_bgc_threads))
10172         {
10173             goto cleanup;
10174         }
10175     }
10176 #endif //BACKGROUND_GC
10177
10178     memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
10179
10180 #ifdef GC_CONFIG_DRIVEN
10181     compact_or_sweep_gcs[0] = 0;
10182     compact_or_sweep_gcs[1] = 0;
10183 #endif //GC_CONFIG_DRIVEN
10184
10185 #ifdef SHORT_PLUGS
10186     short_plugs_pad_ratio = (double)DESIRED_PLUG_LENGTH / (double)(DESIRED_PLUG_LENGTH - Align (min_obj_size));
10187 #endif //SHORT_PLUGS
10188
10189     ret = 1;
10190
10191 cleanup:
10192
10193     if (!ret)
10194     {
10195         if (full_gc_approach_event.IsValid())
10196         {
10197             full_gc_approach_event.CloseEvent();
10198         }
10199         if (full_gc_end_event.IsValid())
10200         {
10201             full_gc_end_event.CloseEvent();
10202         }
10203     }
10204
10205     return ret;
10206 }
10207
10208 gc_heap* gc_heap::make_gc_heap (
10209 #ifdef MULTIPLE_HEAPS
10210                                 GCHeap* vm_hp,
10211                                 int heap_number
10212 #endif //MULTIPLE_HEAPS
10213                                 )
10214 {
10215     gc_heap* res = 0;
10216
10217 #ifdef MULTIPLE_HEAPS
10218     res = new (nothrow) gc_heap;
10219     if (!res)
10220         return 0;
10221
10222     res->vm_heap = vm_hp;
10223     res->alloc_context_count = 0;
10224
10225 #ifdef MARK_LIST
10226 #ifdef PARALLEL_MARK_LIST_SORT
10227     res->mark_list_piece_start = new (nothrow) uint8_t**[n_heaps];
10228     if (!res->mark_list_piece_start)
10229         return 0;
10230
10231 #ifdef _PREFAST_ 
10232 #pragma warning(push)
10233 #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
10234 #endif // _PREFAST_
10235     res->mark_list_piece_end = new (nothrow) uint8_t**[n_heaps + 32]; // +32 is padding to reduce false sharing
10236 #ifdef _PREFAST_ 
10237 #pragma warning(pop)
10238 #endif // _PREFAST_
10239
10240     if (!res->mark_list_piece_end)
10241         return 0;
10242 #endif //PARALLEL_MARK_LIST_SORT
10243 #endif //MARK_LIST
10244
10245
10246 #endif //MULTIPLE_HEAPS
10247
10248     if (res->init_gc_heap (
10249 #ifdef MULTIPLE_HEAPS
10250         heap_number
10251 #else  //MULTIPLE_HEAPS
10252         0
10253 #endif //MULTIPLE_HEAPS
10254         )==0)
10255     {
10256         return 0;
10257     }
10258
10259 #ifdef MULTIPLE_HEAPS
10260     return res;
10261 #else
10262     return (gc_heap*)1;
10263 #endif //MULTIPLE_HEAPS
10264 }
10265
10266 uint32_t
10267 gc_heap::wait_for_gc_done(int32_t timeOut)
10268 {
10269     bool cooperative_mode = enable_preemptive ();
10270
10271     uint32_t dwWaitResult = NOERROR;
10272
10273     gc_heap* wait_heap = NULL;
10274     while (gc_heap::gc_started)
10275     {       
10276 #ifdef MULTIPLE_HEAPS
10277         wait_heap = GCHeap::GetHeap(heap_select::select_heap(NULL, 0))->pGenGCHeap;
10278         dprintf(2, ("waiting for the gc_done_event on heap %d", wait_heap->heap_number));
10279 #endif // MULTIPLE_HEAPS
10280
10281 #ifdef _PREFAST_
10282         PREFIX_ASSUME(wait_heap != NULL);
10283 #endif // _PREFAST_
10284
10285         dwWaitResult = wait_heap->gc_done_event.Wait(timeOut, FALSE); 
10286     }
10287     disable_preemptive (cooperative_mode);
10288
10289     return dwWaitResult;
10290 }
10291
10292 void 
10293 gc_heap::set_gc_done()
10294 {
10295     enter_gc_done_event_lock();
10296     if (!gc_done_event_set)
10297     {
10298         gc_done_event_set = true;
10299         dprintf (2, ("heap %d: setting gc_done_event", heap_number));
10300         gc_done_event.Set();
10301     }
10302     exit_gc_done_event_lock();
10303 }
10304
10305 void 
10306 gc_heap::reset_gc_done()
10307 {
10308     enter_gc_done_event_lock();
10309     if (gc_done_event_set)
10310     {
10311         gc_done_event_set = false;
10312         dprintf (2, ("heap %d: resetting gc_done_event", heap_number));
10313         gc_done_event.Reset();
10314     }
10315     exit_gc_done_event_lock();
10316 }
10317
10318 void 
10319 gc_heap::enter_gc_done_event_lock()
10320 {
10321     uint32_t dwSwitchCount = 0;
10322 retry:
10323
10324     if (Interlocked::CompareExchange(&gc_done_event_lock, 0, -1) >= 0)
10325     {
10326         while (gc_done_event_lock >= 0)
10327         {
10328             if  (g_num_processors > 1)
10329             {
10330                 int spin_count = yp_spin_count_unit;
10331                 for (int j = 0; j < spin_count; j++)
10332                 {
10333                     if  (gc_done_event_lock < 0)
10334                         break;
10335                     YieldProcessor();           // indicate to the processor that we are spinning
10336                 }
10337                 if  (gc_done_event_lock >= 0)
10338                     GCToOSInterface::YieldThread(++dwSwitchCount);
10339             }
10340             else
10341                 GCToOSInterface::YieldThread(++dwSwitchCount);
10342         }
10343         goto retry;
10344     }
10345 }
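
// The lock above is a test-and-test-and-set spin lock: CompareExchange attempts
// the acquire (-1 means free, >= 0 means held), failed acquirers spin on plain
// reads with YieldProcessor for yp_spin_count_unit iterations, then yield the
// thread. A minimal sketch of the same pattern over a standalone lock word
// (assumed helper, not shipping code):
#if 0
static void spin_enter (VOLATILE(int32_t)* lock_word)
{
    uint32_t switch_count = 0;
    while (Interlocked::CompareExchange (lock_word, 0, -1) >= 0)
    {
        // Spin on plain reads until the lock looks free again.
        while (*lock_word >= 0)
        {
            for (int j = 0; j < 1024; j++)   // hypothetical spin budget
            {
                if (*lock_word < 0)
                    break;
                YieldProcessor();            // tell the core we are spinning
            }
            if (*lock_word >= 0)
                GCToOSInterface::YieldThread (++switch_count);
        }
    }
}
#endif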
10346
10347 void 
10348 gc_heap::exit_gc_done_event_lock()
10349 {
10350     gc_done_event_lock = -1;
10351 }
10352
10353 #ifndef MULTIPLE_HEAPS
10354
10355 #ifdef RECORD_LOH_STATE
10356 int gc_heap::loh_state_index = 0;
10357 gc_heap::loh_state_info gc_heap::last_loh_states[max_saved_loh_states];
10358 #endif //RECORD_LOH_STATE
10359
10360 VOLATILE(int32_t) gc_heap::gc_done_event_lock;
10361 VOLATILE(bool) gc_heap::gc_done_event_set;
10362 GCEvent gc_heap::gc_done_event;
10363 #endif //!MULTIPLE_HEAPS
10364 VOLATILE(bool) gc_heap::internal_gc_done;
10365
10366 void gc_heap::add_saved_spinlock_info (
10367             bool loh_p, 
10368             msl_enter_state enter_state, 
10369             msl_take_state take_state)
10370
10371 {
10372 #ifdef SPINLOCK_HISTORY
10373     spinlock_info* current = &last_spinlock_info[spinlock_info_index];
10374
10375     current->enter_state = enter_state;
10376     current->take_state = take_state;
10377     current->thread_id.SetToCurrentThread();
10378     current->loh_p = loh_p;
10379     dprintf (SPINLOCK_LOG, ("[%d]%s %s %s", 
10380         heap_number, 
10381         (loh_p ? "loh" : "soh"),
10382         ((enter_state == me_acquire) ? "E" : "L"),
10383         msl_take_state_str[take_state]));
10384
10385     spinlock_info_index++;
10386
10387     assert (spinlock_info_index <= max_saved_spinlock_info);
10388
10389     if (spinlock_info_index >= max_saved_spinlock_info)
10390     {
10391         spinlock_info_index = 0;
10392     }
10393 #else
10394     MAYBE_UNUSED_VAR(enter_state);
10395     MAYBE_UNUSED_VAR(take_state);
10396 #endif //SPINLOCK_HISTORY
10397 }
10398
10399 int
10400 gc_heap::init_gc_heap (int  h_number)
10401 {
10402 #ifdef MULTIPLE_HEAPS
10403
10404     time_bgc_last = 0;
10405
10406 #ifdef SPINLOCK_HISTORY
10407     spinlock_info_index = 0;
10408     memset (last_spinlock_info, 0, sizeof(last_spinlock_info));
10409 #endif //SPINLOCK_HISTORY
10410
10411     // initialize per heap members.
10412     ephemeral_low = (uint8_t*)1;
10413
10414     ephemeral_high = MAX_PTR;
10415
10416     ephemeral_heap_segment = 0;
10417
10418     freeable_large_heap_segment = 0;
10419
10420     condemned_generation_num = 0;
10421
10422     blocking_collection = FALSE;
10423
10424     generation_skip_ratio = 100;
10425
10426     mark_stack_tos = 0;
10427
10428     mark_stack_bos = 0;
10429
10430     mark_stack_array_length = 0;
10431
10432     mark_stack_array = 0;
10433
10434 #if defined (_DEBUG) && defined (VERIFY_HEAP)
10435     verify_pinned_queue_p = FALSE;
10436 #endif // _DEBUG && VERIFY_HEAP
10437
10438     loh_pinned_queue_tos = 0;
10439
10440     loh_pinned_queue_bos = 0;
10441
10442     loh_pinned_queue_length = 0;
10443
10444     loh_pinned_queue_decay = LOH_PIN_DECAY;
10445
10446     loh_pinned_queue = 0;
10447
10448     min_overflow_address = MAX_PTR;
10449
10450     max_overflow_address = 0;
10451
10452     gen0_bricks_cleared = FALSE;
10453
10454     gen0_must_clear_bricks = 0;
10455
10456     allocation_quantum = CLR_SIZE;
10457
10458     more_space_lock_soh = gc_lock;
10459
10460     more_space_lock_loh = gc_lock;
10461
10462     ro_segments_in_range = FALSE;
10463
10464     loh_alloc_since_cg = 0;
10465
10466     new_heap_segment = NULL;
10467
10468     gen0_allocated_after_gc_p = false;
10469
10470 #ifdef RECORD_LOH_STATE
10471     loh_state_index = 0;
10472 #endif //RECORD_LOH_STATE
10473 #endif //MULTIPLE_HEAPS
10474
10475 #ifdef MULTIPLE_HEAPS
10476     if (h_number > n_heaps)
10477     {
10478         assert (!"Number of heaps exceeded");
10479         return 0;
10480     }
10481
10482     heap_number = h_number;
10483 #endif //MULTIPLE_HEAPS
10484
10485     memset (&oom_info, 0, sizeof (oom_info));
10486     memset (&fgm_result, 0, sizeof (fgm_result));
10487     if (!gc_done_event.CreateManualEventNoThrow(FALSE))
10488     {
10489         return 0;
10490     }
10491     gc_done_event_lock = -1;
10492     gc_done_event_set = false;
10493
10494 #ifndef SEG_MAPPING_TABLE
10495     if (!gc_heap::seg_table->ensure_space_for_insert ())
10496     {
10497         return 0;
10498     }
10499 #endif //!SEG_MAPPING_TABLE
10500
10501     heap_segment* seg = get_initial_segment (soh_segment_size, h_number);
10502     if (!seg)
10503         return 0;
10504
10505     FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg),
10506                               (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
10507                               gc_etw_segment_small_object_heap);
10508     
10509 #ifdef SEG_MAPPING_TABLE
10510     seg_mapping_table_add_segment (seg, __this);
10511 #else //SEG_MAPPING_TABLE
10512     seg_table->insert ((uint8_t*)seg, sdelta);
10513 #endif //SEG_MAPPING_TABLE
10514
10515 #ifdef MULTIPLE_HEAPS
10516     heap_segment_heap (seg) = this;
10517 #endif //MULTIPLE_HEAPS
10518
10519     /* todo: Need a global lock for this */
10520     uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))];
10521     own_card_table (ct);
10522     card_table = translate_card_table (ct);
10523     /* End of global lock */
10524
10525     brick_table = card_table_brick_table (ct);
10526     highest_address = card_table_highest_address (ct);
10527     lowest_address = card_table_lowest_address (ct);
10528
10529 #ifdef CARD_BUNDLE
10530     card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
10531     assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
10532             card_table_card_bundle_table (ct));
10533 #endif //CARD_BUNDLE
10534
10535 #ifdef MARK_ARRAY
10536     if (gc_can_use_concurrent)
10537         mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
10538     else
10539         mark_array = NULL;
10540 #endif //MARK_ARRAY
10541
10542     uint8_t*  start = heap_segment_mem (seg);
10543
10544     for (int i = 0; i < 1 + max_generation; i++)
10545     {
10546         make_generation (generation_table [ (max_generation - i) ],
10547                          seg, start, 0);
10548         generation_table [(max_generation - i)].gen_num = max_generation - i;
10549         start += Align (min_obj_size);
10550     }
10551
10552     heap_segment_allocated (seg) = start;
10553     alloc_allocated = start;
10554     heap_segment_used (seg) = start - plug_skew;
10555
10556     ephemeral_heap_segment = seg;
10557
10558 #ifndef SEG_MAPPING_TABLE
10559     if (!gc_heap::seg_table->ensure_space_for_insert ())
10560     {
10561         return 0;
10562     }
10563 #endif //!SEG_MAPPING_TABLE
10564     //Create the large segment generation
10565     heap_segment* lseg = get_initial_segment(min_loh_segment_size, h_number);
10566     if (!lseg)
10567         return 0;
10568     lseg->flags |= heap_segment_flags_loh;
10569
10570     FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(lseg),
10571                               (size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
10572                               gc_etw_segment_large_object_heap);
10573
10574 #ifdef SEG_MAPPING_TABLE
10575     seg_mapping_table_add_segment (lseg, __this);
10576 #else //SEG_MAPPING_TABLE
10577     seg_table->insert ((uint8_t*)lseg, sdelta);
10578 #endif //SEG_MAPPING_TABLE
10579
10580     generation_table [max_generation].free_list_allocator = allocator(NUM_GEN2_ALIST, BASE_GEN2_ALIST, gen2_alloc_list);
10581     //assign the alloc_list for the large generation 
10582     generation_table [max_generation+1].free_list_allocator = allocator(NUM_LOH_ALIST, BASE_LOH_ALIST, loh_alloc_list);
10583     generation_table [max_generation+1].gen_num = max_generation+1;
10584     make_generation (generation_table [max_generation+1],lseg, heap_segment_mem (lseg), 0);
10585     heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE));
10586     heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew;
10587
10588     for (int gen_num = 0; gen_num <= 1 + max_generation; gen_num++)
10589     {
10590         generation*  gen = generation_of (gen_num);
10591         make_unused_array (generation_allocation_start (gen), Align (min_obj_size));
10592     }
10593
10594 #ifdef MULTIPLE_HEAPS
10595     heap_segment_heap (lseg) = this;
10596
10597     //initialize the alloc context heap
10598     generation_alloc_context (generation_of (0))->set_alloc_heap(vm_heap);
10599
10600     //initialize the alloc context heap
10601     generation_alloc_context (generation_of (max_generation+1))->set_alloc_heap(vm_heap);
10602
10603 #endif //MULTIPLE_HEAPS
10604
10605     //Do this only once
10606 #ifdef MULTIPLE_HEAPS
10607     if (h_number == 0)
10608 #endif //MULTIPLE_HEAPS
10609     {
10610 #ifndef INTERIOR_POINTERS
10611         //set the brick_table for large objects
10612         //but the default value is cleared
10613         //clear_brick_table ((uint8_t*)heap_segment_mem (lseg),
10614         //                   (uint8_t*)heap_segment_reserved (lseg));
10615
10616 #else //INTERIOR_POINTERS
10617
10618         //Because of the interior pointer business, we have to clear
10619         //the whole brick table
10620         //but the default value is cleared
10621         // clear_brick_table (lowest_address, highest_address);
10622 #endif //INTERIOR_POINTERS
10623     }
10624
10625     if (!init_dynamic_data())
10626     {
10627         return 0;
10628     }
10629
10630     etw_allocation_running_amount[0] = 0;
10631     etw_allocation_running_amount[1] = 0;
10632
10633     //needs to be done after the dynamic data has been initialized
10634 #ifndef MULTIPLE_HEAPS
10635     allocation_running_amount = dd_min_size (dynamic_data_of (0));
10636 #endif //!MULTIPLE_HEAPS
10637
10638     fgn_last_alloc = dd_min_size (dynamic_data_of (0));
10639
10640     mark* arr = new (nothrow) (mark [MARK_STACK_INITIAL_LENGTH]);
10641     if (!arr)
10642         return 0;
10643
10644     make_mark_stack(arr);
10645
10646 #ifdef BACKGROUND_GC
10647     freeable_small_heap_segment = 0;
10648     gchist_index_per_heap = 0;
10649     uint8_t** b_arr = new (nothrow) (uint8_t* [MARK_STACK_INITIAL_LENGTH]);
10650     if (!b_arr)
10651         return 0;
10652
10653     make_background_mark_stack (b_arr);
10654 #endif //BACKGROUND_GC
10655
10656     ephemeral_low = generation_allocation_start(generation_of(max_generation - 1));
10657     ephemeral_high = heap_segment_reserved(ephemeral_heap_segment);
10658     if (heap_number == 0)
10659     {
10660         stomp_write_barrier_initialize(
10661 #ifdef MULTIPLE_HEAPS
10662             reinterpret_cast<uint8_t*>(1), reinterpret_cast<uint8_t*>(~0)
10663 #else
10664             ephemeral_low, ephemeral_high
10665 #endif //!MULTIPLE_HEAPS
10666         );
10667     }
10668
10669 #ifdef MARK_ARRAY
10670     // why would we clear the mark array for this page? it should be cleared..
10671     // clear the first committed page
10672     //if(gc_can_use_concurrent)
10673     //{
10674     //    clear_mark_array (align_lower_page (heap_segment_mem (seg)), heap_segment_committed (seg));
10675     //}
10676 #endif //MARK_ARRAY
10677
10678 #ifdef MULTIPLE_HEAPS
10679     //register the heap in the heaps array
10680
10681     if (!create_gc_thread ())
10682         return 0;
10683
10684     g_heaps [heap_number] = this;
10685
10686 #endif //MULTIPLE_HEAPS
10687
10688 #ifdef FEATURE_PREMORTEM_FINALIZATION
10689     HRESULT hr = AllocateCFinalize(&finalize_queue);
10690     if (FAILED(hr))
10691         return 0;
10692 #endif // FEATURE_PREMORTEM_FINALIZATION
10693
10694     max_free_space_items = MAX_NUM_FREE_SPACES;
10695
10696     bestfit_seg = new (nothrow) seg_free_spaces (heap_number);
10697
10698     if (!bestfit_seg)
10699     {
10700         return 0;
10701     }
10702
10703     if (!bestfit_seg->alloc())
10704     {
10705         return 0;
10706     }
10707
10708     last_gc_before_oom = FALSE;
10709
10710     sufficient_gen0_space_p = FALSE;
10711
10712 #ifdef MULTIPLE_HEAPS
10713
10714 #ifdef HEAP_ANALYZE
10715
10716     heap_analyze_success = TRUE;
10717
10718     internal_root_array  = 0;
10719
10720     internal_root_array_index = 0;
10721
10722     internal_root_array_length = initial_internal_roots;
10723
10724     current_obj          = 0;
10725
10726     current_obj_size     = 0;
10727
10728 #endif //HEAP_ANALYZE
10729
10730 #endif // MULTIPLE_HEAPS
10731
10732 #ifdef BACKGROUND_GC
10733     bgc_thread_id.Clear();
10734
10735     if (!create_bgc_thread_support())
10736     {
10737         return 0;
10738     }
10739
10740     bgc_alloc_lock = new (nothrow) exclusive_sync;
10741     if (!bgc_alloc_lock)
10742     {
10743         return 0;
10744     }
10745
10746     bgc_alloc_lock->init();
10747
10748     if (h_number == 0)
10749     {
10750         if (!recursive_gc_sync::init())
10751             return 0;
10752     }
10753
10754     bgc_thread_running = 0;
10755     bgc_thread = 0;
10756     bgc_threads_timeout_cs.Initialize();
10757     expanded_in_fgc = 0;
10758     current_bgc_state = bgc_not_in_process;
10759     background_soh_alloc_count = 0;
10760     background_loh_alloc_count = 0;
10761     bgc_overflow_count = 0;
10762     end_loh_size = dd_min_size (dynamic_data_of (max_generation + 1));
10763 #endif //BACKGROUND_GC
10764
10765 #ifdef GC_CONFIG_DRIVEN
10766     memset (interesting_data_per_heap, 0, sizeof (interesting_data_per_heap));
10767     memset(compact_reasons_per_heap, 0, sizeof (compact_reasons_per_heap));
10768     memset(expand_mechanisms_per_heap, 0, sizeof (expand_mechanisms_per_heap));
10769     memset(interesting_mechanism_bits_per_heap, 0, sizeof (interesting_mechanism_bits_per_heap));
10770 #endif //GC_CONFIG_DRIVEN
10771
10772     return 1;
10773 }
10774
10775 void
10776 gc_heap::destroy_semi_shared()
10777 {
10778 //TODO: will need to move this to per heap
10779 //#ifdef BACKGROUND_GC
10780 //    if (c_mark_list)
10781 //        delete c_mark_list;
10782 //#endif //BACKGROUND_GC
10783
10784 #ifdef MARK_LIST
10785     if (g_mark_list)
10786         delete g_mark_list;
10787 #endif //MARK_LIST
10788
10789 #if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
10790     if (seg_mapping_table)
10791         delete seg_mapping_table;
10792 #endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
10793
10794 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
10795     //destroy the segment map
10796     seg_table->delete_sorted_table();
10797 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
10798 }
10799
10800 void
10801 gc_heap::self_destroy()
10802 {
10803 #ifdef BACKGROUND_GC
10804     kill_gc_thread();
10805 #endif //BACKGROUND_GC
10806
10807     if (gc_done_event.IsValid())
10808     {
10809         gc_done_event.CloseEvent();
10810     }
10811
10812     // destroy every segment.
10813     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
10814
10815     PREFIX_ASSUME(seg != NULL);
10816
10817     heap_segment* next_seg;
10818     while (seg)
10819     {
10820         next_seg = heap_segment_next_rw (seg);
10821         delete_heap_segment (seg);
10822         seg = next_seg;
10823     }
10824
10825     seg = heap_segment_rw (generation_start_segment (generation_of (max_generation+1)));
10826
10827     PREFIX_ASSUME(seg != NULL);
10828
10829     while (seg)
10830     {
10831         next_seg = heap_segment_next_rw (seg);
10832         delete_heap_segment (seg);
10833         seg = next_seg;
10834     }
10835
10836     // get rid of the card table
10837     release_card_table (card_table);
10838
10839     // destroy the mark stack
10840     delete [] mark_stack_array;  // allocated with new[]
10841
10842 #ifdef FEATURE_PREMORTEM_FINALIZATION
10843     if (finalize_queue)
10844         delete finalize_queue;
10845 #endif // FEATURE_PREMORTEM_FINALIZATION
10846 }
10847
10848 void
10849 gc_heap::destroy_gc_heap(gc_heap* heap)
10850 {
10851     heap->self_destroy();
10852     delete heap;
10853 }
10854
10855 // Destroys resources owned by gc. It is assumed that a last GC has been performed and that
10856 // the finalizer queue has been drained.
10857 void gc_heap::shutdown_gc()
10858 {
10859     destroy_semi_shared();
10860
10861 #ifdef MULTIPLE_HEAPS
10862     //delete the heaps array
10863     delete [] g_heaps;  // allocated with new[] in initialize_gc
10864     destroy_thread_support();
10865     n_heaps = 0;
10866 #endif //MULTIPLE_HEAPS
10867     //destroy seg_manager
10868
10869     destroy_initial_memory();
10870
10871     GCToOSInterface::Shutdown();
10872 }
10873
10874 inline
10875 BOOL gc_heap::size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
10876                           uint8_t* old_loc, int use_padding)
10877 {
10878     BOOL already_padded = FALSE;
10879 #ifdef SHORT_PLUGS
10880     if ((old_loc != 0) && (use_padding & USE_PADDING_FRONT))
10881     {
10882         alloc_pointer = alloc_pointer + Align (min_obj_size);
10883         already_padded = TRUE;
10884     }
10885 #endif //SHORT_PLUGS
10886
10887     if (!((old_loc == 0) || same_large_alignment_p (old_loc, alloc_pointer)))
10888         size = size + switch_alignment_size (already_padded);
10889
10890 #ifdef FEATURE_STRUCTALIGN
10891     alloc_pointer = StructAlign(alloc_pointer, requiredAlignment, alignmentOffset);
10892 #endif // FEATURE_STRUCTALIGN
10893
10894     // in allocate_in_condemned_generation we can have this when we
10895     // set the alloc_limit to plan_allocated which could be less than 
10896     // alloc_ptr
10897     if (alloc_limit < alloc_pointer)
10898     {
10899         return FALSE;
10900     }
10901
10902     if (old_loc != 0)
10903     {
10904         return (((size_t)(alloc_limit - alloc_pointer) >= (size + ((use_padding & USE_PADDING_TAIL)? Align(min_obj_size) : 0))) 
10905 #ifdef SHORT_PLUGS
10906                 ||((!(use_padding & USE_PADDING_FRONT)) && ((alloc_pointer + size) == alloc_limit))
10907 #else //SHORT_PLUGS
10908                 ||((alloc_pointer + size) == alloc_limit)
10909 #endif //SHORT_PLUGS
10910             );
10911     }
10912     else
10913     {
10914         assert (size == Align (min_obj_size));
10915         return ((size_t)(alloc_limit - alloc_pointer) >= size);
10916     }
10917 }
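
// Worked example (hypothetical sizes): for a relocated plug (old_loc != 0) with
// both USE_PADDING_FRONT and USE_PADDING_TAIL requested, the test above
// effectively requires Align (min_obj_size) + size + Align (min_obj_size) bytes
// between alloc_pointer and alloc_limit - the front pad is folded into
// alloc_pointer and the tail pad into the size comparison.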
10918
10919 inline
10920 BOOL gc_heap::a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
10921                             int align_const)
10922 {
10923     // We could have run into cases where this is true when alloc_allocated is
10924     // the same as the seg committed.
10925     if (alloc_limit < alloc_pointer)
10926     {
10927         return FALSE;
10928     }
10929
10930     return ((size_t)(alloc_limit - alloc_pointer) >= (size + Align(min_obj_size, align_const)));
10931 }
10932
10933 // Grow by committing more pages
10934 BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address)
10935 {
10936     assert (high_address <= heap_segment_reserved (seg));
10937
10938     //return 0 if we are at the end of the segment.
10939     if (align_on_page (high_address) > heap_segment_reserved (seg))
10940         return FALSE;
10941
10942     if (high_address <= heap_segment_committed (seg))
10943         return TRUE;
10944
10945     size_t c_size = align_on_page ((size_t)(high_address - heap_segment_committed (seg)));
10946     c_size = max (c_size, 16*OS_PAGE_SIZE);
10947     c_size = min (c_size, (size_t)(heap_segment_reserved (seg) - heap_segment_committed (seg)));
10948
10949     if (c_size == 0)
10950         return FALSE;
10951
10952     STRESS_LOG2(LF_GC, LL_INFO10000,
10953                 "Growing heap_segment: %Ix high address: %Ix\n",
10954                 (size_t)seg, (size_t)high_address);
10955
10956     dprintf(3, ("Growing segment allocation %Ix %Ix", (size_t)heap_segment_committed(seg),c_size));
10957     
10958     if (!virtual_alloc_commit_for_heap(heap_segment_committed (seg), c_size, heap_number))
10959     {
10960         dprintf(3, ("Cannot grow heap segment"));
10961         return FALSE;
10962     }
10963 #ifdef MARK_ARRAY
10964 #ifndef BACKGROUND_GC
10965     clear_mark_array (heap_segment_committed (seg),
10966                       heap_segment_committed (seg)+c_size, TRUE);
10967 #endif //BACKGROUND_GC
10968 #endif //MARK_ARRAY
10969     heap_segment_committed (seg) += c_size;
10970     STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix",
10971                 (size_t)heap_segment_committed (seg));
10972
10973     assert (heap_segment_committed (seg) <= heap_segment_reserved (seg));
10974
10975     assert (high_address <= heap_segment_committed (seg));
10976
10977     return TRUE;
10978 }
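
// Worked example of the commit-size clamp (illustrative numbers): if
// high_address sits 10KB past heap_segment_committed and OS_PAGE_SIZE is 4KB,
// the page-aligned request of 12KB is raised to the 16*OS_PAGE_SIZE (64KB)
// minimum, then capped at whatever reserve remains in the segment:
#if 0
size_t request = align_on_page ((size_t)(10 * 1024));       // 12KB after alignment
size_t c_size = max (request, (size_t)(16 * OS_PAGE_SIZE)); // raised to 64KB
c_size = min (c_size, remaining_reserve);                   // remaining_reserve is hypothetical
#endif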
10979
10980 inline
10981 int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t* old_loc, size_t size, BOOL pad_front_p  REQD_ALIGN_AND_OFFSET_DCL)
10982 {
10983 #ifdef SHORT_PLUGS
10984     if ((old_loc != 0) && pad_front_p)
10985     {
10986         allocated = allocated + Align (min_obj_size);
10987     }
10988 #endif //SHORT_PLUGS
10989
10990     if (!((old_loc == 0) || same_large_alignment_p (old_loc, allocated)))
10991         size = size + switch_alignment_size (FALSE);
10992 #ifdef FEATURE_STRUCTALIGN
10993     size_t pad = ComputeStructAlignPad(allocated, requiredAlignment, alignmentOffset);
10994     return grow_heap_segment (seg, allocated + pad + size);
10995 #else // FEATURE_STRUCTALIGN
10996     return grow_heap_segment (seg, allocated + size);
10997 #endif // FEATURE_STRUCTALIGN
10998 }
10999
11000 //used only in older generation allocation (i.e. during a GC).
11001 void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
11002                             int gennum)
11003 {
11004     UNREFERENCED_PARAMETER(gennum);
11005     dprintf (3, ("gc Expanding segment allocation"));
11006     heap_segment* seg = generation_allocation_segment (gen);
11007     if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
11008     {
11009         if (generation_allocation_limit (gen) == heap_segment_plan_allocated (seg))
11010         {
11011             assert (generation_allocation_pointer (gen) >= heap_segment_mem (seg));
11012             assert (generation_allocation_pointer (gen) <= heap_segment_committed (seg));
11013             heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
11014         }
11015         else
11016         {
11017             uint8_t*  hole = generation_allocation_pointer (gen);
11018             size_t  size = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));
11019
11020             if (size != 0)
11021             {
11022                 dprintf (3, ("filling up hole: %Ix, size %Ix", hole, size));
11023                 size_t allocated_size = generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen);
11024                 if (size >= Align (min_free_list))
11025                 {
11026                     if (allocated_size < min_free_list)
11027                     {
11028                         if (size >= (Align (min_free_list) + Align (min_obj_size)))
11029                         {
11030                             //split hole into min obj + threadable free item
11031                             make_unused_array (hole, min_obj_size);
11032                             generation_free_obj_space (gen) += Align (min_obj_size);
11033                             make_unused_array (hole + Align (min_obj_size), size - Align (min_obj_size));
11034                             generation_free_list_space (gen) += size - Align (min_obj_size);
11035                             generation_allocator(gen)->thread_item_front (hole + Align (min_obj_size), 
11036                                                                           size - Align (min_obj_size));
11037                             add_gen_free (gen->gen_num, (size - Align (min_obj_size)));
11038                         }
11039                         else
11040                         {
11041                             dprintf (3, ("allocated size too small, can't put back rest on free list %Ix", allocated_size));
11042                             make_unused_array (hole, size);
11043                             generation_free_obj_space (gen) += size;
11044                         }
11045                     }
11046                     else 
11047                     {
11048                         dprintf (3, ("threading hole in front of free list"));
11049                         make_unused_array (hole, size);
11050                         generation_free_list_space (gen) += size;
11051                         generation_allocator(gen)->thread_item_front (hole, size);
11052                         add_gen_free (gen->gen_num, size);
11053                     }
11054                 }
11055                 else
11056                 {
11057                     make_unused_array (hole, size);
11058                     generation_free_obj_space (gen) += size;
11059                 }
11060             }
11061         }
11062         generation_allocation_pointer (gen) = start;
11063         generation_allocation_context_start_region (gen) = start;
11064     }
11065     generation_allocation_limit (gen) = (start + limit_size);
11066 }
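
// Summary (illustrative note, not from the original source) of how the hole
// [alloc_ptr, alloc_limit) left in the old region is disposed of above:
//   - size < Align(min_free_list): too small to thread; the hole becomes
//     unthreaded free-object space;
//   - size >= Align(min_free_list) and the context already allocated at
//     least min_free_list bytes: thread the whole hole onto the free list;
//   - the context allocated less than min_free_list but the hole can hold
//     a min-size object plus a threadable item: split off Align(min_obj_size)
//     as free-object space and thread the remainder;
//   - otherwise the whole hole becomes free-object space.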
11067
11068 void verify_mem_cleared (uint8_t* start, size_t size)
11069 {
11070     if (!Aligned (size))
11071     {
11072         FATAL_GC_ERROR();
11073     }
11074
11075     PTR_PTR curr_ptr = (PTR_PTR) start;
11076     for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
11077     {
11078         if (*(curr_ptr++) != 0)
11079         {
11080             FATAL_GC_ERROR();
11081         }
11082     }
11083 }
11084
11085 #if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
11086 void gc_heap::set_batch_mark_array_bits (uint8_t* start, uint8_t* end)
11087 {
11088     size_t start_mark_bit = mark_bit_of (start);
11089     size_t end_mark_bit = mark_bit_of (end);
11090     unsigned int startbit = mark_bit_bit (start_mark_bit);
11091     unsigned int endbit = mark_bit_bit (end_mark_bit);
11092     size_t startwrd = mark_bit_word (start_mark_bit);
11093     size_t endwrd = mark_bit_word (end_mark_bit);
11094
11095     dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", 
11096         (size_t)start, (size_t)start_mark_bit, 
11097         (size_t)end, (size_t)end_mark_bit));
11098
11099     unsigned int firstwrd = ~(lowbits (~0, startbit));
11100     unsigned int lastwrd = ~(highbits (~0, endbit));
11101
11102     if (startwrd == endwrd)
11103     {
11104         unsigned int wrd = firstwrd & lastwrd;
11105         mark_array[startwrd] |= wrd;
11106         return;
11107     }
11108
11109     // set the first mark word.
11110     if (startbit)
11111     {
11112         mark_array[startwrd] |= firstwrd;
11113         startwrd++;
11114     }
11115
11116     for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
11117     {
11118         mark_array[wrdtmp] = ~(unsigned int)0;
11119     }
11120
11121     // set the last mark word.
11122     if (endbit)
11123     {
11124         mark_array[endwrd] |= lastwrd;
11125     }
11126 }
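
// Worked example (illustrative, not from the original source), assuming
// 32-bit mark words and that lowbits(x, n)/highbits(x, n) keep the bits
// below/from bit n, as their names suggest: with startbit == 5 and
// endbit == 20 in the same word, firstwrd == 0xFFFFFFE0 (bits 5..31),
// lastwrd == 0x000FFFFF (bits 0..19), and firstwrd & lastwrd == 0x000FFFE0
// sets exactly bits 5..19, i.e. the end bit is exclusive. When the range
// spans several words, the partial first and last words get the masks and
// every word in between is set to all ones.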
11127
11128 // makes sure that the mark array bits between start and end are 0.
11129 void gc_heap::check_batch_mark_array_bits (uint8_t* start, uint8_t* end)
11130 {
11131     size_t start_mark_bit = mark_bit_of (start);
11132     size_t end_mark_bit = mark_bit_of (end);
11133     unsigned int startbit = mark_bit_bit (start_mark_bit);
11134     unsigned int endbit = mark_bit_bit (end_mark_bit);
11135     size_t startwrd = mark_bit_word (start_mark_bit);
11136     size_t endwrd = mark_bit_word (end_mark_bit);
11137
11138     //dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", 
11139     //    (size_t)start, (size_t)start_mark_bit, 
11140     //    (size_t)end, (size_t)end_mark_bit));
11141
11142     unsigned int firstwrd = ~(lowbits (~0, startbit));
11143     unsigned int lastwrd = ~(highbits (~0, endbit));
11144
11145     if (startwrd == endwrd)
11146     {
11147         unsigned int wrd = firstwrd & lastwrd;
11148         if (mark_array[startwrd] & wrd)
11149         {
11150             dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11151                             wrd, startwrd, 
11152                             mark_array [startwrd], mark_word_address (startwrd)));
11153             FATAL_GC_ERROR();
11154         }
11155         return;
11156     }
11157
11158     // check the first mark word.
11159     if (startbit)
11160     {
11161         if (mark_array[startwrd] & firstwrd)
11162         {
11163             dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11164                             firstwrd, startwrd, 
11165                             mark_array [startwrd], mark_word_address (startwrd)));
11166             FATAL_GC_ERROR();
11167         }
11168
11169         startwrd++;
11170     }
11171
11172     for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
11173     {
11174         if (mark_array[wrdtmp])
11175         {
11176             dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11177                             wrdtmp, 
11178                             mark_array [wrdtmp], mark_word_address (wrdtmp)));
11179             FATAL_GC_ERROR();
11180         }
11181     }
11182
11183     // check the last mark word.
11184     if (endbit)
11185     {
11186         if (mark_array[endwrd] & lastwrd)
11187         {
11188             dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11189                             lastwrd, endwrd, 
11190                             mark_array [endwrd], mark_word_address (endwrd)));
11191             FATAL_GC_ERROR();
11192         }
11193     }
11194 }
11195 #endif //VERIFY_HEAP && BACKGROUND_GC
11196
11197 allocator::allocator (unsigned int num_b, size_t fbs, alloc_list* b)
11198 {
11199     assert (num_b < MAX_BUCKET_COUNT);
11200     num_buckets = num_b;
11201     frst_bucket_size = fbs;
11202     buckets = b;
11203 }
11204
11205 alloc_list& allocator::alloc_list_of (unsigned int bn)
11206 {
11207     assert (bn < num_buckets);
11208     if (bn == 0)
11209         return first_bucket;
11210     else
11211         return buckets [bn-1];
11212 }
11213
11214 size_t& allocator::alloc_list_damage_count_of (unsigned int bn)
11215 {
11216     assert (bn < num_buckets);
11217     if (bn == 0)
11218         return first_bucket.alloc_list_damage_count();
11219     else
11220         return buckets [bn-1].alloc_list_damage_count();
11221 }
11222
11223 void allocator::unlink_item (unsigned int bn, uint8_t* item, uint8_t* prev_item, BOOL use_undo_p)
11224 {
11225     //unlink the free_item
11226     alloc_list* al = &alloc_list_of (bn);
11227     if (prev_item)
11228     {
11229         if (use_undo_p && (free_list_undo (prev_item) == UNDO_EMPTY))
11230         {
11231             assert (item == free_list_slot (prev_item));
11232             free_list_undo (prev_item) = item;
11233             alloc_list_damage_count_of (bn)++;
11234         }
11235         free_list_slot (prev_item) = free_list_slot(item);
11236     }
11237     else
11238     {
11239         al->alloc_list_head() = (uint8_t*)free_list_slot(item);
11240     }
11241     if (al->alloc_list_tail() == item)
11242     {
11243         al->alloc_list_tail() = prev_item;
11244     }
11245 }
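
// Sketch (illustrative, not from the original source) of the undo protocol
// used during the plan phase. With use_undo_p, unlinking saves the link
// being broken in the previous item's undo slot:
//
//   before:  prev -> item -> next
//   after:   prev -> next          (free_list_undo(prev) == item)
//
// and bumps alloc_list_damage_count for the bucket. If the planned changes
// are abandoned, copy_from_alloc_list replays these undo slots to restore
// "prev -> item"; if they are kept, commit_alloc_list_changes just clears
// them. Unlinking the head item records no undo; the head is restored
// wholesale from the bucket snapshot instead.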
11246
11247 void allocator::clear()
11248 {
11249     for (unsigned int i = 0; i < num_buckets; i++)
11250     {
11251         alloc_list_head_of (i) = 0;
11252         alloc_list_tail_of (i) = 0;
11253     }
11254 }
11255
11256 //always thread to the end.
11257 void allocator::thread_free_item (uint8_t* item, uint8_t*& head, uint8_t*& tail)
11258 {
11259     free_list_slot (item) = 0;
11260     free_list_undo (item) = UNDO_EMPTY;
11261     assert (item != head);
11262
11263     if (head == 0)
11264     {
11265        head = item;
11266     }
11267     //TODO: This shouldn't happen anymore - verify that's the case.
11268     //the following is necessary because the last free element
11269     //may have been truncated, and tail isn't updated.
11270     else if (free_list_slot (head) == 0)
11271     {
11272         free_list_slot (head) = item;
11273     }
11274     else
11275     {
11276         assert (item != tail);
11277         assert (free_list_slot(tail) == 0);
11278         free_list_slot (tail) = item;
11279     }
11280     tail = item;
11281 }
11282
11283 void allocator::thread_item (uint8_t* item, size_t size)
11284 {
11285     size_t sz = frst_bucket_size;
11286     unsigned int a_l_number = 0; 
11287
11288     for (; a_l_number < (num_buckets-1); a_l_number++)
11289     {
11290         if (size < sz)
11291         {
11292             break;
11293         }
11294         sz = sz * 2;
11295     }
11296     alloc_list* al = &alloc_list_of (a_l_number);
11297     thread_free_item (item, 
11298                       al->alloc_list_head(),
11299                       al->alloc_list_tail());
11300 }
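
// Worked example (illustrative, not from the original source): buckets are
// sized in powers of two starting at frst_bucket_size. Assuming, for
// illustration, frst_bucket_size == 256 and num_buckets == 4, thread_item
// maps sizes as:
//   [0, 256)    -> bucket 0
//   [256, 512)  -> bucket 1
//   [512, 1024) -> bucket 2
//   [1024, ...) -> bucket 3 (the last bucket catches everything larger)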
11301
11302 void allocator::thread_item_front (uint8_t* item, size_t size)
11303 {
11304     //find the right free list
11305     size_t sz = frst_bucket_size;
11306     unsigned int a_l_number = 0; 
11307     for (; a_l_number < (num_buckets-1); a_l_number++)
11308     {
11309         if (size < sz)
11310         {
11311             break;
11312         }
11313         sz = sz * 2;
11314     }
11315     alloc_list* al = &alloc_list_of (a_l_number);
11316     free_list_slot (item) = al->alloc_list_head();
11317     free_list_undo (item) = UNDO_EMPTY;
11318
11319     if (al->alloc_list_tail() == 0)
11320     {
11321         al->alloc_list_tail() = al->alloc_list_head();
11322     }
11323     al->alloc_list_head() = item;
11324     if (al->alloc_list_tail() == 0)
11325     {
11326         al->alloc_list_tail() = item;
11327     }
11328 }
11329
11330 void allocator::copy_to_alloc_list (alloc_list* toalist)
11331 {
11332     for (unsigned int i = 0; i < num_buckets; i++)
11333     {
11334         toalist [i] = alloc_list_of (i);
11335 #ifdef FL_VERIFICATION
11336         uint8_t* free_item = alloc_list_head_of (i);
11337         size_t count = 0;
11338         while (free_item)
11339         {
11340             count++;
11341             free_item = free_list_slot (free_item);
11342         }
11343
11344         toalist[i].item_count = count;
11345 #endif //FL_VERIFICATION
11346     }
11347 }
11348
11349 void allocator::copy_from_alloc_list (alloc_list* fromalist)
11350 {
11351     BOOL repair_list = !discard_if_no_fit_p ();
11352     for (unsigned int i = 0; i < num_buckets; i++)
11353     {
11354         size_t count = alloc_list_damage_count_of (i);
11355         alloc_list_of (i) = fromalist [i];
11356         assert (alloc_list_damage_count_of (i) == 0);
11357
11358         if (repair_list)
11359         {
11360             //repair the list
11361             //new items may have been added during the plan phase 
11362             //items may have been unlinked. 
11363             uint8_t* free_item = alloc_list_head_of (i);
11364             while (free_item && count)
11365             {
11366                 assert (((CObjectHeader*)free_item)->IsFree());
11367                 if ((free_list_undo (free_item) != UNDO_EMPTY))
11368                 {
11369                     count--;
11370                     free_list_slot (free_item) = free_list_undo (free_item);
11371                     free_list_undo (free_item) = UNDO_EMPTY;
11372                 }
11373
11374                 free_item = free_list_slot (free_item);
11375             }
11376
11377 #ifdef FL_VERIFICATION
11378             free_item = alloc_list_head_of (i);
11379             size_t item_count = 0;
11380             while (free_item)
11381             {
11382                 item_count++;
11383                 free_item = free_list_slot (free_item);
11384             }
11385
11386             assert (item_count == alloc_list_of (i).item_count);
11387 #endif //FL_VERIFICATION
11388         }
11389 #ifdef DEBUG
11390         uint8_t* tail_item = alloc_list_tail_of (i);
11391         assert ((tail_item == 0) || (free_list_slot (tail_item) == 0));
11392 #endif
11393     }
11394 }
11395
11396 void allocator::commit_alloc_list_changes()
11397 {
11398     BOOL repair_list = !discard_if_no_fit_p ();
11399     if (repair_list)
11400     {
11401         for (unsigned int i = 0; i < num_buckets; i++)
11402         {
11403             //remove the undo info from list. 
11404             uint8_t* free_item = alloc_list_head_of (i);
11405             size_t count = alloc_list_damage_count_of (i);
11406             while (free_item && count)
11407             {
11408                 assert (((CObjectHeader*)free_item)->IsFree());
11409
11410                 if (free_list_undo (free_item) != UNDO_EMPTY)
11411                 {
11412                     free_list_undo (free_item) = UNDO_EMPTY;
11413                     count--;
11414                 }
11415
11416                 free_item = free_list_slot (free_item);
11417             }
11418
11419             alloc_list_damage_count_of (i) = 0; 
11420         }
11421     }
11422 }
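
// Summary (illustrative, not from the original source): the free-list
// snapshot/repair lifecycle is roughly
//   1. copy_to_alloc_list        - snapshot head/tail of every bucket;
//   2. plan phase                - unlink_item(..., use_undo_p=TRUE)
//                                  records undo info per unlink;
//   3a. copy_from_alloc_list     - plan abandoned: restore the snapshot
//                                  and replay the undo slots so unlinked
//                                  items rejoin the lists; or
//   3b. commit_alloc_list_changes - plan kept: retain the new lists and
//                                  just wipe the undo slots.
// Either way every bucket's alloc_list_damage_count returns to zero.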
11423
11424 void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
11425                                 alloc_context* acontext, heap_segment* seg,
11426                                 int align_const, int gen_number)
11427 {
11428     bool loh_p = (gen_number > 0);
11429     GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
11430
11431     size_t aligned_min_obj_size = Align(min_obj_size, align_const);
11432
11433     if (seg)
11434     {
11435         assert (heap_segment_used (seg) <= heap_segment_committed (seg));
11436     }
11437
11438 #ifdef MULTIPLE_HEAPS
11439     if (gen_number == 0)
11440     {
11441         if (!gen0_allocated_after_gc_p)
11442         {
11443             gen0_allocated_after_gc_p = true;
11444         }
11445     }
11446 #endif //MULTIPLE_HEAPS
11447
11448     dprintf (3, ("Expanding segment allocation [%Ix, %Ix[", (size_t)start,
11449                (size_t)start + limit_size - aligned_min_obj_size));
11450
11451     if ((acontext->alloc_limit != start) &&
11452         (acontext->alloc_limit + aligned_min_obj_size) != start)
11453     {
11454         uint8_t*  hole = acontext->alloc_ptr;
11455         if (hole != 0)
11456         {
11457             size_t  size = (acontext->alloc_limit - acontext->alloc_ptr);
11458             dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + size + Align (min_obj_size, align_const)));
11459             // when we are finishing an allocation from a free list
11460             // we know that the free area was Align(min_obj_size) larger
11461             acontext->alloc_bytes -= size;
11462             size_t free_obj_size = size + aligned_min_obj_size;
11463             make_unused_array (hole, free_obj_size);
11464             generation_free_obj_space (generation_of (gen_number)) += free_obj_size;
11465         }
11466         acontext->alloc_ptr = start;
11467     }
11468     else
11469     {
11470         if (gen_number == 0)
11471         {
11472             size_t pad_size = Align (min_obj_size, align_const);
11473             make_unused_array (acontext->alloc_ptr, pad_size);
11474             dprintf (3, ("contigous ac: making min obj gap %Ix->%Ix(%Id)", 
11475                 acontext->alloc_ptr, (acontext->alloc_ptr + pad_size), pad_size));
11476             acontext->alloc_ptr += pad_size;
11477         }
11478     }
11479     acontext->alloc_limit = (start + limit_size - aligned_min_obj_size);
11480     acontext->alloc_bytes += limit_size - ((gen_number < max_generation + 1) ? aligned_min_obj_size : 0);
11481
11482 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
11483     if (g_fEnableAppDomainMonitoring)
11484     {
11485         GCToEEInterface::RecordAllocatedBytesForHeap(limit_size, heap_number);
11486     }
11487 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
11488
11489     uint8_t* saved_used = 0;
11490
11491     if (seg)
11492     {
11493         saved_used = heap_segment_used (seg);
11494     }
11495
11496     if (seg == ephemeral_heap_segment)
11497     {
11498         //Sometimes the allocated size is advanced without clearing the
11499         //memory. Let's catch up here
11500         if (heap_segment_used (seg) < (alloc_allocated - plug_skew))
11501         {
11502 #ifdef MARK_ARRAY
11503 #ifndef BACKGROUND_GC
11504             clear_mark_array (heap_segment_used (seg) + plug_skew, alloc_allocated);
11505 #endif //BACKGROUND_GC
11506 #endif //MARK_ARRAY
11507             heap_segment_used (seg) = alloc_allocated - plug_skew;
11508         }
11509     }
11510 #ifdef BACKGROUND_GC
11511     else if (seg)
11512     {
11513         uint8_t* old_allocated = heap_segment_allocated (seg) - plug_skew - limit_size;
11514 #ifdef FEATURE_LOH_COMPACTION
11515         old_allocated -= Align (loh_padding_obj_size, align_const);
11516 #endif //FEATURE_LOH_COMPACTION
11517
11518         assert (heap_segment_used (seg) >= old_allocated);
11519     }
11520 #endif //BACKGROUND_GC
11521     if ((seg == 0) ||
11522         (start - plug_skew + limit_size) <= heap_segment_used (seg))
11523     {
11524         add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
11525         leave_spin_lock (msl);
11526         dprintf (3, ("clearing memory at %Ix for %d bytes", (start - plug_skew), limit_size));
11527         memclr (start - plug_skew, limit_size);
11528     }
11529     else
11530     {
11531         uint8_t* used = heap_segment_used (seg);
11532         heap_segment_used (seg) = start + limit_size - plug_skew;
11533
11534         add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
11535         leave_spin_lock (msl);
11536
11537         if ((start - plug_skew) < used)
11538         {
11539             if (used != saved_used)
11540             {
11541                 FATAL_GC_ERROR ();
11542             }
11543
11544             dprintf (2, ("clearing memory before used at %Ix for %Id bytes", 
11545                 (start - plug_skew), (plug_skew + used - start)));
11546             memclr (start - plug_skew, used - (start - plug_skew));
11547         }
11548     }
11549
11550     //this portion can be done after we release the lock
11551     if (seg == ephemeral_heap_segment)
11552     {
11553 #ifdef FFIND_OBJECT
11554         if (gen0_must_clear_bricks > 0)
11555         {
11556             //set the brick table to speed up find_object
11557             size_t b = brick_of (acontext->alloc_ptr);
11558             set_brick (b, acontext->alloc_ptr - brick_address (b));
11559             b++;
11560             dprintf (3, ("Allocation Clearing bricks [%Ix, %Ix[",
11561                          b, brick_of (align_on_brick (start + limit_size))));
11562             volatile short* x = &brick_table [b];
11563             short* end_x = &brick_table [brick_of (align_on_brick (start + limit_size))];
11564
11565             for (;x < end_x;x++)
11566                 *x = -1;
11567         }
11568         else
11569 #endif //FFIND_OBJECT
11570         {
11571             gen0_bricks_cleared = FALSE;
11572         }
11573     }
11574
11575     // verifying the memory is completely cleared.
11576     //verify_mem_cleared (start - plug_skew, limit_size);
11577 }
11578
11579 size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int gen_number)
11580 {
11581     dynamic_data* dd = dynamic_data_of (gen_number);
11582     ptrdiff_t new_alloc = dd_new_allocation (dd);
11583     assert (new_alloc == (ptrdiff_t)Align (new_alloc,
11584                                            get_alignment_constant (!(gen_number == (max_generation+1)))));
11585
11586     ptrdiff_t logical_limit = max (new_alloc, (ptrdiff_t)size);
11587     size_t limit = min (logical_limit, (ptrdiff_t)physical_limit);
11588     assert (limit == Align (limit, get_alignment_constant (!(gen_number == (max_generation+1)))));
11589     dd_new_allocation (dd) = (new_alloc - limit);
11590     return limit;
11591 }
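
// Worked example (illustrative, not from the original source): assume the
// generation has new_alloc == 8192 bytes of budget left, the caller asks
// for size == 128, and physical_limit == 4096. Then logical_limit ==
// max(8192, 128) == 8192, limit == min(8192, 4096) == 4096, and the
// remaining budget dd_new_allocation becomes 8192 - 4096 == 4096.
// If the budget is already exhausted (new_alloc <= 0), the request size
// wins the max() so the allocation still proceeds, and dd_new_allocation
// goes (further) negative - the signal used elsewhere to trigger a GC.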
11592
11593 size_t gc_heap::limit_from_size (size_t size, size_t physical_limit, int gen_number,
11594                                  int align_const)
11595 {
11596     size_t padded_size = size + Align (min_obj_size, align_const);
11597     // for LOH this is not true...we could select a physical_limit that's exactly the same
11598     // as size.
11599     assert ((gen_number != 0) || (physical_limit >= padded_size));
11600     size_t min_size_to_allocate = ((gen_number == 0) ? allocation_quantum : 0);
11601
11602     // For SOH if the size asked for is very small, we want to allocate more than 
11603     // just what's asked for if possible.
11604     size_t desired_size_to_allocate  = max (padded_size, min_size_to_allocate);
11605     size_t new_physical_limit = min (physical_limit, desired_size_to_allocate);
11606
11607     size_t new_limit = new_allocation_limit (padded_size,
11608                                              new_physical_limit,
11609                                              gen_number);
11610     assert (new_limit >= (size + Align (min_obj_size, align_const)));
11611     dprintf (100, ("requested to allocate %Id bytes, actual size is %Id", size, new_limit));
11612     return new_limit;
11613 }
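
// Illustrative note (not from the original source): for gen0 the padded
// request is rounded up to at least allocation_quantum so that very small
// allocations still carve out a reasonably sized allocation context,
// amortizing trips through the allocator; for LOH (gen_number != 0)
// min_size_to_allocate is 0 and the padded size is used as-is. The
// physical limit is also capped at the desired size so we never hand out
// more than we intend to.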
11614
11615 void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size, 
11616                           uint8_t* allocated, uint8_t* reserved)
11617 {
11618     dprintf (1, ("total committed on the heap is %Id", get_total_committed_size()));
11619
11620     UNREFERENCED_PARAMETER(heap_num);
11621
11622     if (reason == oom_budget)
11623     {
11624         alloc_size = dd_min_size (dynamic_data_of (0)) / 2;
11625     }
11626
11627     if ((reason == oom_budget) && ((!fgm_result.loh_p) && (fgm_result.fgm != fgm_no_failure)))
11628     {
11629         // This means during the last GC we needed to reserve and/or commit more memory
11630         // but we couldn't. We proceeded with the GC and ended up not having enough
11631         // memory at the end. This is a legitimate OOM situation. Otherwise we 
11632         // probably made a mistake and didn't expand the heap when we should have.
11633         reason = oom_low_mem;
11634     }
11635
11636     oom_info.reason = reason;
11637     oom_info.allocated = allocated;
11638     oom_info.reserved = reserved;
11639     oom_info.alloc_size = alloc_size;
11640     oom_info.gc_index = settings.gc_index;
11641     oom_info.fgm = fgm_result.fgm;
11642     oom_info.size = fgm_result.size;
11643     oom_info.available_pagefile_mb = fgm_result.available_pagefile_mb;
11644     oom_info.loh_p = fgm_result.loh_p;
11645
11646     fgm_result.fgm = fgm_no_failure;
11647
11648     // Break early - before the more_space_lock is released so no other threads
11649     // could have allocated on the same heap when OOM happened.
11650     if (GCConfig::GetBreakOnOOM())
11651     {
11652         GCToOSInterface::DebugBreak();
11653     }
11654 }
11655
11656 #ifdef BACKGROUND_GC
11657 BOOL gc_heap::background_allowed_p()
11658 {
11659     return ( gc_can_use_concurrent && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)) );
11660 }
11661 #endif //BACKGROUND_GC
11662
11663 void gc_heap::check_for_full_gc (int gen_num, size_t size)
11664 {
11665     BOOL should_notify = FALSE;
11666     // TRUE if we detect the full GC is due to the allocation budget specified;
11667     // FALSE if it's due to other factors.
11668     BOOL alloc_factor = TRUE; 
11669     int i = 0;
11670     int n = 0;
11671     int n_initial = gen_num;
11672     BOOL local_blocking_collection = FALSE;
11673     BOOL local_elevation_requested = FALSE;
11674     int new_alloc_remain_percent = 0;
11675
11676     if (full_gc_approach_event_set)
11677     {
11678         return;
11679     }
11680     
11681     if (gen_num != (max_generation + 1))
11682     {
11683         gen_num = max_generation;
11684     }
11685
11686     dynamic_data* dd_full = dynamic_data_of (gen_num);
11687     ptrdiff_t new_alloc_remain = 0;
11688     uint32_t pct = ((gen_num == (max_generation + 1)) ? fgn_loh_percent : fgn_maxgen_percent);
11689
11690     for (int gen_index = 0; gen_index <= (max_generation + 1); gen_index++)
11691     {
11692         dprintf (2, ("FGN: h#%d: gen%d: %Id(%Id)", 
11693                      heap_number, gen_index,
11694                      dd_new_allocation (dynamic_data_of (gen_index)),
11695                      dd_desired_allocation (dynamic_data_of (gen_index))));
11696     }
11697
11698     // For small object allocations we only check every fgn_check_quantum bytes.
11699     if (n_initial == 0)
11700     {
11701         dprintf (2, ("FGN: gen0 last recorded alloc: %Id", fgn_last_alloc));
11702         dynamic_data* dd_0 = dynamic_data_of (n_initial);
11703         if (((fgn_last_alloc - dd_new_allocation (dd_0)) < fgn_check_quantum) &&
11704             (dd_new_allocation (dd_0) >= 0))
11705         {
11706             return;
11707         }
11708         else
11709         {
11710             fgn_last_alloc = dd_new_allocation (dd_0);
11711             dprintf (2, ("FGN: gen0 last recorded alloc is now: %Id", fgn_last_alloc));
11712         }
11713
11714         // We don't consider the size that came from soh because it doesn't contribute to the
11715         // gen2 budget.
11716         size = 0;
11717     }
11718
11719     for (i = n+1; i <= max_generation; i++)
11720     {
11721         if (get_new_allocation (i) <= 0)
11722         {
11723             n = min (i, max_generation);
11724         }
11725         else
11726             break;
11727     }
11728
11729     dprintf (2, ("FGN: h#%d: gen%d budget exceeded", heap_number, n));
11730     if (gen_num == max_generation)
11731     {
11732         // If it's the small object heap we should first see if we will even be looking at the
11733         // gen2 budget in the next GC or not. If not we should go directly to checking other factors.
11734         if (n < (max_generation - 1))
11735         {
11736             goto check_other_factors;
11737         }
11738     }
11739
11740     new_alloc_remain = dd_new_allocation (dd_full) - size;
11741
11742     new_alloc_remain_percent = (int)(((float)(new_alloc_remain) / (float)dd_desired_allocation (dd_full)) * 100);
11743
11744     dprintf (2, ("FGN: alloc threshold for gen%d is %d%%, current threshold is %d%%", 
11745                  gen_num, pct, new_alloc_remain_percent));
11746
11747     if (new_alloc_remain_percent <= (int)pct)
11748     {
11749 #ifdef BACKGROUND_GC
11750         // If background GC is enabled, we still want to check whether this will
11751         // be a blocking GC or not because we only want to notify when it's a 
11752         // blocking full GC.
11753         if (background_allowed_p())
11754         {
11755             goto check_other_factors;
11756         }
11757 #endif //BACKGROUND_GC
11758
11759         should_notify = TRUE;
11760         goto done;
11761     }
11762
11763 check_other_factors:
11764
11765     dprintf (2, ("FGC: checking other factors"));
11766     n = generation_to_condemn (n, 
11767                                &local_blocking_collection, 
11768                                &local_elevation_requested, 
11769                                TRUE);
11770
11771     if (local_elevation_requested && (n == max_generation))
11772     {
11773         if (settings.should_lock_elevation)
11774         {
11775             int local_elevation_locked_count = settings.elevation_locked_count + 1;
11776             if (local_elevation_locked_count != 6)
11777             {
11778                 dprintf (2, ("FGN: lock count is %d - Condemning max_generation-1", 
11779                     local_elevation_locked_count));
11780                 n = max_generation - 1;
11781             }
11782         }
11783     }
11784
11785     dprintf (2, ("FGN: we estimate gen%d will be collected", n));
11786
11787 #ifdef BACKGROUND_GC
11788     // When background GC is enabled it decreases the accuracy of our prediction -
11789     // by the time the GC happens, we may not be under BGC anymore. If we try to 
11790     // predict often enough it should be ok.
11791     if ((n == max_generation) &&
11792         (recursive_gc_sync::background_running_p()))
11793     {
11794         n = max_generation - 1;
11795         dprintf (2, ("FGN: bgc - 1 instead of 2"));
11796     }
11797
11798     if ((n == max_generation) && !local_blocking_collection)
11799     {
11800         if (!background_allowed_p())
11801         {
11802             local_blocking_collection = TRUE;
11803         }
11804     }
11805 #endif //BACKGROUND_GC
11806
11807     dprintf (2, ("FGN: we estimate gen%d will be collected: %s", 
11808                        n, 
11809                        (local_blocking_collection ? "blocking" : "background")));
11810
11811     if ((n == max_generation) && local_blocking_collection)
11812     {
11813         alloc_factor = FALSE;
11814         should_notify = TRUE;
11815         goto done;
11816     }
11817
11818 done:
11819
11820     if (should_notify)
11821     {
11822         dprintf (2, ("FGN: gen%d detecting full GC approaching(%s) (GC#%d) (%Id%% left in gen%d)", 
11823                      n_initial,
11824                      (alloc_factor ? "alloc" : "other"),
11825                      dd_collection_count (dynamic_data_of (0)),
11826                      new_alloc_remain_percent, 
11827                      gen_num));
11828
11829         send_full_gc_notification (n_initial, alloc_factor);
11830     }
11831 }
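
// Worked example (illustrative, not from the original source): suppose
// fgn_maxgen_percent == 10, gen2's desired allocation is 100MB, and 8MB
// of budget remains after subtracting the current request. Then
// new_alloc_remain_percent == 8 and 8 <= 10, so (absent background GC)
// the approach notification fires via send_full_gc_notification.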
11832
11833 void gc_heap::send_full_gc_notification (int gen_num, BOOL due_to_alloc_p)
11834 {
11835     if (!full_gc_approach_event_set)
11836     {
11837         assert (full_gc_approach_event.IsValid());
11838         FIRE_EVENT(GCFullNotify_V1, gen_num, due_to_alloc_p);
11839
11840         full_gc_end_event.Reset();
11841         full_gc_approach_event.Set();
11842         full_gc_approach_event_set = true;
11843     }
11844 }
11845
11846 wait_full_gc_status gc_heap::full_gc_wait (GCEvent *event, int time_out_ms)
11847 {
11848     if (fgn_maxgen_percent == 0)
11849     {
11850         return wait_full_gc_na;
11851     }
11852
11853     uint32_t wait_result = user_thread_wait(event, FALSE, time_out_ms);
11854
11855     if ((wait_result == WAIT_OBJECT_0) || (wait_result == WAIT_TIMEOUT))
11856     {
11857         if (fgn_maxgen_percent == 0)
11858         {
11859             return wait_full_gc_cancelled;
11860         }
11861         
11862         if (wait_result == WAIT_OBJECT_0)
11863         {
11864 #ifdef BACKGROUND_GC
11865             if (fgn_last_gc_was_concurrent)
11866             {
11867                 fgn_last_gc_was_concurrent = FALSE;
11868                 return wait_full_gc_na;
11869             }
11870             else
11871 #endif //BACKGROUND_GC
11872             {
11873                 return wait_full_gc_success;
11874             }
11875         }
11876         else
11877         {
11878             return wait_full_gc_timeout;
11879         }
11880     }
11881     else
11882     {
11883         return wait_full_gc_failed;
11884     }
11885 }
11886
11887 size_t gc_heap::get_full_compact_gc_count()
11888 {
11889     return full_gc_counts[gc_type_compacting];
11890 }
11891
11892 // DTREVIEW - we should check this in dt_low_ephemeral_space_p
11893 // as well.
11894 inline
11895 BOOL gc_heap::short_on_end_of_seg (int gen_number,
11896                                    heap_segment* seg,
11897                                    int align_const)
11898 {
11899     UNREFERENCED_PARAMETER(gen_number);
11900     uint8_t* allocated = heap_segment_allocated(seg);
11901
11902     BOOL sufficient_p = a_size_fit_p (end_space_after_gc(),
11903                                       allocated,
11904                                       heap_segment_reserved (seg), 
11905                                       align_const);
11906
11907     if (!sufficient_p)
11908     {
11909         if (sufficient_gen0_space_p)
11910         {
11911             dprintf (GTC_LOG, ("gen0 has enough free space"));
11912         }
11913
11914         sufficient_p = sufficient_gen0_space_p;
11915     }
11916
11917     return !sufficient_p;
11918 }
11919
11920 #ifdef _MSC_VER
11921 #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
11922 #endif // _MSC_VER
11923
11924 inline
11925 BOOL gc_heap::a_fit_free_list_p (int gen_number, 
11926                                  size_t size, 
11927                                  alloc_context* acontext,
11928                                  int align_const)
11929 {
11930     BOOL can_fit = FALSE;
11931     generation* gen = generation_of (gen_number);
11932     allocator* gen_allocator = generation_allocator (gen);
11933     size_t sz_list = gen_allocator->first_bucket_size();
11934     for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
11935     {
11936         if ((size < sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
11937         {
11938             uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
11939             uint8_t* prev_free_item = 0;
11940
11941             while (free_list != 0)
11942             {
11943                 dprintf (3, ("considering free list %Ix", (size_t)free_list));
11944                 size_t free_list_size = unused_array_size (free_list);
11945                 if ((size + Align (min_obj_size, align_const)) <= free_list_size)
11946                 {
11947                     dprintf (3, ("Found adequate unused area: [%Ix, size: %Id",
11948                                  (size_t)free_list, free_list_size));
11949
11950                     gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
11951                     // We ask for Align (min_obj_size) more
11952                     // to make sure that we can insert a free object;
11953                     // adjust_limit will set the limit lower.
11954                     size_t limit = limit_from_size (size, free_list_size, gen_number, align_const);
11955
11956                     uint8_t*  remain = (free_list + limit);
11957                     size_t remain_size = (free_list_size - limit);
11958                     if (remain_size >= Align(min_free_list, align_const))
11959                     {
11960                         make_unused_array (remain, remain_size);
11961                         gen_allocator->thread_item_front (remain, remain_size);
11962                         assert (remain_size >= Align (min_obj_size, align_const));
11963                     }
11964                     else
11965                     {
11966                         //absorb the entire free list
11967                         limit += remain_size;
11968                     }
11969                     generation_free_list_space (gen) -= limit;
11970
11971                     adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
11972
11973                     can_fit = TRUE;
11974                     goto end;
11975                 }
11976                 else if (gen_allocator->discard_if_no_fit_p())
11977                 {
11978                     assert (prev_free_item == 0);
11979                     dprintf (3, ("couldn't use this free area, discarding"));
11980                     generation_free_obj_space (gen) += free_list_size;
11981
11982                     gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
11983                     generation_free_list_space (gen) -= free_list_size;
11984                 }
11985                 else
11986                 {
11987                     prev_free_item = free_list;
11988                 }
11989                 free_list = free_list_slot (free_list); 
11990             }
11991         }
11992         sz_list = sz_list * 2;
11993     }
11994 end:
11995     return can_fit;
11996 }
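
// Worked example (illustrative numbers, not from the original source): a
// request of size == 100 starts at the first bucket that may hold items of
// at least 100 + Align(min_obj_size). If it finds a 1024-byte free item,
// limit_from_size may hand back, say, 256 bytes; the 768-byte remainder is
// re-threaded at the front of the free list when it is at least
// Align(min_free_list), otherwise the whole item is absorbed into the
// allocation by growing the limit.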
11997
11998
11999 #ifdef BACKGROUND_GC
12000 void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
12001                                  size_t size, 
12002                                  alloc_context* acontext,
12003                                  int align_const, 
12004                                  int lock_index,
12005                                  BOOL check_used_p,
12006                                  heap_segment* seg)
12007 {
12008     make_unused_array (alloc_start, size);
12009
12010 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
12011     if (g_fEnableAppDomainMonitoring)
12012     {
12013         GCToEEInterface::RecordAllocatedBytesForHeap(size, heap_number);
12014     }
12015 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
12016
12017     size_t size_of_array_base = sizeof(ArrayBase);
12018
12019     bgc_alloc_lock->loh_alloc_done_with_index (lock_index);
12020
12021     // clear memory while not holding the lock. 
12022     size_t size_to_skip = size_of_array_base;
12023     size_t size_to_clear = size - size_to_skip - plug_skew;
12024     size_t saved_size_to_clear = size_to_clear;
12025     if (check_used_p)
12026     {
12027         uint8_t* end = alloc_start + size - plug_skew;
12028         uint8_t* used = heap_segment_used (seg);
12029         if (used < end)
12030         {
12031             if ((alloc_start + size_to_skip) < used)
12032             {
12033                 size_to_clear = used - (alloc_start + size_to_skip);
12034             }
12035             else
12036             {
12037                 size_to_clear = 0;
12038             }
12039             dprintf (2, ("bgc loh: setting used to %Ix", end));
12040             heap_segment_used (seg) = end;
12041         }
12042
12043         dprintf (2, ("bgc loh: used: %Ix, alloc: %Ix, end of alloc: %Ix, clear %Id bytes",
12044                      used, alloc_start, end, size_to_clear));
12045     }
12046     else
12047     {
12048         dprintf (2, ("bgc loh: [%Ix-[%Ix(%Id)", alloc_start, alloc_start+size, size));
12049     }
12050
12051 #ifdef VERIFY_HEAP
12052     // since we fill free objects with 0xcc when we verify the heap,
12053     // we need to make sure we clear those bytes.
12054     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
12055     {
12056         if (size_to_clear < saved_size_to_clear)
12057         {
12058             size_to_clear = saved_size_to_clear;
12059         }
12060     }
12061 #endif //VERIFY_HEAP
12062     
12063     dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear large obj", heap_number));
12064     add_saved_spinlock_info (true, me_release, mt_clr_large_mem);
12065     leave_spin_lock (&more_space_lock_loh);
12066     memclr (alloc_start + size_to_skip, size_to_clear);
12067
12068     bgc_alloc_lock->loh_alloc_set (alloc_start);
12069
12070     acontext->alloc_ptr = alloc_start;
12071     acontext->alloc_limit = (alloc_start + size - Align (min_obj_size, align_const));
12072
12073     // need to clear the rest of the object before we hand it out.
12074     clear_unused_array(alloc_start, size);
12075 }
12076 #endif //BACKGROUND_GC
12077
12078 BOOL gc_heap::a_fit_free_list_large_p (size_t size, 
12079                                        alloc_context* acontext,
12080                                        int align_const)
12081 {
12082     BOOL can_fit = FALSE;
12083     int gen_number = max_generation + 1;
12084     generation* gen = generation_of (gen_number);
12085     allocator* loh_allocator = generation_allocator (gen); 
12086
12087 #ifdef FEATURE_LOH_COMPACTION
12088     size_t loh_pad = Align (loh_padding_obj_size, align_const);
12089 #endif //FEATURE_LOH_COMPACTION
12090
12091 #ifdef BACKGROUND_GC
12092     int cookie = -1;
12093 #endif //BACKGROUND_GC
12094     size_t sz_list = loh_allocator->first_bucket_size();
12095     for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
12096     {
12097         if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
12098         {
12099             uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
12100             uint8_t* prev_free_item = 0;
12101             while (free_list != 0)
12102             {
12103                 dprintf (3, ("considering free list %Ix", (size_t)free_list));
12104
12105                 size_t free_list_size = unused_array_size(free_list);
12106
12107 #ifdef FEATURE_LOH_COMPACTION
12108                 if ((size + loh_pad) <= free_list_size)
12109 #else
12110                 if (((size + Align (min_obj_size, align_const)) <= free_list_size)||
12111                     (size == free_list_size))
12112 #endif //FEATURE_LOH_COMPACTION
12113                 {
12114 #ifdef BACKGROUND_GC
12115                     cookie = bgc_alloc_lock->loh_alloc_set (free_list);
12116                     bgc_track_loh_alloc();
12117 #endif //BACKGROUND_GC
12118
12119                     //unlink the free_item
12120                     loh_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
12121
12122                     // Subtract min obj size because limit_from_size adds it. Not needed for LOH.
12123                     size_t limit = limit_from_size (size - Align(min_obj_size, align_const), free_list_size, 
12124                                                     gen_number, align_const);
12125
12126 #ifdef FEATURE_LOH_COMPACTION
12127                     make_unused_array (free_list, loh_pad);
12128                     limit -= loh_pad;
12129                     free_list += loh_pad;
12130                     free_list_size -= loh_pad;
12131 #endif //FEATURE_LOH_COMPACTION
12132
12133                     uint8_t*  remain = (free_list + limit);
12134                     size_t remain_size = (free_list_size - limit);
12135                     if (remain_size != 0)
12136                     {
12137                         assert (remain_size >= Align (min_obj_size, align_const));
12138                         make_unused_array (remain, remain_size);
12139                     }
12140                     if (remain_size >= Align(min_free_list, align_const))
12141                     {
12142                         loh_thread_gap_front (remain, remain_size, gen);
12143                         assert (remain_size >= Align (min_obj_size, align_const));
12144                     }
12145                     else
12146                     {
12147                         generation_free_obj_space (gen) += remain_size;
12148                     }
12149                     generation_free_list_space (gen) -= free_list_size;
12150                     dprintf (3, ("found fit on loh at %Ix", free_list));
12151 #ifdef BACKGROUND_GC
12152                     if (cookie != -1)
12153                     {
12154                         bgc_loh_alloc_clr (free_list, limit, acontext, align_const, cookie, FALSE, 0);
12155                     }
12156                     else
12157 #endif //BACKGROUND_GC
12158                     {
12159                         adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
12160                     }
12161
12162                     //fix the limit to compensate for adjust_limit_clr making it too short 
12163                     acontext->alloc_limit += Align (min_obj_size, align_const);
12164                     can_fit = TRUE;
12165                     goto exit;
12166                 }
12167                 prev_free_item = free_list;
12168                 free_list = free_list_slot (free_list); 
12169             }
12170         }
12171         sz_list = sz_list * 2;
12172     }
12173 exit:
12174     return can_fit;
12175 }
12176
12177 #ifdef _MSC_VER
12178 #pragma warning(default:4706)
12179 #endif // _MSC_VER
12180
12181 BOOL gc_heap::a_fit_segment_end_p (int gen_number,
12182                                    heap_segment* seg,
12183                                    size_t size, 
12184                                    alloc_context* acontext,
12185                                    int align_const,
12186                                    BOOL* commit_failed_p)
12187 {
12188     *commit_failed_p = FALSE;
12189     size_t limit = 0;
12190 #ifdef BACKGROUND_GC
12191     int cookie = -1;
12192 #endif //BACKGROUND_GC
12193
12194     uint8_t*& allocated = ((gen_number == 0) ?
12195                         alloc_allocated : 
12196                         heap_segment_allocated(seg));
12197
12198     size_t pad = Align (min_obj_size, align_const);
12199
12200 #ifdef FEATURE_LOH_COMPACTION
12201     size_t loh_pad = Align (loh_padding_obj_size, align_const);
12202     if (gen_number == (max_generation + 1))
12203     {
12204         pad += loh_pad;
12205     }
12206 #endif //FEATURE_LOH_COMPACTION
12207
12208     uint8_t* end = heap_segment_committed (seg) - pad;
12209
12210     if (a_size_fit_p (size, allocated, end, align_const))
12211     {
12212         limit = limit_from_size (size, 
12213                                  (end - allocated), 
12214                                  gen_number, align_const);
12215         goto found_fit;
12216     }
12217
12218     end = heap_segment_reserved (seg) - pad;
12219
12220     if (a_size_fit_p (size, allocated, end, align_const))
12221     {
12222         limit = limit_from_size (size, 
12223                                  (end - allocated), 
12224                                  gen_number, align_const);
12225         if (grow_heap_segment (seg, allocated + limit))
12226         {
12227             goto found_fit;
12228         }
12229         else
12230         {
12231             dprintf (2, ("can't grow segment, doing a full gc"));
12232             *commit_failed_p = TRUE;
12233         }
12234     }
12235     goto found_no_fit;
12236
12237 found_fit:
12238
12239 #ifdef BACKGROUND_GC
12240     if (gen_number != 0)
12241     {
12242         cookie = bgc_alloc_lock->loh_alloc_set (allocated);
12243         bgc_track_loh_alloc();
12244     }
12245 #endif //BACKGROUND_GC
12246
12247     uint8_t* old_alloc;
12248     old_alloc = allocated;
12249 #ifdef FEATURE_LOH_COMPACTION
12250     if (gen_number == (max_generation + 1))
12251     {
12252         make_unused_array (old_alloc, loh_pad);
12253         old_alloc += loh_pad;
12254         allocated += loh_pad;
12255         limit -= loh_pad;
12256     }
12257 #endif //FEATURE_LOH_COMPACTION
12258
12259 #if defined (VERIFY_HEAP) && defined (_DEBUG)
12260         ((void**) allocated)[-1] = 0;     //clear the sync block
12261 #endif //VERIFY_HEAP && _DEBUG
12262     allocated += limit;
12263
12264     dprintf (3, ("found fit at end of seg: %Ix", old_alloc));
12265
12266 #ifdef BACKGROUND_GC
12267     if (cookie != -1)
12268     {
12269         bgc_loh_alloc_clr (old_alloc, limit, acontext, align_const, cookie, TRUE, seg);
12270     }
12271     else
12272 #endif //BACKGROUND_GC
12273     {
12274         adjust_limit_clr (old_alloc, limit, acontext, seg, align_const, gen_number);
12275     }
12276
12277     return TRUE;
12278
12279 found_no_fit:
12280
12281     return FALSE;
12282 }
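
// Illustrative note (not from the original source): the end-of-segment fit
// is tried twice: first against committed space, where no OS call is
// needed, then against reserved space, where grow_heap_segment must commit
// more pages. A failed commit sets *commit_failed_p so callers can
// escalate (typically to a full compacting GC) instead of retrying blindly.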
12283
12284 BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
12285                                        size_t size, 
12286                                        alloc_context* acontext,
12287                                        int align_const,
12288                                        BOOL* commit_failed_p,
12289                                        oom_reason* oom_r)
12290 {
12291     *commit_failed_p = FALSE;
12292     heap_segment* seg = generation_allocation_segment (generation_of (gen_number));
12293     BOOL can_allocate_p = FALSE;
12294
12295     while (seg)
12296     {
12297 #ifdef BACKGROUND_GC
12298         if (seg->flags & heap_segment_flags_loh_delete)
12299         {
12300             dprintf (3, ("h%d skipping seg %Ix to be deleted", heap_number, (size_t)seg));
12301         }
12302         else
12303 #endif //BACKGROUND_GC
12304         {
12305             if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)), 
12306                                         acontext, align_const, commit_failed_p))
12307             {
12308                 acontext->alloc_limit += Align (min_obj_size, align_const);
12309                 can_allocate_p = TRUE;
12310                 break;
12311             }
12312
12313             if (*commit_failed_p)
12314             {
12315                 *oom_r = oom_cant_commit;
12316                 break;
12317             }
12318         }
12319
12320         seg = heap_segment_next_rw (seg);
12321     }
12322
12323     return can_allocate_p;
12324 }
12325
12326 #ifdef BACKGROUND_GC
12327 inline
12328 void gc_heap::wait_for_background (alloc_wait_reason awr, bool loh_p)
12329 {
12330     GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
12331
12332     dprintf (2, ("BGC is already in progress, waiting for it to finish"));
12333     add_saved_spinlock_info (loh_p, me_release, mt_wait_bgc);
12334     leave_spin_lock (msl);
12335     background_gc_wait (awr);
12336     enter_spin_lock (msl);
12337     add_saved_spinlock_info (loh_p, me_acquire, mt_wait_bgc);
12338 }
12339
12340 void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p)
12341 {
12342     if (recursive_gc_sync::background_running_p())
12343     {
12344         uint32_t memory_load;
12345         get_memory_info (&memory_load);
12346         if (memory_load >= m_high_memory_load_th)
12347         {
12348             dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr));
12349             wait_for_background (awr, loh_p);
12350         }
12351     }
12352 }
12353
12354 #endif //BACKGROUND_GC
12355
12356 // We request to trigger an ephemeral GC but we may get a full compacting GC.
12357 // return TRUE if that's the case.
12358 BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr)
12359 {
12360 #ifdef BACKGROUND_GC
12361     wait_for_bgc_high_memory (awr_loh_oos_bgc, false);
12362 #endif //BACKGROUND_GC
12363
12364     BOOL did_full_compact_gc = FALSE;
12365
12366     dprintf (2, ("triggering a gen1 GC"));
12367     size_t last_full_compact_gc_count = get_full_compact_gc_count();
12368     vm_heap->GarbageCollectGeneration(max_generation - 1, gr);
12369
12370 #ifdef MULTIPLE_HEAPS
12371     enter_spin_lock (&more_space_lock_soh);
12372     add_saved_spinlock_info (false, me_acquire, mt_t_eph_gc);
12373 #endif //MULTIPLE_HEAPS
12374
12375     size_t current_full_compact_gc_count = get_full_compact_gc_count();
12376
12377     if (current_full_compact_gc_count > last_full_compact_gc_count)
12378     {
12379         dprintf (2, ("attempted to trigger an ephemeral GC and got a full compacting GC"));
12380         did_full_compact_gc = TRUE;
12381     }
12382
12383     return did_full_compact_gc;
12384 }
12385
12386 BOOL gc_heap::soh_try_fit (int gen_number,
12387                            size_t size, 
12388                            alloc_context* acontext,
12389                            int align_const,
12390                            BOOL* commit_failed_p,
12391                            BOOL* short_seg_end_p)
12392 {
12393     BOOL can_allocate = TRUE;
12394     if (short_seg_end_p)
12395     {
12396         *short_seg_end_p = FALSE;
12397     }
12398
12399     can_allocate = a_fit_free_list_p (gen_number, size, acontext, align_const);
12400     if (!can_allocate)
12401     {
12402         if (short_seg_end_p)
12403         {
12404             *short_seg_end_p = short_on_end_of_seg (gen_number, ephemeral_heap_segment, align_const);
12405         }
12406         // If the caller doesn't care, we always try to fit at the end of seg;
12407         // otherwise we would only try if we are actually not short at end of seg.
12408         if (!short_seg_end_p || !(*short_seg_end_p))
12409         {
12410             can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size, 
12411                                                 acontext, align_const, commit_failed_p);
12412         }
12413     }
12414
12415     return can_allocate;
12416 }
12417
12418 BOOL gc_heap::allocate_small (int gen_number,
12419                               size_t size, 
12420                               alloc_context* acontext,
12421                               int align_const)
12422 {
12423 #if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
12424     if (recursive_gc_sync::background_running_p())
12425     {
12426         background_soh_alloc_count++;
12427         if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0)
12428         {
12429             add_saved_spinlock_info (false, me_release, mt_alloc_small);
12430             leave_spin_lock (&more_space_lock_soh);
12431             bool cooperative_mode = enable_preemptive();
12432             GCToOSInterface::Sleep (bgc_alloc_spin);
12433             disable_preemptive (cooperative_mode);
12434             enter_spin_lock (&more_space_lock_soh);
12435             add_saved_spinlock_info (false, me_acquire, mt_alloc_small);
12436         }
12437         else
12438         {
12439             //GCToOSInterface::YieldThread (0);
12440         }
12441     }
12442 #endif //BACKGROUND_GC && !MULTIPLE_HEAPS
12443
12444     gc_reason gr = reason_oos_soh;
12445     oom_reason oom_r = oom_no_failure;
12446
12447     // No variable values should be "carried over" from one state to the other.
12448     // That's why there are local variables for each state.
12449
12450     allocation_state soh_alloc_state = a_state_start;
12451
12452     // If we can get a new seg it means allocation will succeed.
12453     while (1)
12454     {
12455         dprintf (3, ("[h%d]soh state is %s", heap_number, allocation_state_str[soh_alloc_state]));
12456         switch (soh_alloc_state)
12457         {
12458             case a_state_can_allocate:
12459             case a_state_cant_allocate:
12460             {
12461                 goto exit;
12462             }
12463             case a_state_start:
12464             {
12465                 soh_alloc_state = a_state_try_fit;
12466                 break;
12467             }
12468             case a_state_try_fit:
12469             {
12470                 BOOL commit_failed_p = FALSE;
12471                 BOOL can_use_existing_p = FALSE;
12472
12473                 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12474                                                   align_const, &commit_failed_p,
12475                                                   NULL);
12476                 soh_alloc_state = (can_use_existing_p ?
12477                                         a_state_can_allocate : 
12478                                         (commit_failed_p ? 
12479                                             a_state_trigger_full_compact_gc :
12480                                             a_state_trigger_ephemeral_gc));
12481                 break;
12482             }
12483             case a_state_try_fit_after_bgc:
12484             {
12485                 BOOL commit_failed_p = FALSE;
12486                 BOOL can_use_existing_p = FALSE;
12487                 BOOL short_seg_end_p = FALSE;
12488
12489                 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12490                                                   align_const, &commit_failed_p,
12491                                                   &short_seg_end_p);
12492                 soh_alloc_state = (can_use_existing_p ? 
12493                                         a_state_can_allocate : 
12494                                         (short_seg_end_p ? 
12495                                             a_state_trigger_2nd_ephemeral_gc : 
12496                                             a_state_trigger_full_compact_gc));
12497                 break;
12498             }
12499             case a_state_try_fit_after_cg:
12500             {
12501                 BOOL commit_failed_p = FALSE;
12502                 BOOL can_use_existing_p = FALSE;
12503                 BOOL short_seg_end_p = FALSE;
12504
12505                 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12506                                                   align_const, &commit_failed_p,
12507                                                   &short_seg_end_p);
12508
12509                 if (can_use_existing_p)
12510                 {
12511                     soh_alloc_state = a_state_can_allocate;
12512                 }
12513 #ifdef MULTIPLE_HEAPS
12514                 else if (gen0_allocated_after_gc_p)
12515                 {
12516                     // some other threads already grabbed the more space lock and allocated
12517                     // so we should attempt an ephemeral GC again.
12518                     soh_alloc_state = a_state_trigger_ephemeral_gc; 
12519                 }
12520 #endif //MULTIPLE_HEAPS
12521                 else if (short_seg_end_p)
12522                 {
12523                     soh_alloc_state = a_state_cant_allocate;
12524                     oom_r = oom_budget;
12525                 }
12526                 else 
12527                 {
12528                     assert (commit_failed_p);
12529                     soh_alloc_state = a_state_cant_allocate;
12530                     oom_r = oom_cant_commit;
12531                 }
12532                 break;
12533             }
12534             case a_state_check_and_wait_for_bgc:
12535             {
12536                 BOOL bgc_in_progress_p = FALSE;
12537                 BOOL did_full_compacting_gc = FALSE;
12538
12539                 bgc_in_progress_p = check_and_wait_for_bgc (awr_gen0_oos_bgc, &did_full_compacting_gc, false);
12540                 soh_alloc_state = (did_full_compacting_gc ? 
12541                                         a_state_try_fit_after_cg : 
12542                                         a_state_try_fit_after_bgc);
12543                 break;
12544             }
12545             case a_state_trigger_ephemeral_gc:
12546             {
12547                 BOOL commit_failed_p = FALSE;
12548                 BOOL can_use_existing_p = FALSE;
12549                 BOOL short_seg_end_p = FALSE;
12550                 BOOL bgc_in_progress_p = FALSE;
12551                 BOOL did_full_compacting_gc = FALSE;
12552
12553                 did_full_compacting_gc = trigger_ephemeral_gc (gr);
12554                 if (did_full_compacting_gc)
12555                 {
12556                     soh_alloc_state = a_state_try_fit_after_cg;
12557                 }
12558                 else
12559                 {
12560                     can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12561                                                       align_const, &commit_failed_p,
12562                                                       &short_seg_end_p);
12563 #ifdef BACKGROUND_GC
12564                     bgc_in_progress_p = recursive_gc_sync::background_running_p();
12565 #endif //BACKGROUND_GC
12566
12567                     if (can_use_existing_p)
12568                     {
12569                         soh_alloc_state = a_state_can_allocate;
12570                     }
12571                     else
12572                     {
12573                         if (short_seg_end_p)
12574                         {
12575                             if (should_expand_in_full_gc)
12576                             {
12577                                 dprintf (2, ("gen1 GC wanted to expand!"));
12578                                 soh_alloc_state = a_state_trigger_full_compact_gc;
12579                             }
12580                             else
12581                             {
12582                                 soh_alloc_state = (bgc_in_progress_p ? 
12583                                                         a_state_check_and_wait_for_bgc : 
12584                                                         a_state_trigger_full_compact_gc);
12585                             }
12586                         }
12587                         else if (commit_failed_p)
12588                         {
12589                             soh_alloc_state = a_state_trigger_full_compact_gc;
12590                         }
12591                         else
12592                         {
12593 #ifdef MULTIPLE_HEAPS
12594                             // some other threads already grabbed the more space lock and allocated
12595                             // so we should attempt an ephemeral GC again.
12596                             assert (gen0_allocated_after_gc_p);
12597                             soh_alloc_state = a_state_trigger_ephemeral_gc; 
12598 #else //MULTIPLE_HEAPS
12599                             assert (!"shouldn't get here");
12600 #endif //MULTIPLE_HEAPS
12601                         }
12602                     }
12603                 }
12604                 break;
12605             }
12606             case a_state_trigger_2nd_ephemeral_gc:
12607             {
12608                 BOOL commit_failed_p = FALSE;
12609                 BOOL can_use_existing_p = FALSE;
12610                 BOOL short_seg_end_p = FALSE;
12611                 BOOL did_full_compacting_gc = FALSE;
12612
12613
12614                 did_full_compacting_gc = trigger_ephemeral_gc (gr);
12615                 
12616                 if (did_full_compacting_gc)
12617                 {
12618                     soh_alloc_state = a_state_try_fit_after_cg;
12619                 }
12620                 else
12621                 {
12622                     can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12623                                                       align_const, &commit_failed_p,
12624                                                       &short_seg_end_p);
12625                     if (short_seg_end_p || commit_failed_p)
12626                     {
12627                         soh_alloc_state = a_state_trigger_full_compact_gc;
12628                     }
12629                     else
12630                     {
12631                         assert (can_use_existing_p);
12632                         soh_alloc_state = a_state_can_allocate;
12633                     }
12634                 }
12635                 break;
12636             }
12637             case a_state_trigger_full_compact_gc:
12638             {
12639                 if (fgn_maxgen_percent)
12640                 {
12641                     dprintf (2, ("FGN: SOH doing last GC before we throw OOM"));
12642                     send_full_gc_notification (max_generation, FALSE);
12643                 }
12644
12645                 BOOL got_full_compacting_gc = FALSE;
12646
12647                 got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, false);
12648                 soh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
12649                 break;
12650             }
12651             default:
12652             {
12653                 assert (!"Invalid state!");
12654                 break;
12655             }
12656         }
12657     }
12658
12659 exit:
12660     if (soh_alloc_state == a_state_cant_allocate)
12661     {
12662         assert (oom_r != oom_no_failure);
12663         handle_oom (heap_number, 
12664                     oom_r, 
12665                     size,
12666                     heap_segment_allocated (ephemeral_heap_segment),
12667                     heap_segment_reserved (ephemeral_heap_segment));
12668
12669         add_saved_spinlock_info (false, me_release, mt_alloc_small_cant);
12670         leave_spin_lock (&more_space_lock_soh);
12671     }
12672
12673     return (soh_alloc_state == a_state_can_allocate);
12674 }
12675
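// Editor's note: a minimal sketch (not compiled into the build) of the
// state-machine retry pattern that allocate_small above follows. All names
// here are hypothetical; the real states are the a_state_* values handled
// in the switch, and the real transitions carry much more information
// (commit failures, short segment ends, BGC progress).
#if 0
enum demo_alloc_state { ds_start, ds_try_fit, ds_trigger_gc, ds_can_allocate, ds_cant_allocate };

bool demo_allocate (bool (*try_fit) (), bool (*trigger_gc) ())
{
    demo_alloc_state state = ds_start;
    while (true)
    {
        switch (state)
        {
            case ds_start:         state = ds_try_fit; break;
            case ds_try_fit:       state = try_fit () ? ds_can_allocate : ds_trigger_gc; break;
            case ds_trigger_gc:    state = trigger_gc () ? ds_try_fit : ds_cant_allocate; break;
            case ds_can_allocate:  return true;   // caller owns a filled alloc context now
            case ds_cant_allocate: return false;  // caller reports OOM
        }
    }
}
#endif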
12676 #ifdef BACKGROUND_GC
12677 inline
12678 void gc_heap::bgc_track_loh_alloc()
12679 {
12680     if (current_c_gc_state == c_gc_state_planning)
12681     {
12682         Interlocked::Increment (&loh_alloc_thread_count);
12683         dprintf (3, ("h%d: inc lc: %d", heap_number, loh_alloc_thread_count));
12684     }
12685 }
12686
12687 inline
12688 void gc_heap::bgc_untrack_loh_alloc()
12689 {
12690     if (current_c_gc_state == c_gc_state_planning)
12691     {
12692         Interlocked::Decrement (&loh_alloc_thread_count);
12693         dprintf (3, ("h%d: dec lc: %d", heap_number, loh_alloc_thread_count));
12694     }
12695 }
12696
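// Editor's note: the track/untrack pair above is a manual scope counter.
// An RAII wrapper (illustrative only, not used by this file) shows the same
// idiom with the pairing enforced by the destructor:
#if 0
#include <atomic>

// Hypothetical scoped counter; the real code bumps loh_alloc_thread_count
// only while current_c_gc_state == c_gc_state_planning.
struct demo_scoped_count
{
    std::atomic<int>& n;
    demo_scoped_count (std::atomic<int>& counter) : n (counter) { n.fetch_add (1); }
    ~demo_scoped_count () { n.fetch_sub (1); }
};
#endif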
12697 BOOL gc_heap::bgc_loh_should_allocate()
12698 {
12699     size_t min_gc_size = dd_min_size (dynamic_data_of (max_generation + 1));
12700
12701     if ((bgc_begin_loh_size + bgc_loh_size_increased) < (min_gc_size * 10))
12702     {
12703         return TRUE;
12704     }
12705
12706     if (((bgc_begin_loh_size / end_loh_size) >= 2) || (bgc_loh_size_increased >= bgc_begin_loh_size))
12707     {
12708         if ((bgc_begin_loh_size / end_loh_size) > 2)
12709         {
12710             dprintf (3, ("alloc-ed too much before bgc started"));
12711         }
12712         else
12713         {
12714             dprintf (3, ("alloc-ed too much after bgc started"));
12715         }
12716         return FALSE;
12717     }
12718     else
12719     {
12720         bgc_alloc_spin_loh = (uint32_t)(((float)bgc_loh_size_increased / (float)bgc_begin_loh_size) * 10);
12721         return TRUE;
12722     }
12723 }
12724 #endif //BACKGROUND_GC
12725
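// Editor's note: a worked example (illustrative, not compiled) of the spin
// throttle bgc_loh_should_allocate computes above - the more LOH has grown
// relative to its size when the BGC started, the larger bgc_alloc_spin_loh
// gets, so allocating threads yield more often. The sizes below are made up.
#if 0
#include <cstdint>
#include <cassert>

int main ()
{
    uint64_t bgc_begin_loh_size     = 100 * 1024 * 1024; // LOH size at BGC start
    uint64_t bgc_loh_size_increased =  30 * 1024 * 1024; // growth since then

    // Same formula as above: a 0..10 spin factor proportional to relative growth.
    uint32_t spin = (uint32_t)(((float)bgc_loh_size_increased /
                                (float)bgc_begin_loh_size) * 10);
    assert (spin == 3); // 30% growth => yield factor 3
    return 0;
}
#endif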
12726 size_t gc_heap::get_large_seg_size (size_t size)
12727 {
12728     size_t default_seg_size = min_loh_segment_size;
12729 #ifdef SEG_MAPPING_TABLE
12730     size_t align_size =  default_seg_size;
12731 #else //SEG_MAPPING_TABLE
12732     size_t align_size =  default_seg_size / 2;
12733 #endif //SEG_MAPPING_TABLE
12734     int align_const = get_alignment_constant (FALSE);
12735     size_t large_seg_size = align_on_page (
12736         max (default_seg_size,
12737             ((size + 2 * Align(min_obj_size, align_const) + OS_PAGE_SIZE +
12738             align_size) / align_size * align_size)));
12739     return large_seg_size;
12740 }
12741
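// Editor's note: an illustrative restatement of the rounding done by
// get_large_seg_size above - the request plus per-object overhead is rounded
// up to a multiple of align_size, never allowed below the default LOH segment
// size, and finally page-aligned. The constants below are hypothetical stand-ins.
#if 0
#include <cstddef>

const size_t demo_default_seg_size = 16 * 1024 * 1024;   // pretend min_loh_segment_size
const size_t demo_overhead         = 2 * 24 + 4096;      // pretend 2*Align(min_obj_size)+OS_PAGE_SIZE

size_t demo_large_seg_size (size_t size)
{
    size_t align_size = demo_default_seg_size;           // SEG_MAPPING_TABLE flavor
    size_t rounded = (size + demo_overhead + align_size) / align_size * align_size;
    size_t seg = (rounded > demo_default_seg_size) ? rounded : demo_default_seg_size;
    // align_on_page would round seg up to a page boundary here; in this
    // example it is already page-aligned.
    return seg;
}
// demo_large_seg_size (20 MB) => 32 MB: one extra align_size chunk beyond the request.
#endif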
12742 BOOL gc_heap::loh_get_new_seg (generation* gen,
12743                                size_t size,
12744                                int align_const,
12745                                BOOL* did_full_compact_gc,
12746                                oom_reason* oom_r)
12747 {
12748     UNREFERENCED_PARAMETER(gen);
12749     UNREFERENCED_PARAMETER(align_const);
12750
12751     *did_full_compact_gc = FALSE;
12752
12753     size_t seg_size = get_large_seg_size (size);
12754
12755     heap_segment* new_seg = get_large_segment (seg_size, did_full_compact_gc);
12756
12757     if (new_seg)
12758     {
12759         loh_alloc_since_cg += seg_size;
12760     }
12761     else
12762     {
12763         *oom_r = oom_loh;
12764     }
12765
12766     return (new_seg != 0);
12767 }
12768
12769 BOOL gc_heap::retry_full_compact_gc (size_t size)
12770 {
12771     size_t seg_size = get_large_seg_size (size);
12772
12773     if (loh_alloc_since_cg >= (2 * (uint64_t)seg_size))
12774     {
12775         return TRUE;
12776     }
12777
12778 #ifdef MULTIPLE_HEAPS
12779     uint64_t total_alloc_size = 0;
12780     for (int i = 0; i < n_heaps; i++)
12781     {
12782         total_alloc_size += g_heaps[i]->loh_alloc_since_cg;
12783     }
12784
12785     if (total_alloc_size >= (2 * (uint64_t)seg_size))
12786     {
12787         return TRUE;
12788     }
12789 #endif //MULTIPLE_HEAPS
12790
12791     return FALSE;
12792 }
12793
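// Editor's note: the retry heuristic above in one line - a failed full
// compacting GC is worth repeating only if LOH allocation since the last one
// is at least twice the segment size we need (per heap, or summed across
// heaps under MULTIPLE_HEAPS). Illustrative restatement:
#if 0
#include <cstdint>

bool demo_should_retry (uint64_t loh_alloc_since_cg, uint64_t seg_size)
{
    return loh_alloc_since_cg >= 2 * seg_size;
}
// demo_should_retry (70 MB worth, 32 MB seg) => true: enough new LOH
// allocation since the last compacting GC to justify another one.
#endif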
12794 BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr,
12795                                       BOOL* did_full_compact_gc,
12796                                       bool loh_p)
12797 {
12798     BOOL bgc_in_progress = FALSE;
12799     *did_full_compact_gc = FALSE;
12800 #ifdef BACKGROUND_GC
12801     if (recursive_gc_sync::background_running_p())
12802     {
12803         bgc_in_progress = TRUE;
12804         size_t last_full_compact_gc_count = get_full_compact_gc_count();
12805         wait_for_background (awr, loh_p);
12806         size_t current_full_compact_gc_count = get_full_compact_gc_count();
12807         if (current_full_compact_gc_count > last_full_compact_gc_count)
12808         {
12809             *did_full_compact_gc = TRUE;
12810         }
12811     }
12812 #endif //BACKGROUND_GC
12813
12814     return bgc_in_progress;
12815 }
12816
12817 BOOL gc_heap::loh_try_fit (int gen_number,
12818                            size_t size, 
12819                            alloc_context* acontext,
12820                            int align_const,
12821                            BOOL* commit_failed_p,
12822                            oom_reason* oom_r)
12823 {
12824     BOOL can_allocate = TRUE;
12825
12826     if (!a_fit_free_list_large_p (size, acontext, align_const))
12827     {
12828         can_allocate = loh_a_fit_segment_end_p (gen_number, size, 
12829                                                 acontext, align_const, 
12830                                                 commit_failed_p, oom_r);
12831
12832 #ifdef BACKGROUND_GC
12833         if (can_allocate && recursive_gc_sync::background_running_p())
12834         {
12835             bgc_loh_size_increased += size;
12836         }
12837 #endif //BACKGROUND_GC
12838     }
12839 #ifdef BACKGROUND_GC
12840     else
12841     {
12842         if (recursive_gc_sync::background_running_p())
12843         {
12844             bgc_loh_allocated_in_free += size;
12845         }
12846     }
12847 #endif //BACKGROUND_GC
12848
12849     return can_allocate;
12850 }
12851
12852 BOOL gc_heap::trigger_full_compact_gc (gc_reason gr, 
12853                                        oom_reason* oom_r,
12854                                        bool loh_p)
12855 {
12856     BOOL did_full_compact_gc = FALSE;
12857
12858     size_t last_full_compact_gc_count = get_full_compact_gc_count();
12859
12860     // Set this so the next GC will be a full compacting GC.
12861     if (!last_gc_before_oom)
12862     {
12863         last_gc_before_oom = TRUE;
12864     }
12865
12866 #ifdef BACKGROUND_GC
12867     if (recursive_gc_sync::background_running_p())
12868     {
12869         wait_for_background (((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc), loh_p);
12870         dprintf (2, ("waited for BGC - done"));
12871     }
12872 #endif //BACKGROUND_GC
12873
12874     GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
12875     size_t current_full_compact_gc_count = get_full_compact_gc_count();
12876     if (current_full_compact_gc_count > last_full_compact_gc_count)
12877     {
12878         dprintf (3, ("a full compacting GC triggered while waiting for BGC (%d->%d)", last_full_compact_gc_count, current_full_compact_gc_count));
12879         assert (current_full_compact_gc_count > last_full_compact_gc_count);
12880         did_full_compact_gc = TRUE;
12881         goto exit;
12882     }
12883
12884     dprintf (3, ("h%d full GC", heap_number));
12885
12886     trigger_gc_for_alloc (max_generation, gr, msl, loh_p, mt_t_full_gc);
12887
12888     current_full_compact_gc_count = get_full_compact_gc_count();
12889
12890     if (current_full_compact_gc_count == last_full_compact_gc_count)
12891     {
12892         dprintf (2, ("attempted to trigger a full compacting GC but didn't get it"));
12893         // We requested a full GC but didn't get one because of the elevation
12894         // logic, which means we should fail.
12895         *oom_r = oom_unproductive_full_gc;
12896     }
12897     else
12898     {
12899         dprintf (3, ("h%d: T full compacting GC (%d->%d)", 
12900             heap_number, 
12901             last_full_compact_gc_count, 
12902             current_full_compact_gc_count));
12903
12904         assert (current_full_compact_gc_count > last_full_compact_gc_count);
12905         did_full_compact_gc = TRUE;
12906     }
12907
12908 exit:
12909     return did_full_compact_gc;
12910 }
12911
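// Editor's note: trigger_full_compact_gc (and check_and_wait_for_bgc above)
// rely on a count-compare idiom - snapshot the full-compacting-GC count,
// wait or trigger, then compare; if the count moved, some thread (not
// necessarily us) already did the work. A minimal sketch with hypothetical
// names:
#if 0
#include <atomic>
#include <cstddef>

std::atomic<size_t> g_demo_full_gc_count {0};   // hypothetical global counter

bool demo_ensure_full_gc (void (*request_gc) ())
{
    size_t before = g_demo_full_gc_count.load ();
    request_gc ();                              // may be satisfied by another thread's GC
    size_t after = g_demo_full_gc_count.load ();
    return after > before;                      // true only if a full GC actually happened
}
#endif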
12912 #ifdef RECORD_LOH_STATE
12913 void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id)
12914 {
12915     // When the state is can_allocate we have already released the more
12916     // space lock, so we don't log that state here - this code is not
12917     // thread safe.
12918     if (loh_state_to_save != a_state_can_allocate)
12919     {
12920         last_loh_states[loh_state_index].alloc_state = loh_state_to_save;
12921         last_loh_states[loh_state_index].thread_id = thread_id;
12922         loh_state_index++;
12923
12924         if (loh_state_index == max_saved_loh_states)
12925         {
12926             loh_state_index = 0;
12927         }
12928
12929         assert (loh_state_index < max_saved_loh_states);
12930     }
12931 }
12932 #endif //RECORD_LOH_STATE
12933
12934 BOOL gc_heap::allocate_large (int gen_number,
12935                               size_t size, 
12936                               alloc_context* acontext,
12937                               int align_const)
12938 {
12939 #ifdef BACKGROUND_GC
12940     if (recursive_gc_sync::background_running_p())
12941     {
12942         background_loh_alloc_count++;
12943         //if ((background_loh_alloc_count % bgc_alloc_spin_count_loh) == 0)
12944         {
12945             if (bgc_loh_should_allocate())
12946             {
12947                 if (!bgc_alloc_spin_loh)
12948                 {
12949                     add_saved_spinlock_info (true, me_release, mt_alloc_large);
12950                     leave_spin_lock (&more_space_lock_loh);
12951                     bool cooperative_mode = enable_preemptive();
12952                     GCToOSInterface::YieldThread (bgc_alloc_spin_loh);
12953                     disable_preemptive (cooperative_mode);
12954                     enter_spin_lock (&more_space_lock_loh);
12955                     add_saved_spinlock_info (true, me_acquire, mt_alloc_large);
12956                     dprintf (SPINLOCK_LOG, ("[%d]spin Emsl loh", heap_number));
12957                 }
12958             }
12959             else
12960             {
12961                 wait_for_background (awr_loh_alloc_during_bgc, true);
12962             }
12963         }
12964     }
12965 #endif //BACKGROUND_GC
12966
12967     gc_reason gr = reason_oos_loh;
12968     generation* gen = generation_of (gen_number);
12969     oom_reason oom_r = oom_no_failure;
12970     size_t current_full_compact_gc_count = 0;
12971
12972     // No variable values should be "carried over" from one state to the other.
12973     // That's why there are local variables for each state.
12974     allocation_state loh_alloc_state = a_state_start;
12975 #ifdef RECORD_LOH_STATE
12976     EEThreadId current_thread_id;
12977     current_thread_id.SetToCurrentThread();
12978 #endif //RECORD_LOH_STATE
12979
12980     // If we can get a new seg it means allocation will succeed.
12981     while (1)
12982     {
12983         dprintf (3, ("[h%d]loh state is %s", heap_number, allocation_state_str[loh_alloc_state]));
12984
12985 #ifdef RECORD_LOH_STATE
12986         add_saved_loh_state (loh_alloc_state, current_thread_id);
12987 #endif //RECORD_LOH_STATE
12988         switch (loh_alloc_state)
12989         {
12990             case a_state_can_allocate:
12991             case a_state_cant_allocate:
12992             {
12993                 goto exit;
12994             }
12995             case a_state_start:
12996             {
12997                 loh_alloc_state = a_state_try_fit;
12998                 break;
12999             }
13000             case a_state_try_fit:
13001             {
13002                 BOOL commit_failed_p = FALSE;
13003                 BOOL can_use_existing_p = FALSE;
13004
13005                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
13006                                                   align_const, &commit_failed_p, &oom_r);
13007                 loh_alloc_state = (can_use_existing_p ?
13008                                         a_state_can_allocate : 
13009                                         (commit_failed_p ? 
13010                                             a_state_trigger_full_compact_gc :
13011                                             a_state_acquire_seg));
13012                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13013                 break;
13014             }
13015             case a_state_try_fit_new_seg:
13016             {
13017                 BOOL commit_failed_p = FALSE;
13018                 BOOL can_use_existing_p = FALSE;
13019
13020                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
13021                                                   align_const, &commit_failed_p, &oom_r);
13022                 // Even after we got a new seg it doesn't necessarily mean we can allocate;
13023                 // another LOH allocating thread could have beaten us to acquiring the msl,
13024                 // so we need to try again.
13025                 loh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_try_fit);
13026                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13027                 break;
13028             }
13029             case a_state_try_fit_new_seg_after_cg:
13030             {
13031                 BOOL commit_failed_p = FALSE;
13032                 BOOL can_use_existing_p = FALSE;
13033
13034                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
13035                                                   align_const, &commit_failed_p, &oom_r);
13036                 // Even after we got a new seg it doesn't necessarily mean we can allocate;
13037                 // another LOH allocating thread could have beaten us to acquiring the msl,
13038                 // so we need to try again. However, if we failed to commit (which means
13039                 // we did have space on the seg), we bail right away 'cause we already
13040                 // did a full compacting GC.
13041                 loh_alloc_state = (can_use_existing_p ? 
13042                                         a_state_can_allocate : 
13043                                         (commit_failed_p ? 
13044                                             a_state_cant_allocate :
13045                                             a_state_acquire_seg_after_cg));
13046                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13047                 break;
13048             }
13049             case a_state_try_fit_no_seg:
13050             {
13051                 BOOL commit_failed_p = FALSE;
13052                 BOOL can_use_existing_p = FALSE;
13053
13054                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
13055                                                   align_const, &commit_failed_p, &oom_r);
13056                 loh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_cant_allocate);
13057                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13058                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13059                 break;
13060             }
13061             case a_state_try_fit_after_cg:
13062             {
13063                 BOOL commit_failed_p = FALSE;
13064                 BOOL can_use_existing_p = FALSE;
13065
13066                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
13067                                                   align_const, &commit_failed_p, &oom_r);
13068                 loh_alloc_state = (can_use_existing_p ?
13069                                         a_state_can_allocate : 
13070                                         (commit_failed_p ? 
13071                                             a_state_cant_allocate :
13072                                             a_state_acquire_seg_after_cg));
13073                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13074                 break;
13075             }
13076             case a_state_try_fit_after_bgc:
13077             {
13078                 BOOL commit_failed_p = FALSE;
13079                 BOOL can_use_existing_p = FALSE;
13080
13081                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
13082                                                   align_const, &commit_failed_p, &oom_r);
13083                 loh_alloc_state = (can_use_existing_p ?
13084                                         a_state_can_allocate : 
13085                                         (commit_failed_p ? 
13086                                             a_state_trigger_full_compact_gc :
13087                                             a_state_acquire_seg_after_bgc));
13088                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13089                 break;
13090             }
13091             case a_state_acquire_seg:
13092             {
13093                 BOOL can_get_new_seg_p = FALSE;
13094                 BOOL did_full_compacting_gc = FALSE;
13095
13096                 current_full_compact_gc_count = get_full_compact_gc_count();
13097
13098                 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
13099                 loh_alloc_state = (can_get_new_seg_p ? 
13100                                         a_state_try_fit_new_seg : 
13101                                         (did_full_compacting_gc ? 
13102                                             a_state_check_retry_seg :
13103                                             a_state_check_and_wait_for_bgc));
13104                 break;
13105             }
13106             case a_state_acquire_seg_after_cg:
13107             {
13108                 BOOL can_get_new_seg_p = FALSE;
13109                 BOOL did_full_compacting_gc = FALSE;
13110
13111                 current_full_compact_gc_count = get_full_compact_gc_count();
13112
13113                 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
13114                 // Since we release the msl before we try to allocate a seg, other
13115                 // threads could have allocated a bunch of segments before us so
13116                 // we might need to retry.
13117                 loh_alloc_state = (can_get_new_seg_p ? 
13118                                         a_state_try_fit_new_seg_after_cg : 
13119                                         a_state_check_retry_seg);
13120                 break;
13121             }
13122             case a_state_acquire_seg_after_bgc:
13123             {
13124                 BOOL can_get_new_seg_p = FALSE;
13125                 BOOL did_full_compacting_gc = FALSE;
13126              
13127                 current_full_compact_gc_count = get_full_compact_gc_count();
13128
13129                 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r); 
13130                 loh_alloc_state = (can_get_new_seg_p ? 
13131                                         a_state_try_fit_new_seg : 
13132                                         (did_full_compacting_gc ? 
13133                                             a_state_check_retry_seg :
13134                                             a_state_trigger_full_compact_gc));
13135                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13136                 break;
13137             }
13138             case a_state_check_and_wait_for_bgc:
13139             {
13140                 BOOL bgc_in_progress_p = FALSE;
13141                 BOOL did_full_compacting_gc = FALSE;
13142
13143                 bgc_in_progress_p = check_and_wait_for_bgc (awr_loh_oos_bgc, &did_full_compacting_gc, true);
13144                 loh_alloc_state = (!bgc_in_progress_p ?
13145                                         a_state_trigger_full_compact_gc : 
13146                                         (did_full_compacting_gc ? 
13147                                             a_state_try_fit_after_cg :
13148                                             a_state_try_fit_after_bgc));
13149                 break;
13150             }
13151             case a_state_trigger_full_compact_gc:
13152             {
13153                 if (fgn_maxgen_percent)
13154                 {
13155                     dprintf (2, ("FGN: LOH doing last GC before we throw OOM"));
13156                     send_full_gc_notification (max_generation, FALSE);
13157                 }
13158
13159                 BOOL got_full_compacting_gc = FALSE;
13160
13161                 got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, true);
13162                 loh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
13163                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13164                 break;
13165             }
13166             case a_state_check_retry_seg:
13167             {
13168                 BOOL should_retry_gc = retry_full_compact_gc (size);
13169                 BOOL should_retry_get_seg = FALSE;
13170                 if (!should_retry_gc)
13171                 {
13172                     size_t last_full_compact_gc_count = current_full_compact_gc_count;
13173                     current_full_compact_gc_count = get_full_compact_gc_count();
13174
13175                     if (current_full_compact_gc_count > (last_full_compact_gc_count + 1))
13176                     {
13177                         should_retry_get_seg = TRUE;
13178                     }
13179                 }
13180     
13181                 loh_alloc_state = (should_retry_gc ? 
13182                                         a_state_trigger_full_compact_gc : 
13183                                         (should_retry_get_seg ?
13184                                             a_state_acquire_seg_after_cg :
13185                                             a_state_cant_allocate));
13186                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13187                 break;
13188             }
13189             default:
13190             {
13191                 assert (!"Invalid state!");
13192                 break;
13193             }
13194         }
13195     }
13196
13197 exit:
13198     if (loh_alloc_state == a_state_cant_allocate)
13199     {
13200         assert (oom_r != oom_no_failure);
13201         handle_oom (heap_number, 
13202                     oom_r, 
13203                     size,
13204                     0,
13205                     0);
13206
13207         add_saved_spinlock_info (true, me_release, mt_alloc_large_cant);
13208         leave_spin_lock (&more_space_lock_loh);
13209     }
13210
13211     return (loh_alloc_state == a_state_can_allocate);
13212 }
13213
13214 // BGC's final mark phase will acquire the msl, so release it here and re-acquire.
13215 void gc_heap::trigger_gc_for_alloc (int gen_number, gc_reason gr, 
13216                                     GCSpinLock* msl, bool loh_p, 
13217                                     msl_take_state take_state)
13218 {
13219 #ifdef BACKGROUND_GC
13220     if (loh_p)
13221     {
13222         add_saved_spinlock_info (loh_p, me_release, take_state);
13223         leave_spin_lock (msl);
13224     }
13225 #endif //BACKGROUND_GC
13226
13227     vm_heap->GarbageCollectGeneration (gen_number, gr);
13228
13229 #ifdef MULTIPLE_HEAPS
13230     if (!loh_p)
13231     {
13232         enter_spin_lock (msl);
13233         add_saved_spinlock_info (loh_p, me_acquire, take_state);
13234     }
13235 #endif //MULTIPLE_HEAPS
13236
13237 #ifdef BACKGROUND_GC
13238     if (loh_p)
13239     {
13240         enter_spin_lock (msl);
13241         add_saved_spinlock_info (loh_p, me_acquire, take_state);
13242     }
13243 #endif //BACKGROUND_GC
13244 }
13245
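// Editor's note: the shape of trigger_gc_for_alloc above, sketched with a
// plain mutex (illustrative only) - drop the allocation lock across a
// blocking operation that may itself need that lock (as BGC's final mark
// phase needs the msl), then retake it before returning to the caller.
#if 0
#include <mutex>

void demo_collect_holding_lock (std::mutex& alloc_lock, void (*collect) ())
{
    alloc_lock.unlock ();   // let the GC (or other allocators) take the lock
    collect ();
    alloc_lock.lock ();     // caller continues under the lock as before
}
#endif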
13246 int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
13247                                    int gen_number)
13248 {
13249     if (gc_heap::gc_started)
13250     {
13251         wait_for_gc_done();
13252         return -1;
13253     }
13254
13255     bool loh_p = (gen_number > 0);
13256     GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
13257
13258 #ifdef SYNCHRONIZATION_STATS
13259     int64_t msl_acquire_start = GCToOSInterface::QueryPerformanceCounter();
13260 #endif //SYNCHRONIZATION_STATS
13261     enter_spin_lock (msl);
13262     add_saved_spinlock_info (loh_p, me_acquire, mt_try_alloc);
13263     dprintf (SPINLOCK_LOG, ("[%d]Emsl for alloc", heap_number));
13264 #ifdef SYNCHRONIZATION_STATS
13265     int64_t msl_acquire = GCToOSInterface::QueryPerformanceCounter() - msl_acquire_start;
13266     total_msl_acquire += msl_acquire;
13267     num_msl_acquired++;
13268     if (msl_acquire > 200)
13269     {
13270         num_high_msl_acquire++;
13271     }
13272     else
13273     {
13274         num_low_msl_acquire++;
13275     }
13276 #endif //SYNCHRONIZATION_STATS
13277
13278     /*
13279     // We are commenting this out 'cause we don't see the point - we already
13280     // have checked gc_started when we were acquiring the msl - no need to check
13281     // again. This complicates the logic in bgc_suspend_EE 'cause that one would
13282     // need to release msl which causes all sorts of trouble.
13283     if (gc_heap::gc_started)
13284     {
13285 #ifdef SYNCHRONIZATION_STATS
13286         good_suspension++;
13287 #endif //SYNCHRONIZATION_STATS
13288         BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0;
13289         if (!fStress)
13290         {
13291             //Rendez vous early (MP scaling issue)
13292             //dprintf (1, ("[%d]waiting for gc", heap_number));
13293             wait_for_gc_done();
13294 #ifdef MULTIPLE_HEAPS
13295             return -1;
13296 #endif //MULTIPLE_HEAPS
13297         }
13298     }
13299     */
13300
13301     dprintf (3, ("requested to allocate %d bytes on gen%d", size, gen_number));
13302
13303     int align_const = get_alignment_constant (gen_number != (max_generation+1));
13304
13305     if (fgn_maxgen_percent)
13306     {
13307         check_for_full_gc (gen_number, size);
13308     }
13309
13310     if (!(new_allocation_allowed (gen_number)))
13311     {
13312         if (fgn_maxgen_percent && (gen_number == 0))
13313         {
13314             // We only check gen0 every so often, so take this opportunity to check again.
13315             check_for_full_gc (gen_number, size);
13316         }
13317
13318 #ifdef BACKGROUND_GC
13319         wait_for_bgc_high_memory (awr_gen0_alloc, loh_p);
13320 #endif //BACKGROUND_GC
13321
13322 #ifdef SYNCHRONIZATION_STATS
13323         bad_suspension++;
13324 #endif //SYNCHRONIZATION_STATS
13325         dprintf (/*100*/ 2, ("running out of budget on gen%d, gc", gen_number));
13326
13327         if (!settings.concurrent || (gen_number == 0))
13328         {
13329             trigger_gc_for_alloc (0, ((gen_number == 0) ? reason_alloc_soh : reason_alloc_loh),
13330                                   msl, loh_p, mt_try_budget);
13331         }
13332     }
13333
13334     BOOL can_allocate = ((gen_number == 0) ?
13335         allocate_small (gen_number, size, acontext, align_const) :
13336         allocate_large (gen_number, size, acontext, align_const));
13337    
13338     if (can_allocate)
13339     {
13340         size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr;
13341         int etw_allocation_index = ((gen_number == 0) ? 0 : 1);
13342
13343         etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;
13344
13345
13346         if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
13347         {
13348 #ifdef FEATURE_REDHAWK
13349             FIRE_EVENT(GCAllocationTick_V1, (uint32_t)etw_allocation_running_amount[etw_allocation_index],
13350                                             (gen_number == 0) ? gc_etw_alloc_soh : gc_etw_alloc_loh);
13351 #else
13352             // Unfortunately some of the ETW macros do not check whether the ETW feature is enabled.
13353             // The ones that do are much less efficient.
13354 #if defined(FEATURE_EVENT_TRACE)
13355             if (EVENT_ENABLED(GCAllocationTick_V3))
13356             {
13357                 fire_etw_allocation_event (etw_allocation_running_amount[etw_allocation_index], gen_number, acontext->alloc_ptr);
13358             }
13359 #endif //FEATURE_EVENT_TRACE
13360 #endif //FEATURE_REDHAWK
13361             etw_allocation_running_amount[etw_allocation_index] = 0;
13362         }
13363     }
13364
13365     return (int)can_allocate;
13366 }
13367
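// Editor's note: the allocation-tick accounting at the end of
// try_allocate_more_space batches event firing - bytes accumulate per flavor
// (SOH/LOH) and an event fires only when the running amount crosses
// etw_allocation_tick, after which the counter resets. A sketch with
// hypothetical names and threshold:
#if 0
#include <cstdio>
#include <cstddef>

const size_t demo_tick = 100 * 1024;        // hypothetical threshold
size_t demo_running[2] = { 0, 0 };          // [0]=SOH, [1]=LOH

void demo_note_alloc (int flavor, size_t bytes)
{
    demo_running[flavor] += bytes;
    if (demo_running[flavor] > demo_tick)
    {
        printf ("tick: flavor %d, %zu bytes\n", flavor, demo_running[flavor]);
        demo_running[flavor] = 0;           // reset so events stay coarse-grained
    }
}
#endif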
13368 #ifdef MULTIPLE_HEAPS
13369 void gc_heap::balance_heaps (alloc_context* acontext)
13370 {
13371
13372     if (acontext->alloc_count < 4)
13373     {
13374         if (acontext->alloc_count == 0)
13375         {
13376             acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, 0) ));
13377             gc_heap* hp = acontext->get_home_heap()->pGenGCHeap;
13378             dprintf (3, ("First allocation for context %Ix on heap %d\n", (size_t)acontext, (size_t)hp->heap_number));
13379             acontext->set_alloc_heap(acontext->get_home_heap());
13380             hp->alloc_context_count++;
13381         }
13382     }
13383     else
13384     {
13385         BOOL set_home_heap = FALSE;
13386         int hint = 0;
13387
13388         if (heap_select::can_find_heap_fast())
13389         {
13390             if (acontext->get_home_heap() != NULL)
13391                 hint = acontext->get_home_heap()->pGenGCHeap->heap_number;
13392             if (acontext->get_home_heap() != GCHeap::GetHeap(hint = heap_select::select_heap(acontext, hint)) || ((acontext->alloc_count & 15) == 0))
13393             {
13394                 set_home_heap = TRUE;
13395             }
13396         }
13397         else
13398         {
13399             // can't use gdt
13400             if ((acontext->alloc_count & 3) == 0)
13401                 set_home_heap = TRUE;
13402         }
13403
13404         if (set_home_heap)
13405         {
13406 /*
13407             // Since we are balancing up to MAX_SUPPORTED_CPUS, no need for this.
13408             if (n_heaps > MAX_SUPPORTED_CPUS)
13409             {
13410                 // on machines with many processors cache affinity is really king, so don't even try
13411                 // to balance on these.
13412                 acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext, hint) );
13413                 acontext->alloc_heap = acontext->home_heap;
13414             }
13415             else
13416 */
13417             {
13418                 gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
13419
13420                 dynamic_data* dd = org_hp->dynamic_data_of (0);
13421                 ptrdiff_t org_size = dd_new_allocation (dd);
13422                 int org_alloc_context_count;
13423                 int max_alloc_context_count;
13424                 gc_heap* max_hp;
13425                 ptrdiff_t max_size;
13426                 size_t delta = dd_min_size (dd)/4;
13427
13428                 int start, end, finish;
13429                 heap_select::get_heap_range_for_heap(org_hp->heap_number, &start, &end);
13430                 finish = start + n_heaps;
13431
13432 try_again:
13433                 do
13434                 {
13435                     max_hp = org_hp;
13436                     max_size = org_size + delta;
13437                     acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, hint) ));
13438
13439                     if (org_hp == acontext->get_home_heap()->pGenGCHeap)
13440                         max_size = max_size + delta;
13441
13442                     org_alloc_context_count = org_hp->alloc_context_count;
13443                     max_alloc_context_count = org_alloc_context_count;
13444                     if (max_alloc_context_count > 1)
13445                         max_size /= max_alloc_context_count;
13446
13447                     for (int i = start; i < end; i++)
13448                     {
13449                         gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
13450                         dd = hp->dynamic_data_of (0);
13451                         ptrdiff_t size = dd_new_allocation (dd);
13452                         if (hp == acontext->get_home_heap()->pGenGCHeap)
13453                             size = size + delta;
13454                         int hp_alloc_context_count = hp->alloc_context_count;
13455                         if (hp_alloc_context_count > 0)
13456                             size /= (hp_alloc_context_count + 1);
13457                         if (size > max_size)
13458                         {
13459                             max_hp = hp;
13460                             max_size = size;
13461                             max_alloc_context_count = hp_alloc_context_count;
13462                         }
13463                     }
13464                 }
13465                 while (org_alloc_context_count != org_hp->alloc_context_count ||
13466                        max_alloc_context_count != max_hp->alloc_context_count);
13467
13468                 if ((max_hp == org_hp) && (end < finish))
13469                 {   
13470                     start = end; end = finish; 
13471                     delta = dd_min_size(dd)/2; // Make it twice as hard to balance to remote nodes on NUMA.
13472                     goto try_again;
13473                 }
13474
13475                 if (max_hp != org_hp)
13476                 {
13477                     org_hp->alloc_context_count--;
13478                     max_hp->alloc_context_count++;
13479                     acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
13480                     if (!gc_thread_no_affinitize_p)
13481                     {
13482                         if (GCToOSInterface::CanEnableGCCPUGroups())
13483                         {   //only set ideal processor when max_hp and org_hp are in the same cpu
13484                             //group. DO NOT MOVE THREADS ACROSS CPU GROUPS
13485                             uint16_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
13486                             uint16_t max_gn = heap_select::find_cpu_group_from_heap_no(max_hp->heap_number);
13487                             if (org_gn == max_gn) //only set within CPU group, so SetThreadIdealProcessor is enough
13488                             {   
13489                                 uint16_t group_proc_no = heap_select::find_group_proc_from_heap_no(max_hp->heap_number);
13490
13491                                 GCThreadAffinity affinity;
13492                                 affinity.Processor = group_proc_no;
13493                                 affinity.Group = org_gn;
13494                                 if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
13495                                 {
13496                                     dprintf (3, ("Failed to set the ideal processor and group for heap %d.",
13497                                                 org_hp->heap_number));
13498                                 }
13499                             }
13500                         }
13501                         else 
13502                         {
13503                             uint16_t proc_no = heap_select::find_proc_no_from_heap_no(max_hp->heap_number);
13504
13505                             GCThreadAffinity affinity;
13506                             affinity.Processor = proc_no;
13507                             affinity.Group = GCThreadAffinity::None;
13508
13509                             if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
13510                             {
13511                                 dprintf (3, ("Failed to set the ideal processor for heap %d.",
13512                                             org_hp->heap_number));
13513                             }
13514                         }
13515                     }
13516                     dprintf (3, ("Switching context %p (home heap %d) ", 
13517                                  acontext,
13518                         acontext->get_home_heap()->pGenGCHeap->heap_number));
13519                     dprintf (3, (" from heap %d (%Id free bytes, %d contexts) ", 
13520                                  org_hp->heap_number,
13521                                  org_size,
13522                                  org_alloc_context_count));
13523                     dprintf (3, (" to heap %d (%Id free bytes, %d contexts)\n", 
13524                                  max_hp->heap_number,
13525                                  dd_new_allocation(max_hp->dynamic_data_of(0)),
13526                                                    max_alloc_context_count));
13527                 }
13528             }
13529         }
13530     }
13531     acontext->alloc_count++;
13532 }
13533
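// Editor's note: a compressed sketch (hypothetical types, not compiled) of
// the balancing search above - pick the heap with the most remaining gen0
// budget, where the incumbent heap gets a head start of delta and each
// heap's budget is divided by its alloc-context count so crowded heaps look
// less attractive. The real loop also rescans if context counts move under it.
#if 0
#include <cstddef>
#include <vector>

struct demo_heap { ptrdiff_t budget; int contexts; };

size_t demo_pick_heap (const std::vector<demo_heap>& heaps, size_t org, ptrdiff_t delta)
{
    size_t best = org;
    ptrdiff_t best_score = heaps[org].budget + delta;   // incumbent bias
    if (heaps[org].contexts > 1)
        best_score /= heaps[org].contexts;
    for (size_t i = 0; i < heaps.size (); i++)
    {
        if (i == org)
            continue;
        ptrdiff_t score = heaps[i].budget;
        if (heaps[i].contexts > 0)
            score /= (heaps[i].contexts + 1);           // joining makes it one more crowded
        if (score > best_score)
        {
            best = i;
            best_score = score;
        }
    }
    return best;
}
#endif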
13534 gc_heap* gc_heap::balance_heaps_loh (alloc_context* acontext, size_t /*size*/)
13535 {
13536     gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
13537     //dprintf (1, ("LA: %Id", size));
13538
13539     //if (size > 128*1024)
13540     if (1)
13541     {
13542         dynamic_data* dd = org_hp->dynamic_data_of (max_generation + 1);
13543
13544         ptrdiff_t org_size = dd_new_allocation (dd);
13545         gc_heap* max_hp;
13546         ptrdiff_t max_size;
13547         size_t delta = dd_min_size (dd) * 4;
13548
13549         int start, end, finish;
13550         heap_select::get_heap_range_for_heap(org_hp->heap_number, &start, &end);
13551         finish = start + n_heaps;
13552
13553 try_again:
13554         {
13555             max_hp = org_hp;
13556             max_size = org_size + delta;
13557             dprintf (3, ("orig hp: %d, max size: %d",
13558                 org_hp->heap_number,
13559                 max_size));
13560
13561             for (int i = start; i < end; i++)
13562             {
13563                 gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
13564                 dd = hp->dynamic_data_of (max_generation + 1);
13565                 ptrdiff_t size = dd_new_allocation (dd);
13566                 dprintf (3, ("hp: %d, size: %d",
13567                     hp->heap_number,
13568                     size));
13569                 if (size > max_size)
13570                 {
13571                     max_hp = hp;
13572                     max_size = size;
13573                     dprintf (3, ("max hp: %d, max size: %d",
13574                         max_hp->heap_number,
13575                         max_size));
13576                 }
13577             }
13578         }
13579
13580         if ((max_hp == org_hp) && (end < finish))
13581         {
13582             start = end; end = finish;
13583             delta = dd_min_size(dd) * 4;   // Need to tune delta
13584             goto try_again;
13585         }
13586
13587         if (max_hp != org_hp)
13588         {
13589             dprintf (3, ("loh: %d(%Id)->%d(%Id)", 
13590                 org_hp->heap_number, dd_new_allocation (org_hp->dynamic_data_of (max_generation + 1)),
13591                 max_hp->heap_number, dd_new_allocation (max_hp->dynamic_data_of (max_generation + 1))));
13592         }
13593
13594         return max_hp;
13595     }
13596     else
13597     {
13598         return org_hp;
13599     }
13600 }
13601 #endif //MULTIPLE_HEAPS
13602
13603 BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
13604                                   int alloc_generation_number)
13605 {
13606     int status;
13607     do
13608     { 
13609 #ifdef MULTIPLE_HEAPS
13610         if (alloc_generation_number == 0)
13611         {
13612             balance_heaps (acontext);
13613             status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
13614         }
13615         else
13616         {
13617             gc_heap* alloc_heap = balance_heaps_loh (acontext, size);
13618             status = alloc_heap->try_allocate_more_space (acontext, size, alloc_generation_number);
13619         }
13620 #else
13621         status = try_allocate_more_space (acontext, size, alloc_generation_number);
13622 #endif //MULTIPLE_HEAPS
13623     }
13624     while (status == -1);
13625     
13626     return (status != 0);
13627 }
13628
13629 inline
13630 CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext)
13631 {
13632     size_t size = Align (jsize);
13633     assert (size >= Align (min_obj_size));
13634     {
13635     retry:
13636         uint8_t*  result = acontext->alloc_ptr;
13637         acontext->alloc_ptr+=size;
13638         if (acontext->alloc_ptr <= acontext->alloc_limit)
13639         {
13640             CObjectHeader* obj = (CObjectHeader*)result;
13641             assert (obj != 0);
13642             return obj;
13643         }
13644         else
13645         {
13646             acontext->alloc_ptr -= size;
13647
13648 #ifdef _MSC_VER
13649 #pragma inline_depth(0)
13650 #endif //_MSC_VER
13651
13652             if (! allocate_more_space (acontext, size, 0))
13653                 return 0;
13654
13655 #ifdef _MSC_VER
13656 #pragma inline_depth(20)
13657 #endif //_MSC_VER
13658
13659             goto retry;
13660         }
13661     }
13662 }
13663
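// Editor's note: allocate above is the classic bump-pointer fast path -
// advance alloc_ptr, and only if it passes alloc_limit roll the pointer back
// and take the slow path for more space. Isolated sketch (hypothetical names):
#if 0
#include <cstdint>
#include <cstddef>

struct demo_ctx { uint8_t* ptr; uint8_t* limit; };

// Returns the object start, or nullptr when the slow path is needed.
void* demo_bump_alloc (demo_ctx* c, size_t size)
{
    uint8_t* result = c->ptr;
    c->ptr += size;
    if (c->ptr <= c->limit)
        return result;      // fast path: fits in the current alloc context
    c->ptr -= size;         // roll back; caller must refill the context and retry
    return nullptr;
}
#endif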
13664 inline
13665 CObjectHeader* gc_heap::try_fast_alloc (size_t jsize)
13666 {
13667     size_t size = Align (jsize);
13668     assert (size >= Align (min_obj_size));
13669     generation* gen = generation_of (0);
13670     uint8_t*  result = generation_allocation_pointer (gen);
13671     generation_allocation_pointer (gen) += size;
13672     if (generation_allocation_pointer (gen) <=
13673         generation_allocation_limit (gen))
13674     {
13675         return (CObjectHeader*)result;
13676     }
13677     else
13678     {
13679         generation_allocation_pointer (gen) -= size;
13680         return 0;
13681     }
13682 }
13683 void  gc_heap::leave_allocation_segment (generation* gen)
13684 {
13685     adjust_limit (0, 0, gen, max_generation);
13686 }
13687
13688 void gc_heap::init_free_and_plug()
13689 {
13690 #ifdef FREE_USAGE_STATS
13691     for (int i = 0; i <= settings.condemned_generation; i++)
13692     {
13693         generation* gen = generation_of (i);
13694         memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces));
13695         memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
13696         memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces));
13697     }
13698
13699     if (settings.condemned_generation != max_generation)
13700     {
13701         for (int i = (settings.condemned_generation + 1); i <= max_generation; i++)
13702         {
13703             generation* gen = generation_of (i);
13704             memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
13705         }
13706     }
13707 #endif //FREE_USAGE_STATS
13708 }
13709
13710 void gc_heap::print_free_and_plug (const char* msg)
13711 {
13712 #if defined(FREE_USAGE_STATS) && defined(SIMPLE_DPRINTF)
13713     int older_gen = ((settings.condemned_generation == max_generation) ? max_generation : (settings.condemned_generation + 1));
13714     for (int i = 0; i <= older_gen; i++)
13715     {
13716         generation* gen = generation_of (i);
13717         for (int j = 0; j < NUM_GEN_POWER2; j++)
13718         {
13719             if ((gen->gen_free_spaces[j] != 0) || (gen->gen_plugs[j] != 0))
13720             {
13721                 dprintf (2, ("[%s][h%d][%s#%d]gen%d: 2^%d: F: %Id, P: %Id", 
13722                     msg, 
13723                     heap_number, 
13724                     (settings.concurrent ? "BGC" : "GC"),
13725                     settings.gc_index,
13726                     i,
13727                     (j + 9), gen->gen_free_spaces[j], gen->gen_plugs[j]));
13728             }
13729         }
13730     }
13731 #else
13732     UNREFERENCED_PARAMETER(msg);
13733 #endif //FREE_USAGE_STATS && SIMPLE_DPRINTF
13734 }
13735
13736 void gc_heap::add_gen_plug (int gen_number, size_t plug_size)
13737 {
13738 #ifdef FREE_USAGE_STATS
13739     dprintf (3, ("adding plug size %Id to gen%d", plug_size, gen_number));
13740     generation* gen = generation_of (gen_number);
13741     size_t sz = BASE_GEN_SIZE;
13742     int i = 0;
13743
13744     for (; i < NUM_GEN_POWER2; i++)
13745     {
13746         if (plug_size < sz)
13747         {
13748             break;
13749         }
13750         sz = sz * 2;
13751     }
13752     
13753     (gen->gen_plugs[i])++;
13754 #else
13755     UNREFERENCED_PARAMETER(gen_number);
13756     UNREFERENCED_PARAMETER(plug_size);
13757 #endif //FREE_USAGE_STATS
13758 }
13759
13760 void gc_heap::add_item_to_current_pinned_free (int gen_number, size_t free_size)
13761 {
13762 #ifdef FREE_USAGE_STATS
13763     generation* gen = generation_of (gen_number);
13764     size_t sz = BASE_GEN_SIZE;
13765     int i = 0;
13766
13767     for (; i < NUM_GEN_POWER2; i++)
13768     {
13769         if (free_size < sz)
13770         {
13771             break;
13772         }
13773         sz = sz * 2;
13774     }
13775     
13776     (gen->gen_current_pinned_free_spaces[i])++;
13777     generation_pinned_free_obj_space (gen) += free_size;
13778     dprintf (3, ("left pin free %Id(2^%d) to gen%d, total %Id bytes (%Id)", 
13779         free_size, (i + 10), gen_number, 
13780         generation_pinned_free_obj_space (gen),
13781         gen->gen_current_pinned_free_spaces[i]));
13782 #else
13783     UNREFERENCED_PARAMETER(gen_number);
13784     UNREFERENCED_PARAMETER(free_size);
13785 #endif //FREE_USAGE_STATS
13786 }
13787
13788 void gc_heap::add_gen_free (int gen_number, size_t free_size)
13789 {
13790 #ifdef FREE_USAGE_STATS
13791     dprintf (3, ("adding free size %Id to gen%d", free_size, gen_number));
13792     generation* gen = generation_of (gen_number);
13793     size_t sz = BASE_GEN_SIZE;
13794     int i = 0;
13795
13796     for (; i < NUM_GEN_POWER2; i++)
13797     {
13798         if (free_size < sz)
13799         {
13800             break;
13801         }
13802         sz = sz * 2;
13803     }
13804     
13805     (gen->gen_free_spaces[i])++;
13806 #else
13807     UNREFERENCED_PARAMETER(gen_number);
13808     UNREFERENCED_PARAMETER(free_size);
13809 #endif //FREE_USAGE_STATS
13810 }
13811
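// Editor's note: add_gen_free, remove_gen_free, add_gen_plug and
// add_item_to_current_pinned_free all map a size to a histogram bucket by
// doubling from BASE_GEN_SIZE, i.e. bucket i covers sizes below
// BASE_GEN_SIZE * 2^i, with the last bucket catching everything else.
// Standalone restatement with hypothetical constants:
#if 0
#include <cstddef>

const size_t demo_base_size = 512;  // hypothetical BASE_GEN_SIZE
const int demo_num_buckets  = 12;   // hypothetical NUM_GEN_POWER2

int demo_bucket_of (size_t size)
{
    size_t sz = demo_base_size;
    int i = 0;
    for (; i < demo_num_buckets; i++)
    {
        if (size < sz)
            break;          // first bucket whose upper bound exceeds size
        sz *= 2;
    }
    return i;               // i == demo_num_buckets means "fell off the end" in this demo
}
// demo_bucket_of (2000) == 2: the bounds go 512 -> 1024 -> 2048, stopping at 2048.
#endif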
13812 void gc_heap::remove_gen_free (int gen_number, size_t free_size)
13813 {
13814 #ifdef FREE_USAGE_STATS
13815     dprintf (3, ("removing free %Id from gen%d", free_size, gen_number));
13816     generation* gen = generation_of (gen_number);
13817     size_t sz = BASE_GEN_SIZE;
13818     int i = 0;
13819
13820     for (; i < NUM_GEN_POWER2; i++)
13821     {
13822         if (free_size < sz)
13823         {
13824             break;
13825         }
13826         sz = sz * 2;
13827     }
13828     
13829     (gen->gen_free_spaces[i])--;
13830 #else
13831     UNREFERENCED_PARAMETER(gen_number);
13832     UNREFERENCED_PARAMETER(free_size);
13833 #endif //FREE_USAGE_STATS
13834 }
13835
13836 uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
13837                                              int from_gen_number,
13838                                              uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL)
13839 {
13840     size = Align (size);
13841     assert (size >= Align (min_obj_size));
13842     assert (from_gen_number < max_generation);
13843     assert (from_gen_number >= 0);
13844     assert (generation_of (from_gen_number + 1) == gen);
13845
13846     allocator* gen_allocator = generation_allocator (gen);
13847     BOOL discard_p = gen_allocator->discard_if_no_fit_p ();
13848     int pad_in_front = ((old_loc != 0) && ((from_gen_number+1) != max_generation)) ? USE_PADDING_FRONT : 0;
13849
13850     size_t real_size = size + Align (min_obj_size);
13851     if (pad_in_front)
13852         real_size += Align (min_obj_size);
13853
13854     if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
13855                        generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front)))
13856     {
13857         size_t sz_list = gen_allocator->first_bucket_size();
13858         for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
13859         {
13860             if ((real_size < (sz_list / 2)) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
13861             {
13862                 uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
13863                 uint8_t* prev_free_item = 0;
13864                 while (free_list != 0)
13865                 {
13866                     dprintf (3, ("considering free list %Ix", (size_t)free_list));
13867
13868                     size_t free_list_size = unused_array_size (free_list);
13869
13870                     if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size),
13871                                     old_loc, USE_PADDING_TAIL | pad_in_front))
13872                     {
13873                         dprintf (4, ("F:%Ix-%Id",
13874                                      (size_t)free_list, free_list_size));
13875
13876                         gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, !discard_p);
13877                         generation_free_list_space (gen) -= free_list_size;
13878                         remove_gen_free (gen->gen_num, free_list_size);
13879
13880                         adjust_limit (free_list, free_list_size, gen, from_gen_number+1);
13881                         generation_allocate_end_seg_p (gen) = FALSE;
13882                         goto finished;
13883                     }
13884                     // We do first fit on bucket 0 because its items can be smaller than real_size, so we are not guaranteed to find a fit there.
13885                     else if (discard_p || (a_l_idx == 0))
13886                     {
13887                         dprintf (3, ("couldn't use this free area, discarding"));
13888                         generation_free_obj_space (gen) += free_list_size;
13889
13890                         gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
13891                         generation_free_list_space (gen) -= free_list_size;
13892                         remove_gen_free (gen->gen_num, free_list_size);
13893                     }
13894                     else
13895                     {
13896                         prev_free_item = free_list;
13897                     }
13898                     free_list = free_list_slot (free_list); 
13899                 }
13900             }
13901             sz_list = sz_list * 2;
13902         }
13903         //go back to the beginning of the segment list 
13904         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
13905         if (seg != generation_allocation_segment (gen))
13906         {
13907             leave_allocation_segment (gen);
13908             generation_allocation_segment (gen) = seg;
13909         }
13910         while (seg != ephemeral_heap_segment)
13911         {
13912             if (size_fit_p(size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
13913                            heap_segment_committed (seg), old_loc, USE_PADDING_TAIL | pad_in_front))
13914             {
13915                 dprintf (3, ("using what's left in committed"));
13916                 adjust_limit (heap_segment_plan_allocated (seg),
13917                               heap_segment_committed (seg) -
13918                               heap_segment_plan_allocated (seg),
13919                               gen, from_gen_number+1);
13920                 generation_allocate_end_seg_p (gen) = TRUE;
13921                 // dformat (t, 3, "Expanding segment allocation");
13922                 heap_segment_plan_allocated (seg) =
13923                     heap_segment_committed (seg);
13924                 goto finished;
13925             }
13926             else
13927             {
13928                 if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
13929                                 heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
13930                     grow_heap_segment (seg, heap_segment_plan_allocated (seg), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG))
13931                 {
13932                     dprintf (3, ("using what's left in reserved"));
13933                     adjust_limit (heap_segment_plan_allocated (seg),
13934                                   heap_segment_committed (seg) -
13935                                   heap_segment_plan_allocated (seg),
13936                                   gen, from_gen_number+1);
13937                     generation_allocate_end_seg_p (gen) = TRUE;
13938                     heap_segment_plan_allocated (seg) =
13939                         heap_segment_committed (seg);
13940
13941                     goto finished;
13942                 }
13943                 else
13944                 {
13945                     leave_allocation_segment (gen);
13946                     heap_segment*   next_seg = heap_segment_next_rw (seg);
13947                     if (next_seg)
13948                     {
13949                         dprintf (3, ("getting next segment"));
13950                         generation_allocation_segment (gen) = next_seg;
13951                         generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
13952                         generation_allocation_limit (gen) = generation_allocation_pointer (gen);
13953                     }
13954                     else
13955                     {
13956                         size = 0;
13957                         goto finished;
13958                     }
13959                 }
13960             }
13961             seg = generation_allocation_segment (gen);
13962         }
13963         //No need to fix the last region. Will be done later
13964         size = 0;
13965         goto finished;
13966     }
13967     finished:
13968     if (0 == size)
13969     {
13970         return 0;
13971     }
13972     else
13973     {
13974         uint8_t*  result = generation_allocation_pointer (gen);
13975         size_t pad = 0;
13976
13977 #ifdef SHORT_PLUGS
13978         if ((pad_in_front & USE_PADDING_FRONT) &&
13979             (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
13980              ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
13981         {
13982             pad = Align (min_obj_size);
13983             set_plug_padded (old_loc);
13984         }
13985 #endif //SHORT_PLUGS
13986
13987 #ifdef FEATURE_STRUCTALIGN
13988         _ASSERTE(!old_loc || alignmentOffset != 0);
13989         _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
13990         if (old_loc != 0)
13991         {
13992             size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
13993             set_node_aligninfo (old_loc, requiredAlignment, pad1);
13994             pad += pad1;
13995         }
13996 #else // FEATURE_STRUCTALIGN
13997         if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
13998         {
13999             pad += switch_alignment_size (is_plug_padded (old_loc));
14000             set_node_realigned (old_loc);
14001             dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14002                          (size_t)old_loc, (size_t)(result+pad)));
14003             assert (same_large_alignment_p (result + pad, old_loc));
14004         }
14005 #endif // FEATURE_STRUCTALIGN
14006         dprintf (3, ("Allocate %Id bytes", size));
14007
14008         if ((old_loc == 0) || (pad != 0))
14009         {
14010             //allocating a non-plug or a gap, so reset the start region
14011             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14012         }
14013
14014         generation_allocation_pointer (gen) += size + pad;
14015         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14016         if (generation_allocate_end_seg_p (gen))
14017         {
14018             generation_end_seg_allocated (gen) += size;
14019         }
14020         else
14021         {
14022             generation_free_list_allocated (gen) += size;
14023         }
14024         generation_allocation_size (gen) += size;
14025
14026         dprintf (3, ("aio: ptr: %Ix, limit: %Ix, sr: %Ix", 
14027             generation_allocation_pointer (gen), generation_allocation_limit (gen),
14028             generation_allocation_context_start_region (gen)));
14029
14030         return result + pad;
14031     }
14032 }
14033
14034 void gc_heap::repair_allocation_in_expanded_heap (generation* consing_gen)
14035 {
14036     //make sure that every generation has a planned allocation start
14037     int  gen_number = max_generation - 1;
14038     while (gen_number>= 0)
14039     {
14040         generation* gen = generation_of (gen_number);
14041         if (0 == generation_plan_allocation_start (gen))
14042         {
14043             realloc_plan_generation_start (gen, consing_gen);
14044
14045             assert (generation_plan_allocation_start (gen));
14046         }
14047         gen_number--;
14048     }
14049
14050     // now we know the planned allocation size
14051     size_t  size = (generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
14052     heap_segment* seg = generation_allocation_segment (consing_gen);
14053     if (generation_allocation_limit (consing_gen) == heap_segment_plan_allocated (seg))
14054     {
14055         if (size != 0)
14056         {
14057             heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);
14058         }
14059     }
14060     else
14061     {
14062         assert (settings.condemned_generation == max_generation);
14063         uint8_t* first_address = generation_allocation_limit (consing_gen);
14064         //look through the pinned plugs for relevant ones.
14065         //Look for the right pinned plug to start from.
14066         size_t mi = 0;
14067         mark* m = 0;
14068         while (mi != mark_stack_tos)
14069         {
14070             m = pinned_plug_of (mi);
14071             if ((pinned_plug (m) == first_address))
14072                 break;
14073             else
14074                 mi++;
14075         }
14076         assert (mi != mark_stack_tos);
14077         pinned_len (m) = size;
14078     }
14079 }
14080
14081 //tododefrag optimize for new segment (plan_allocated == mem)
14082 uint8_t* gc_heap::allocate_in_expanded_heap (generation* gen,
14083                                           size_t size,
14084                                           BOOL& adjacentp,
14085                                           uint8_t* old_loc,
14086 #ifdef SHORT_PLUGS
14087                                           BOOL set_padding_on_saved_p,
14088                                           mark* pinned_plug_entry,
14089 #endif //SHORT_PLUGS
14090                                           BOOL consider_bestfit,
14091                                           int active_new_gen_number
14092                                           REQD_ALIGN_AND_OFFSET_DCL)
14093 {
14094     UNREFERENCED_PARAMETER(active_new_gen_number);
14095     dprintf (3, ("aie: P: %Ix, size: %Ix", old_loc, size));
14096
14097     size = Align (size);
14098     assert (size >= Align (min_obj_size));
14099     int pad_in_front = ((old_loc != 0) && (active_new_gen_number != max_generation)) ? USE_PADDING_FRONT : 0;
14100
14101     if (consider_bestfit && use_bestfit)
14102     {
14103         assert (bestfit_seg);
14104         dprintf (SEG_REUSE_LOG_1, ("reallocating 0x%Ix in expanded heap, size: %Id", 
14105                     old_loc, size));
14106         return bestfit_seg->fit (old_loc, 
14107 #ifdef SHORT_PLUGS
14108                                  set_padding_on_saved_p,
14109                                  pinned_plug_entry,
14110 #endif //SHORT_PLUGS
14111                                  size REQD_ALIGN_AND_OFFSET_ARG);
14112     }
14113
14114     heap_segment* seg = generation_allocation_segment (gen);
14115
14116     if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14117                        generation_allocation_limit (gen), old_loc,
14118                        ((generation_allocation_limit (gen) !=
14119                           heap_segment_plan_allocated (seg))? USE_PADDING_TAIL : 0) | pad_in_front)))
14120     {
14121         dprintf (3, ("aie: can't fit: ptr: %Ix, limit: %Ix", generation_allocation_pointer (gen),
14122             generation_allocation_limit (gen)));
14123
14124         adjacentp = FALSE;
14125         uint8_t* first_address = (generation_allocation_limit (gen) ?
14126                                generation_allocation_limit (gen) :
14127                                heap_segment_mem (seg));
14128         assert (in_range_for_segment (first_address, seg));
14129
14130         uint8_t* end_address   = heap_segment_reserved (seg);
14131
14132         dprintf (3, ("aie: first_addr: %Ix, gen alloc limit: %Ix, end_address: %Ix",
14133             first_address, generation_allocation_limit (gen), end_address));
14134
14135         size_t mi = 0;
14136         mark* m = 0;
14137
14138         if (heap_segment_allocated (seg) != heap_segment_mem (seg))
14139         {
14140             assert (settings.condemned_generation == max_generation);
14141             //look through the pinned plugs for relevant ones.
14142             //Look for the right pinned plug to start from.
14143             while (mi != mark_stack_tos)
14144             {
14145                 m = pinned_plug_of (mi);
14146                 if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))
14147                 {
14148                     dprintf (3, ("aie: found pin: %Ix", pinned_plug (m)));
14149                     break;
14150                 }
14151                 else
14152                     mi++;
14153             }
14154             if (mi != mark_stack_tos)
14155             {
14156                 //fix old free list.
14157                 size_t  hsize = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));
14158                 {
14159                     dprintf(3,("gc filling up hole"));
14160                     ptrdiff_t mi1 = (ptrdiff_t)mi;
14161                     while ((mi1 >= 0) &&
14162                            (pinned_plug (pinned_plug_of(mi1)) != generation_allocation_limit (gen)))
14163                     {
14164                         dprintf (3, ("aie: checking pin %Ix", pinned_plug (pinned_plug_of(mi1))));
14165                         mi1--;
14166                     }
14167                     if (mi1 >= 0)
14168                     {
14169                         size_t saved_pinned_len = pinned_len (pinned_plug_of(mi1));
14170                         pinned_len (pinned_plug_of(mi1)) = hsize;
14171                         dprintf (3, ("changing %Ix len %Ix->%Ix", 
14172                             pinned_plug (pinned_plug_of(mi1)), 
14173                             saved_pinned_len, pinned_len (pinned_plug_of(mi1))));
14174                     }
14175                 }
14176             }
14177         }
14178         else
14179         {
14180             assert (generation_allocation_limit (gen) ==
14181                     generation_allocation_pointer (gen));
14182             mi = mark_stack_tos;
14183         }
14184
14185         while ((mi != mark_stack_tos) && in_range_for_segment (pinned_plug (m), seg))
14186         {
14187             size_t len = pinned_len (m);
14188             uint8_t*  free_list = (pinned_plug (m) - len);
14189             dprintf (3, ("aie: testing free item: %Ix->%Ix(%Ix)", 
14190                 free_list, (free_list + len), len));
14191             if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + len), old_loc, USE_PADDING_TAIL | pad_in_front))
14192             {
14193                 dprintf (3, ("aie: Found adequate unused area: %Ix, size: %Id",
14194                             (size_t)free_list, len));
14195                 {
14196                     generation_allocation_pointer (gen) = free_list;
14197                     generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14198                     generation_allocation_limit (gen) = (free_list + len);
14199                 }
14200                 goto allocate_in_free;
14201             }
14202             mi++;
14203             m = pinned_plug_of (mi);
14204         }
14205
14206         //switch to the end of the segment.
14207         generation_allocation_pointer (gen) = heap_segment_plan_allocated (seg);
14208         generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14209         heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14210         generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14211         dprintf (3, ("aie: switching to end of seg: %Ix->%Ix(%Ix)", 
14212             generation_allocation_pointer (gen), generation_allocation_limit (gen),
14213             (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
14214
14215         if (!size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14216                          generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front))
14217         {
14218             dprintf (3, ("aie: ptr: %Ix, limit: %Ix, can't alloc", generation_allocation_pointer (gen),
14219                 generation_allocation_limit (gen)));
14220             assert (!"Can't allocate if no free space");
14221             return 0;
14222         }
14223     }
14224     else
14225     {
14226         adjacentp = TRUE;
14227     }
14228
14229 allocate_in_free:
14230     {
14231         uint8_t*  result = generation_allocation_pointer (gen);
14232         size_t pad = 0;
14233
14234 #ifdef SHORT_PLUGS
14235         if ((pad_in_front & USE_PADDING_FRONT) &&
14236             (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
14237              ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
14239         {
14240             pad = Align (min_obj_size);
14241             set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry);
14242         }
14243 #endif //SHORT_PLUGS
14244
14245 #ifdef FEATURE_STRUCTALIGN
14246         _ASSERTE(!old_loc || alignmentOffset != 0);
14247         _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
14248         if (old_loc != 0)
14249         {
14250             size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
14251             set_node_aligninfo (old_loc, requiredAlignment, pad1);
14252             pad += pad1;
14253             adjacentp = FALSE;
14254         }
14255 #else // FEATURE_STRUCTALIGN
14256         if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
14257         {
14258             pad += switch_alignment_size (is_plug_padded (old_loc));
14259             set_node_realigned (old_loc);
14260             dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14261                          (size_t)old_loc, (size_t)(result+pad)));
14262             assert (same_large_alignment_p (result + pad, old_loc));
14263             adjacentp = FALSE;
14264         }
14265 #endif // FEATURE_STRUCTALIGN
14266
14267         if ((old_loc == 0) || (pad != 0))
14268         {
14269             //allocating a non-plug or a gap, so reset the start region
14270             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14271         }
14272
14273         generation_allocation_pointer (gen) += size + pad;
14274         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14275         dprintf (3, ("Allocated in expanded heap %Ix:%Id", (size_t)(result+pad), size));
14276
14277         dprintf (3, ("aie: ptr: %Ix, limit: %Ix, sr: %Ix", 
14278             generation_allocation_pointer (gen), generation_allocation_limit (gen),
14279             generation_allocation_context_start_region (gen)));
14280
14281         return result + pad;
14282     }
14283 }
14284
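// If planning has been consing into a non-ephemeral segment, seal that segment's
// plan_allocated at the current allocation pointer and move the consing context
// (now gen (max_generation - 1)) to the start of the ephemeral segment; otherwise
// return the consing gen unchanged.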
14285 generation*  gc_heap::ensure_ephemeral_heap_segment (generation* consing_gen)
14286 {
14287     heap_segment* seg = generation_allocation_segment (consing_gen);
14288     if (seg != ephemeral_heap_segment)
14289     {
14290         assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (seg));
14291         assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (seg));
14292
14293         //fix the allocated size of the segment.
14294         heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);
14295
14296         generation* new_consing_gen = generation_of (max_generation - 1);
14297         generation_allocation_pointer (new_consing_gen) =
14298                 heap_segment_mem (ephemeral_heap_segment);
14299         generation_allocation_limit (new_consing_gen) =
14300             generation_allocation_pointer (new_consing_gen);
14301         generation_allocation_context_start_region (new_consing_gen) = 
14302             generation_allocation_pointer (new_consing_gen);
14303         generation_allocation_segment (new_consing_gen) = ephemeral_heap_segment;
14304
14305         return new_consing_gen;
14306     }
14307     else
14308         return consing_gen;
14309 }
14310
14311 uint8_t* gc_heap::allocate_in_condemned_generations (generation* gen,
14312                                                   size_t size,
14313                                                   int from_gen_number,
14314 #ifdef SHORT_PLUGS
14315                                                   BOOL* convert_to_pinned_p,
14316                                                   uint8_t* next_pinned_plug,
14317                                                   heap_segment* current_seg,
14318 #endif //SHORT_PLUGS
14319                                                   uint8_t* old_loc
14320                                                   REQD_ALIGN_AND_OFFSET_DCL)
14321 {
14322     // Make sure that the youngest generation gap hasn't been allocated
14323     if (settings.promotion)
14324     {
14325         assert (generation_plan_allocation_start (youngest_generation) == 0);
14326     }
14327
14328     size = Align (size);
14329     assert (size >= Align (min_obj_size));
14330     int to_gen_number = from_gen_number;
14331     if (from_gen_number != (int)max_generation)
14332     {
14333         to_gen_number = from_gen_number + (settings.promotion ? 1 : 0);
14334     }
14335
14336     dprintf (3, ("aic gen%d: s: %Id", gen->gen_num, size));
14337
14338     int pad_in_front = ((old_loc != 0) && (to_gen_number != max_generation)) ? USE_PADDING_FRONT : 0;
14339     
14340     if ((from_gen_number != -1) && (from_gen_number != (int)max_generation) && settings.promotion)
14341     {
14342         generation_condemned_allocated (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
14343         generation_allocation_size (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
14344     }
14345 retry:
14346     {
14347         heap_segment* seg = generation_allocation_segment (gen);
14348         if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14349                            generation_allocation_limit (gen), old_loc,
14350                            ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))?USE_PADDING_TAIL:0)|pad_in_front)))
14351         {
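            // If the allocation limit sits at the oldest pinned plug, dequeue
            // that pin: record where its free space ended up, advance the
            // allocation pointer past the plug, and retry.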
14352             if ((! (pinned_plug_que_empty_p()) &&
14353                  (generation_allocation_limit (gen) ==
14354                   pinned_plug (oldest_pin()))))
14355             {
14356                 size_t entry = deque_pinned_plug();
14357                 mark* pinned_plug_entry = pinned_plug_of (entry);
14358                 size_t len = pinned_len (pinned_plug_entry);
14359                 uint8_t* plug = pinned_plug (pinned_plug_entry);
14360                 set_new_pin_info (pinned_plug_entry, generation_allocation_pointer (gen));
14361
14362 #ifdef FREE_USAGE_STATS
14363                 generation_allocated_in_pinned_free (gen) += generation_allocated_since_last_pin (gen);
14364                 dprintf (3, ("allocated %Id so far within pin %Ix, total->%Id", 
14365                     generation_allocated_since_last_pin (gen), 
14366                     plug,
14367                     generation_allocated_in_pinned_free (gen)));
14368                 generation_allocated_since_last_pin (gen) = 0;
14369
14370                 add_item_to_current_pinned_free (gen->gen_num, pinned_len (pinned_plug_of (entry)));
14371 #endif //FREE_USAGE_STATS
14372
14373                 dprintf (3, ("mark stack bos: %Id, tos: %Id, aic: p %Ix len: %Ix->%Ix", 
14374                     mark_stack_bos, mark_stack_tos, plug, len, pinned_len (pinned_plug_of (entry))));
14375
14376                 assert(mark_stack_array[entry].len == 0 ||
14377                        mark_stack_array[entry].len >= Align(min_obj_size));
14378                 generation_allocation_pointer (gen) = plug + len;
14379                 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14380                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14381                 set_allocator_next_pin (gen);
14382
14383                 //Add the size of the pinned plug to the right pinned allocations
14384                 //find out which gen this pinned plug came from 
14385                 int frgn = object_gennum (plug);
14386                 if ((frgn != (int)max_generation) && settings.promotion)
14387                 {
14388                     generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
14389                     int togn = object_gennum_plan (plug);
14390                     if (frgn < togn)
14391                     {
14392                         generation_pinned_allocation_compact_size (generation_of (togn)) += len;
14393                     }
14394                 }
14395                 goto retry;
14396             }
14397             
14398             if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
14399             {
14400                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14401                 dprintf (3, ("changed limit to plan alloc: %Ix", generation_allocation_limit (gen)));
14402             }
14403             else
14404             {
14405                 if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
14406                 {
14407                     heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14408                     generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14409                     dprintf (3, ("changed limit to commit: %Ix", generation_allocation_limit (gen)));
14410                 }
14411                 else
14412                 {
14413 #ifndef RESPECT_LARGE_ALIGNMENT
14414                     assert (gen != youngest_generation);
14415 #endif //RESPECT_LARGE_ALIGNMENT
14416
14417                     if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14418                                     heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
14419                         (grow_heap_segment (seg, generation_allocation_pointer (gen), old_loc,
14420                                             size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG)))
14421                     {
14422                         dprintf (3, ("Expanded segment allocation by committing more memory"));
14423                         heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14424                         generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14425                     }
14426                     else
14427                     {
14428                         heap_segment*   next_seg = heap_segment_next (seg);
14429                         assert (generation_allocation_pointer (gen)>=
14430                                 heap_segment_mem (seg));
14431                         // Verify that all pinned plugs for this segment are consumed
14432                         if (!pinned_plug_que_empty_p() &&
14433                             ((pinned_plug (oldest_pin()) <
14434                               heap_segment_allocated (seg)) &&
14435                              (pinned_plug (oldest_pin()) >=
14436                               generation_allocation_pointer (gen))))
14437                         {
14438                             LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
14439                                          pinned_plug (oldest_pin())));
14440                             FATAL_GC_ERROR();
14441                         }
14442                         assert (generation_allocation_pointer (gen)>=
14443                                 heap_segment_mem (seg));
14444                         assert (generation_allocation_pointer (gen)<=
14445                                 heap_segment_committed (seg));
14446                         heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);
14447
14448                         if (next_seg)
14449                         {
14450                             generation_allocation_segment (gen) = next_seg;
14451                             generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
14452                             generation_allocation_limit (gen) = generation_allocation_pointer (gen);
14453                             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14454                         }
14455                         else
14456                         {
14457                             return 0; //should only happen during allocation of generation 0 gap
14458                             // in that case we are going to grow the heap anyway
14459                         }
14460                     }
14461                 }
14462             }
14463             set_allocator_next_pin (gen);
14464
14465             goto retry;
14466         }
14467     }
14468
14469     {
14470         assert (generation_allocation_pointer (gen)>=
14471                 heap_segment_mem (generation_allocation_segment (gen)));
14472         uint8_t* result = generation_allocation_pointer (gen);
14473         size_t pad = 0;
14474 #ifdef SHORT_PLUGS
14475         if ((pad_in_front & USE_PADDING_FRONT) &&
14476             (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
14477              ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
14478         {
14479             ptrdiff_t dist = old_loc - result;
14480             if (dist == 0)
14481             {
14482                 dprintf (3, ("old alloc: %Ix, same as new alloc, not padding", old_loc));
14483                 pad = 0;
14484             }
14485             else
14486             {
14487                 if ((dist > 0) && (dist < (ptrdiff_t)Align (min_obj_size)))
14488                 {
14489                     dprintf (3, ("old alloc: %Ix, only %d bytes > new alloc! Shouldn't happen", old_loc, dist));
14490                     FATAL_GC_ERROR();
14491                 }
14492
14493                 pad = Align (min_obj_size);
14494                 set_plug_padded (old_loc);
14495             }
14496         }
14497 #endif //SHORT_PLUGS
14498 #ifdef FEATURE_STRUCTALIGN
14499         _ASSERTE(!old_loc || alignmentOffset != 0);
14500         _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
14501         if ((old_loc != 0))
14502         {
14503             size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
14504             set_node_aligninfo (old_loc, requiredAlignment, pad1);
14505             pad += pad1;
14506         }
14507 #else // FEATURE_STRUCTALIGN
14508         if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
14509         {
14510             pad += switch_alignment_size (is_plug_padded (old_loc));
14511             set_node_realigned(old_loc);
14512             dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14513                          (size_t)old_loc, (size_t)(result+pad)));
14514             assert (same_large_alignment_p (result + pad, old_loc));
14515         }
14516 #endif // FEATURE_STRUCTALIGN
14517
14518 #ifdef SHORT_PLUGS
14519         if ((next_pinned_plug != 0) && (pad != 0) && (generation_allocation_segment (gen) == current_seg))
14520         {
14521             assert (old_loc != 0);
14522             ptrdiff_t dist_to_next_pin = (ptrdiff_t)(next_pinned_plug - (generation_allocation_pointer (gen) + size + pad));
14523             assert (dist_to_next_pin >= 0);
14524
14525             if ((dist_to_next_pin >= 0) && (dist_to_next_pin < (ptrdiff_t)Align (min_obj_size)))
14526             {
14527                 dprintf (3, ("%Ix->(%Ix,%Ix),%Ix(%Ix)(%Ix),NP->PP", 
14528                     old_loc, 
14529                     generation_allocation_pointer (gen),
14530                     generation_allocation_limit (gen),
14531                     next_pinned_plug,
14532                     size, 
14533                     dist_to_next_pin));
14534                 clear_plug_padded (old_loc);
14535                 pad = 0;
14536                 *convert_to_pinned_p = TRUE;
14537                 record_interesting_data_point (idp_converted_pin);
14538
14539                 return 0;
14540             }
14541         }
14542 #endif //SHORT_PLUGS
14543
14544         if ((old_loc == 0) || (pad != 0))
14545         {
14546             //allocating a non-plug or a gap, so reset the start region
14547             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14548         }
14549
14550         generation_allocation_pointer (gen) += size + pad;
14551         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14552
14553 #ifdef FREE_USAGE_STATS
14554         generation_allocated_since_last_pin (gen) += size;
14555 #endif //FREE_USAGE_STATS
14556
14557         dprintf (3, ("aic: ptr: %Ix, limit: %Ix, sr: %Ix", 
14558             generation_allocation_pointer (gen), generation_allocation_limit (gen),
14559             generation_allocation_context_start_region (gen)));
14560
14561         assert (result + pad);
14562         return result + pad;
14563     }
14564 }
14565
14566 inline int power (int x, int y)
14567 {
14568     int z = 1;
14569     for (int i = 0; i < y; i++)
14570     {
14571         z = z*x;
14572     }
14573     return z;
14574 }
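// Simple integer exponentiation, e.g. power (10, 2) == 100; O(y) multiplications,
// no overflow checking.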
14575
14576 int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation, 
14577                                            int initial_gen,
14578                                            int current_gen,
14579                                            BOOL* blocking_collection_p
14580                                            STRESS_HEAP_ARG(int n_original))
14581 {
14582     int n = current_gen;
14583 #ifdef MULTIPLE_HEAPS
14584     BOOL joined_last_gc_before_oom = FALSE;
14585     for (int i = 0; i < n_heaps; i++)
14586     {
14587         if (g_heaps[i]->last_gc_before_oom)
14588         {
14589             dprintf (GTC_LOG, ("h%d is setting blocking to TRUE", i));
14590             joined_last_gc_before_oom = TRUE;
14591             break;
14592         }
14593     }
14594 #else
14595     BOOL joined_last_gc_before_oom = last_gc_before_oom;
14596 #endif //MULTIPLE_HEAPS
14597
14598     if (joined_last_gc_before_oom && settings.pause_mode != pause_low_latency)
14599     {
14600         assert (*blocking_collection_p);
14601     }
14602
14603     if (should_evaluate_elevation && (n == max_generation))
14604     {
14605         dprintf (GTC_LOG, ("lock: %d(%d)", 
14606             (settings.should_lock_elevation ? 1 : 0), 
14607             settings.elevation_locked_count));
14608
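        // With should_lock_elevation set, retract 5 out of every 6 would-be gen2
        // collections to gen1; the 6th (when elevation_locked_count wraps at 6)
        // is allowed to remain a gen2.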
14609         if (settings.should_lock_elevation)
14610         {
14611             settings.elevation_locked_count++;
14612             if (settings.elevation_locked_count == 6)
14613             {
14614                 settings.elevation_locked_count = 0;
14615             }
14616             else
14617             {
14618                 n = max_generation - 1;
14619                 settings.elevation_reduced = TRUE;
14620             }
14621         }
14622         else
14623         {
14624             settings.elevation_locked_count = 0;
14625         }
14626     }
14627     else
14628     {
14629         settings.should_lock_elevation = FALSE;
14630         settings.elevation_locked_count = 0;
14631     }
14632
14633     if (provisional_mode_triggered && (n == max_generation))
14634     {
14635         // There are a few cases where we should not reduce the generation.
14636         if ((initial_gen == max_generation) || (settings.reason == reason_alloc_loh))
14637         {
14638             // If we are doing a full GC in the provisional mode, we always
14639             // make it blocking because we don't want to get into a situation
14640             // where foreground GCs are asking for a compacting full GC right away
14641             // and not getting it.
14642             dprintf (GTC_LOG, ("full GC induced, not reducing gen"));
14643             *blocking_collection_p = TRUE;
14644         }
14645         else if (should_expand_in_full_gc || joined_last_gc_before_oom)
14646         {
14647             dprintf (GTC_LOG, ("need full blocking GCs to expand heap or avoid OOM, not reducing gen"));
14648             assert (*blocking_collection_p);
14649         }
14650         else
14651         {
14652             dprintf (GTC_LOG, ("reducing gen in PM: %d->%d->%d", initial_gen, n, (max_generation - 1)));
14653             n = max_generation - 1;
14654         }
14655     }
14656
14657     if (should_expand_in_full_gc)
14658     {
14659         should_expand_in_full_gc = FALSE;
14660     }
14661
14662     if ((n == max_generation) && (*blocking_collection_p == FALSE))
14663     {
14664         // If we are doing a gen2, reset elevation regardless and let the gen2 decide
14665         // whether we should lock again; in the bgc case, by design we will not retract
14666         // the gen1 start.
14667         settings.should_lock_elevation = FALSE;
14668         settings.elevation_locked_count = 0;
14669         dprintf (1, ("doing bgc, reset elevation"));
14670     }
14671
14672 #ifdef STRESS_HEAP
14673 #ifdef BACKGROUND_GC
14674     // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
14675     // generations to be collected.
14676     //
14677     // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple
14678     // things that need to be fixed in this code block.
14679     if (n_original != max_generation &&
14680         g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
14681     {
14682 #ifndef FEATURE_REDHAWK
14683         // for the GC stress mix mode throttle down gen2 collections
14684         if (g_pConfig->IsGCStressMix())
14685         {
14686             size_t current_gc_count = 0;
14687
14688 #ifdef MULTIPLE_HEAPS
14689             current_gc_count = (size_t)dd_collection_count (g_heaps[0]->dynamic_data_of (0));
14690 #else
14691             current_gc_count = (size_t)dd_collection_count (dynamic_data_of (0));
14692 #endif //MULTIPLE_HEAPS
14693             // in gc stress, only escalate every 10th non-gen2 collection to a gen2...
14694             if ((current_gc_count % 10) == 0)
14695             {
14696                 n = max_generation;
14697             }
14698         }
14699         // for traditional GC stress
14700         else
14701 #endif // !FEATURE_REDHAWK
14702         if (*blocking_collection_p)
14703         {
14704             // We call StressHeap() a lot for Concurrent GC Stress. However,
14705             // if we cannot do a concurrent collection, there is no need to stress anymore.
14706             // @TODO: Enable stress when the memory pressure goes down again
14707             GCStressPolicy::GlobalDisable();
14708         }
14709         else
14710         {
14711             n = max_generation;
14712         }
14713     }
14714 #endif //BACKGROUND_GC
14715 #endif //STRESS_HEAP
14716
14717     return n;
14718 }
14719
14720 inline
14721 size_t get_survived_size (gc_history_per_heap* hist)
14722 {
14723     size_t surv_size = 0;
14724     gc_generation_data* gen_data;
14725
14726     for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
14727     {
14728         gen_data = &(hist->gen_data[gen_number]); 
14729         surv_size += (gen_data->size_after - 
14730                       gen_data->free_list_space_after - 
14731                       gen_data->free_obj_space_after);
14732     }
14733
14734     return surv_size;
14735 }
14736
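// Illustrative (hypothetical numbers): a generation whose size_after is 10MB with
// 2MB of free list space and 1MB of free objects after the GC contributes 7MB of
// survived size.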
14737 size_t gc_heap::get_total_survived_size()
14738 {
14739     size_t total_surv_size = 0;
14740 #ifdef MULTIPLE_HEAPS
14741     for (int i = 0; i < gc_heap::n_heaps; i++)
14742     {
14743         gc_heap* hp = gc_heap::g_heaps[i];
14744         gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
14745         total_surv_size += get_survived_size (current_gc_data_per_heap);
14746     }
14747 #else
14748     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
14749     total_surv_size = get_survived_size (current_gc_data_per_heap);
14750 #endif //MULTIPLE_HEAPS
14751     return total_surv_size;
14752 }
14753
14754 // Gets what's allocated on both SOH and LOH that hasn't been collected.
14755 size_t gc_heap::get_current_allocated()
14756 {
14757     dynamic_data* dd = dynamic_data_of (0);
14758     size_t current_alloc = dd_desired_allocation (dd) - dd_new_allocation (dd);
14759     dd = dynamic_data_of (max_generation + 1);
14760     current_alloc += dd_desired_allocation (dd) - dd_new_allocation (dd);
14761
14762     return current_alloc;
14763 }
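// dd_new_allocation counts down from dd_desired_allocation as the app allocates,
// so (desired - new) is what has been allocated since the budget was set; e.g.
// (hypothetical numbers) a 32MB gen0 budget with 12MB remaining means 20MB
// currently allocated.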
14764
14765 size_t gc_heap::get_total_allocated()
14766 {
14767     size_t total_current_allocated = 0;
14768 #ifdef MULTIPLE_HEAPS
14769     for (int i = 0; i < gc_heap::n_heaps; i++)
14770     {
14771         gc_heap* hp = gc_heap::g_heaps[i];
14772         total_current_allocated += hp->get_current_allocated();
14773     }
14774 #else
14775     total_current_allocated = get_current_allocated();
14776 #endif //MULTIPLE_HEAPS
14777     return total_current_allocated;
14778 }
14779
14780 size_t gc_heap::current_generation_size (int gen_number)
14781 {
14782     dynamic_data* dd = dynamic_data_of (gen_number);
14783     size_t gen_size = (dd_current_size (dd) + dd_desired_allocation (dd)
14784                         - dd_new_allocation (dd));
14785
14786     return gen_size;
14787 }
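// i.e., the size surviving the last GC plus whatever has been allocated into the
// generation since then (desired_allocation minus the remaining new_allocation).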
14788
14789 #ifdef _PREFAST_
14790 #pragma warning(push)
14791 #pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function.
14792 #endif //_PREFAST_
14793
14794 /*
14795     This is called when we are actually doing a GC, or when we are just checking whether
14796     we would do a full blocking GC, in which case check_only_p is TRUE.
14797
14798     The difference between calling this with check_only_p TRUE and FALSE is that when it's
14799     TRUE:
14800             settings.reason is ignored
14801             budgets are not checked (since they are checked before this is called)
14802             it doesn't change anything non-local like generation_skip_ratio
14803 */
14804 int gc_heap::generation_to_condemn (int n_initial, 
14805                                     BOOL* blocking_collection_p, 
14806                                     BOOL* elevation_requested_p,
14807                                     BOOL check_only_p)
14808 {
14809     gc_mechanisms temp_settings = settings;
14810     gen_to_condemn_tuning temp_condemn_reasons;
14811     gc_mechanisms* local_settings = (check_only_p ? &temp_settings : &settings);
14812     gen_to_condemn_tuning* local_condemn_reasons = (check_only_p ? &temp_condemn_reasons : &gen_to_condemn_reasons);
14813     if (!check_only_p)
14814     {
14815         if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh))
14816         {
14817             assert (n_initial >= 1);
14818         }
14819
14820         assert (settings.reason != reason_empty);
14821     }
14822
14823     local_condemn_reasons->init();
14824
14825     int n = n_initial;
14826     int n_alloc = n;
14827     if (heap_number == 0)
14828     {
14829         dprintf (GTC_LOG, ("init: %d(%d)", n_initial, settings.reason));
14830     }
14831     int i = 0;
14832     int temp_gen = 0;
14833     BOOL low_memory_detected = g_low_memory_status;
14834     uint32_t memory_load = 0;
14835     uint64_t available_physical = 0;
14836     uint64_t available_page_file = 0;
14837     BOOL check_memory = FALSE;
14838     BOOL high_fragmentation  = FALSE;
14839     BOOL v_high_memory_load  = FALSE;
14840     BOOL high_memory_load    = FALSE;
14841     BOOL low_ephemeral_space = FALSE;
14842     BOOL evaluate_elevation  = TRUE;
14843     *elevation_requested_p   = FALSE;
14844     *blocking_collection_p   = FALSE;
14845
14846     BOOL check_max_gen_alloc = TRUE;
14847
14848 #ifdef STRESS_HEAP
14849     int orig_gen = n;
14850 #endif //STRESS_HEAP
14851
14852     if (!check_only_p)
14853     {
14854         dd_fragmentation (dynamic_data_of (0)) = 
14855             generation_free_list_space (youngest_generation) + 
14856             generation_free_obj_space (youngest_generation);
14857
14858         dd_fragmentation (dynamic_data_of (max_generation + 1)) = 
14859             generation_free_list_space (large_object_generation) + 
14860             generation_free_obj_space (large_object_generation);
14861
14862         //save new_allocation
14863         for (i = 0; i <= max_generation+1; i++)
14864         {
14865             dynamic_data* dd = dynamic_data_of (i);
14866             dprintf (GTC_LOG, ("h%d: g%d: l: %Id (%Id)", 
14867                             heap_number, i,
14868                             dd_new_allocation (dd),
14869                             dd_desired_allocation (dd)));
14870             dd_gc_new_allocation (dd) = dd_new_allocation (dd);
14871         }
14872
14873         local_condemn_reasons->set_gen (gen_initial, n);
14874         temp_gen = n;
14875
14876 #ifdef BACKGROUND_GC
14877         if (recursive_gc_sync::background_running_p())
14878         {
14879             dprintf (GTC_LOG, ("bgc in prog, 1"));
14880             check_max_gen_alloc = FALSE;
14881         }
14882 #endif //BACKGROUND_GC
14883
14884         if (check_max_gen_alloc)
14885         {
14886             //figure out if large objects need to be collected.
14887             if (get_new_allocation (max_generation+1) <= 0)
14888             {
14889                 n = max_generation;
14890                 local_condemn_reasons->set_gen (gen_alloc_budget, n);
14891             }
14892         }
14893
14894         //figure out which generation ran out of allocation
14895         for (i = n+1; i <= (check_max_gen_alloc ? max_generation : (max_generation - 1)); i++)
14896         {
14897             if (get_new_allocation (i) <= 0)
14898             {
14899                 n = i;
14900             }
14901             else
14902                 break;
14903         }
14904     }
14905
14906     if (n > temp_gen)
14907     {
14908         local_condemn_reasons->set_gen (gen_alloc_budget, n);
14909     }
14910
14911     dprintf (GTC_LOG, ("h%d: g%d budget", heap_number, ((get_new_allocation (max_generation+1) <= 0) ? 3 : n)));
14912
14913     n_alloc = n;
14914
14915 #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS)
14916     //time based tuning
14917     // if enough time has elapsed since the last gc
14918     // and the number of gcs is too low (1/10 of the lower gen's), then collect.
14919     // This should also be enabled if we have memory concerns.
14920     int n_time_max = max_generation;
14921
14922     if (!check_only_p)
14923     {
14924         if (recursive_gc_sync::background_running_p())
14925         {
14926             n_time_max = max_generation - 1;
14927         }
14928     }
14929
14930     if ((local_settings->pause_mode == pause_interactive) ||
14931         (local_settings->pause_mode == pause_sustained_low_latency))
14932     {
14933         dynamic_data* dd0 = dynamic_data_of (0);
14934         size_t now = GetHighPrecisionTimeStamp();
14935         temp_gen = n;
14936         for (i = (temp_gen+1); i <= n_time_max; i++)
14937         {
14938             dynamic_data* dd = dynamic_data_of (i);
14939             if ((now > dd_time_clock(dd) + dd_time_clock_interval(dd)) &&
14940                 (dd_gc_clock (dd0) > (dd_gc_clock (dd) + dd_gc_clock_interval(dd))) &&
14941                 ((n < max_generation) || ((dd_current_size (dd) < dd_max_size (dd0)))))
14942             {
14943                 n = min (i, n_time_max);
14944                 dprintf (GTC_LOG, ("time %d", n));
14945             }
14946         }
14947         if (n > temp_gen)
14948         {
14949             local_condemn_reasons->set_gen (gen_time_tuning, n);
14950         }
14951     }
14952
14953     if (n != n_alloc)
14954     {
14955         dprintf (GTC_LOG, ("Condemning %d based on time tuning and fragmentation", n));
14956     }
14957 #endif //BACKGROUND_GC && !MULTIPLE_HEAPS
14958
14959     if (n < (max_generation - 1))
14960     {
14961         if (dt_low_card_table_efficiency_p (tuning_deciding_condemned_gen))
14962         {
14963             n = max (n, max_generation - 1);
14964             local_settings->promotion = TRUE;
14965             dprintf (GTC_LOG, ("h%d: skip %d, c %d",
14966                         heap_number, generation_skip_ratio, n));
14967             local_condemn_reasons->set_condition (gen_low_card_p);
14968         }
14969     }
14970
14971     if (!check_only_p)
14972     {
14973         generation_skip_ratio = 100;
14974     }
14975
14976     if (dt_low_ephemeral_space_p (check_only_p ? 
14977                                   tuning_deciding_full_gc : 
14978                                   tuning_deciding_condemned_gen))
14979     {
14980         low_ephemeral_space = TRUE;
14981
14982         n = max (n, max_generation - 1);
14983         local_condemn_reasons->set_condition (gen_low_ephemeral_p);
14984         dprintf (GTC_LOG, ("h%d: low eph", heap_number));
14985
14986         if (!provisional_mode_triggered)
14987         {
14988 #ifdef BACKGROUND_GC
14989             if (!gc_can_use_concurrent || (generation_free_list_space (generation_of (max_generation)) == 0))
14990 #endif //BACKGROUND_GC
14991             {
14992                 //It is better to defragment first if we are running out of space for
14993                 //the ephemeral generation but have enough fragmentation to make up for it
14994                 //in the non-ephemeral generation. Essentially we are trading a gen2 GC for
14995                 //not having to expand the heap during ephemeral collections.
14996                 if (dt_high_frag_p (tuning_deciding_condemned_gen, 
14997                                     max_generation - 1, 
14998                                     TRUE))
14999                 {
15000                     high_fragmentation = TRUE;
15001                     local_condemn_reasons->set_condition (gen_max_high_frag_e_p);
15002                     dprintf (GTC_LOG, ("heap%d: gen1 frag", heap_number));
15003                 }
15004             }
15005         }
15006     }
15007
15008     //figure out which ephemeral generation is too fragmented
15009     temp_gen = n;
15010     for (i = n+1; i < max_generation; i++)
15011     {
15012         if (dt_high_frag_p (tuning_deciding_condemned_gen, i))
15013         {
15014             dprintf (GTC_LOG, ("h%d g%d too frag", heap_number, i));
15015             n = i;
15016         }
15017         else
15018             break;
15019     }
15020
15021     if (low_ephemeral_space)
15022     {
15023         //enable promotion
15024         local_settings->promotion = TRUE;
15025     }
15026
15027     if (n > temp_gen)
15028     {
15029         local_condemn_reasons->set_condition (gen_eph_high_frag_p);
15030     }
15031
15032     if (!check_only_p)
15033     {
15034         if (settings.pause_mode == pause_low_latency)
15035         {
15036             if (!is_induced (settings.reason))
15037             {
15038                 n = min (n, max_generation - 1);
15039                 dprintf (GTC_LOG, ("low latency mode is enabled, condemning %d", n));
15040                 evaluate_elevation = FALSE;
15041                 goto exit;
15042             }
15043         }
15044     }
15045
15046     // It's hard to catch the point at which the memory load gets so high that
15047     // we'd get an induced GC from the finalizer thread, so we check the memory load
15048     // for every gen0 GC.
15049     check_memory = (check_only_p ? 
15050                     (n >= 0) : 
15051                     ((n >= 1) || low_memory_detected));
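    // i.e., always check in check-only mode; otherwise check for gen1+ GCs, and
    // for gen0 only when the OS has signaled low memory.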
15052
15053     if (check_memory)
15054     {
15055         //find out if we are short on memory
15056         get_memory_info (&memory_load, &available_physical, &available_page_file);
15057         if (heap_number == 0)
15058         {
15059             dprintf (GTC_LOG, ("ml: %d", memory_load));
15060         }
15061         
15062         // Need to get it early enough for all heaps to use.
15063         entry_available_physical_mem = available_physical;
15064         local_settings->entry_memory_load = memory_load;
15065
15066         // @TODO: Force compaction more often under GCSTRESS
15067         if (memory_load >= high_memory_load_th || low_memory_detected)
15068         {
15069 #ifdef SIMPLE_DPRINTF
15070             // stress log can't handle any parameter that's bigger than a void*.
15071             if (heap_number == 0)
15072             {
15073                 dprintf (GTC_LOG, ("tp: %I64d, ap: %I64d", total_physical_mem, available_physical));
15074             }
15075 #endif //SIMPLE_DPRINTF
15076
15077             high_memory_load = TRUE;
15078
15079             if (memory_load >= v_high_memory_load_th || low_memory_detected)
15080             {
15081                 // TODO: Perhaps in 64-bit we should be estimating gen1's fragmentation as well since
15082                 // gen1/gen0 may take a lot more memory than gen2.
15083                 if (!high_fragmentation)
15084                 {
15085                     high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation);
15086                 }
15087                 v_high_memory_load = TRUE;
15088             }
15089             else
15090             {
15091                 if (!high_fragmentation)
15092                 {
15093                     high_fragmentation = dt_estimate_high_frag_p (tuning_deciding_condemned_gen, max_generation, available_physical);
15094                 }
15095             }
15096
15097             if (high_fragmentation)
15098             {
15099                 if (high_memory_load)
15100                 {
15101                     local_condemn_reasons->set_condition (gen_max_high_frag_m_p);
15102                 }
15103                 else if (v_high_memory_load)
15104                 {
15105                     local_condemn_reasons->set_condition (gen_max_high_frag_vm_p);
15106                 }
15107             }
15108         }
15109     }
15110
15111     dprintf (GTC_LOG, ("h%d: le: %d, hm: %d, vm: %d, f: %d",
15112                  heap_number, low_ephemeral_space, high_memory_load, v_high_memory_load,
15113                  high_fragmentation));
15114
15115     if (should_expand_in_full_gc)
15116     {
15117         dprintf (GTC_LOG, ("h%d: expand_in_full - BLOCK", heap_number));
15118         *blocking_collection_p = TRUE;
15119         evaluate_elevation = FALSE;
15120         n = max_generation;
15121         local_condemn_reasons->set_condition (gen_expand_fullgc_p);
15122     }
15123
15124     if (last_gc_before_oom)
15125     {
15126         dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number));
15127         n = max_generation;
15128         *blocking_collection_p = TRUE;
15129         if ((local_settings->reason == reason_oos_loh) ||
15130             (local_settings->reason == reason_alloc_loh))
15131         {
15132             evaluate_elevation = FALSE;
15133         }
15134
15135         local_condemn_reasons->set_condition (gen_before_oom);
15136     }
15137
15138     if (!check_only_p)
15139     {
15140         if (is_induced_blocking (settings.reason) && 
15141             n_initial == max_generation
15142             IN_STRESS_HEAP( && !settings.stress_induced ))
15143         {
15144             if (heap_number == 0)
15145             {
15146                 dprintf (GTC_LOG, ("induced - BLOCK"));
15147             }
15148
15149             *blocking_collection_p = TRUE;
15150             local_condemn_reasons->set_condition (gen_induced_fullgc_p);
15151             evaluate_elevation = FALSE;
15152         }
15153
15154         if (settings.reason == reason_induced_noforce)
15155         {
15156             local_condemn_reasons->set_condition (gen_induced_noforce_p);
15157             evaluate_elevation = FALSE;
15158         }
15159     }
15160
15161     if (!provisional_mode_triggered && evaluate_elevation && (low_ephemeral_space || high_memory_load || v_high_memory_load))
15162     {
15163         *elevation_requested_p = TRUE;
15164 #ifdef BIT64
15165         // if we are under high memory load and have consumed at least 10% of the gen2 budget, do a gen2 now.
15166         if (high_memory_load || v_high_memory_load)
15167         {
15168             dynamic_data* dd_max = dynamic_data_of (max_generation);
15169             if (((float)dd_new_allocation (dd_max) / (float)dd_desired_allocation (dd_max)) < 0.9)
15170             {
15171                 dprintf (GTC_LOG, ("%Id left in gen2 alloc (%Id)", 
15172                     dd_new_allocation (dd_max), dd_desired_allocation (dd_max)));
15173                 n = max_generation;
15174                 local_condemn_reasons->set_condition (gen_almost_max_alloc);
15175             }
15176         }
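        // A worked example of the ratio check above (illustrative numbers only):
        // with dd_desired_allocation = 100 MB and dd_new_allocation (the part of
        // the gen2 budget still unspent) = 80 MB, the ratio is 0.8 < 0.9 - more
        // than 10% of the budget has been consumed - so n is elevated to
        // max_generation.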
15177
15178         if (n <= max_generation)
15179         {
15180 #endif // BIT64
15181             if (high_fragmentation)
15182             {
15183                 //elevate to max_generation
15184                 n = max_generation;
15185                 dprintf (GTC_LOG, ("h%d: f full", heap_number));
15186
15187 #ifdef BACKGROUND_GC
15188                 if (high_memory_load || v_high_memory_load)
15189                 {
15190                     // For background GC we want to do blocking collections more eagerly because we don't
15191                     // want to get into the situation where the memory load becomes high while we are in
15192                     // a background GC and we'd have to wait for the background GC to finish to start
15193                     // a blocking collection (right now the implementation doesn't handle converting
15194                     // a background GC to a blocking collection midway).
15195                     dprintf (GTC_LOG, ("h%d: bgc - BLOCK", heap_number));
15196                     *blocking_collection_p = TRUE;
15197                 }
15198 #else
15199                 if (v_high_memory_load)
15200                 {
15201                     dprintf (GTC_LOG, ("h%d: - BLOCK", heap_number));
15202                     *blocking_collection_p = TRUE;
15203                 }
15204 #endif //BACKGROUND_GC
15205             }
15206             else
15207             {
15208                 n = max (n, max_generation - 1);
15209                 dprintf (GTC_LOG, ("h%d: nf c %d", heap_number, n));
15210             }
15211 #ifdef BIT64
15212         }
15213 #endif // BIT64
15214     }
15215
15216     if (!provisional_mode_triggered && (n == (max_generation - 1)) && (n_alloc < (max_generation -1)))
15217     {
15218         dprintf (GTC_LOG, ("h%d: budget %d, check 2",
15219                       heap_number, n_alloc));
15220         if (get_new_allocation (max_generation) <= 0)
15221         {
15222             dprintf (GTC_LOG, ("h%d: budget alloc", heap_number));
15223             n = max_generation;
15224             local_condemn_reasons->set_condition (gen_max_gen1);
15225         }
15226     }
15227
15228     //figure out if max_generation is too fragmented -> blocking collection
15229     if (!provisional_mode_triggered && (n == max_generation))
15230     {
15231         if (dt_high_frag_p (tuning_deciding_condemned_gen, n))
15232         {
15233             dprintf (GTC_LOG, ("h%d: g%d too frag", heap_number, n));
15234             local_condemn_reasons->set_condition (gen_max_high_frag_p);
15235             if (local_settings->pause_mode != pause_sustained_low_latency)
15236             {
15237                 *blocking_collection_p = TRUE;
15238             }
15239         }
15240     }
15241
15242 #ifdef BACKGROUND_GC
15243     if (n == max_generation)
15244     {
15245         if (heap_number == 0)
15246         {
15247             BOOL bgc_heap_too_small = TRUE;
15248             size_t gen2size = 0;
15249             size_t gen3size = 0;
15250 #ifdef MULTIPLE_HEAPS
15251             for (int i = 0; i < n_heaps; i++)
15252             {
15253                 if (((g_heaps[i]->current_generation_size (max_generation)) > bgc_min_per_heap) || 
15254                     ((g_heaps[i]->current_generation_size (max_generation + 1)) > bgc_min_per_heap))
15255                 {
15256                     bgc_heap_too_small = FALSE;
15257                     break;
15258                 }
15259             }
15260 #else //MULTIPLE_HEAPS
15261             if ((current_generation_size (max_generation) > bgc_min_per_heap) || 
15262                 (current_generation_size (max_generation + 1) > bgc_min_per_heap))
15263             {
15264                 bgc_heap_too_small = FALSE;
15265             }            
15266 #endif //MULTIPLE_HEAPS
15267
15268             if (bgc_heap_too_small)
15269             {
15270                 dprintf (GTC_LOG, ("gen2 and gen3 too small"));
15271
15272 #ifdef STRESS_HEAP
15273                 // do not turn stress-induced collections into blocking GCs
15274                 if (!settings.stress_induced)
15275 #endif //STRESS_HEAP
15276                 {
15277                     *blocking_collection_p = TRUE;
15278                 }
15279
15280                 local_condemn_reasons->set_condition (gen_gen2_too_small);
15281             }
15282         }
15283     }
15284 #endif //BACKGROUND_GC
15285
15286 exit:
15287     if (!check_only_p)
15288     {
15289 #ifdef STRESS_HEAP
15290 #ifdef BACKGROUND_GC
15291         // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
15292         // generations to be collected.
15293
15294         if (orig_gen != max_generation &&
15295             g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
15296         {
15297             *elevation_requested_p = FALSE;
15298         }
15299 #endif //BACKGROUND_GC
15300 #endif //STRESS_HEAP
15301
15302         if (check_memory)
15303         {
15304             fgm_result.available_pagefile_mb = (size_t)(available_page_file / (1024 * 1024));
15305         }
15306
15307         local_condemn_reasons->set_gen (gen_final_per_heap, n);
15308         get_gc_data_per_heap()->gen_to_condemn_reasons.init (local_condemn_reasons);
15309
15310 #ifdef DT_LOG
15311         local_condemn_reasons->print (heap_number);
15312 #endif //DT_LOG
15313
15314         if ((local_settings->reason == reason_oos_soh) || 
15315             (local_settings->reason == reason_oos_loh))
15316         {
15317             assert (n >= 1);
15318         }
15319     }
15320
15321     return n;
15322 }
15323
15324 #ifdef _PREFAST_
15325 #pragma warning(pop)
15326 #endif //_PREFAST_
15327
15328 inline
15329 size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps)
15330 {
15331     // if the memory load is higher, the threshold we'd want to collect gets lower.
15332     size_t min_mem_based_on_available = 
15333         (500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps;
15334     size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10);
15335     uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps;
15336
15337 #ifdef SIMPLE_DPRINTF
15338     dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d", 
15339         min_mem_based_on_available, ten_percent_size, three_percent_mem));
15340 #endif //SIMPLE_DPRINTF
15341     return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem)));
15342 }
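// A worked example of the threshold above (illustrative numbers only, num_heaps = 1):
// with settings.entry_memory_load = 95 and high_memory_load_th = 90, the load-based
// term is (500 - 5*40) MB = 300 MB; with a 4 GB gen2, the 10% term is ~410 MB; with
// 32 GB of physical memory (mem_one_percent ~ 328 MB), the 3% term is ~983 MB. The
// function returns the smallest of the three - 300 MB here - so the higher the memory
// load at entry, the less fragmentation it takes to consider reclaiming space.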
15343
15344 inline
15345 uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps)
15346 {
15347     return min (available_mem, (256*1024*1024)) / num_heaps;
15348 }
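// e.g. (illustrative): with 8 heaps and more than 256 MB available, the threshold is
// 256 MB / 8 = 32 MB per heap; once available memory drops below 256 MB, the available
// amount itself is what gets divided across the heaps.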
15349
15350 enum {
15351     CORINFO_EXCEPTION_GC = 0xE0004743 // 'GC'
15352 };
15353
15354
15355 #ifdef BACKGROUND_GC
15356 void gc_heap::init_background_gc ()
15357 {
15358     // reset the allocation so the foreground GC can allocate into the older (max_generation) generation
15359     generation* gen = generation_of (max_generation);
15360     generation_allocation_pointer (gen) = 0;
15361     generation_allocation_limit (gen) = 0;
15362     generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
15363
15364     PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
15365
15366     //reset the plan allocation for each segment
15367     for (heap_segment* seg = generation_allocation_segment (gen); seg != ephemeral_heap_segment;
15368         seg = heap_segment_next_rw (seg))
15369     {
15370         heap_segment_plan_allocated (seg) = heap_segment_allocated (seg);
15371     }
15372
15373     if (heap_number == 0)
15374     {
15375         dprintf (2, ("heap%d: bgc lowest: %Ix, highest: %Ix", 
15376             heap_number,
15377             background_saved_lowest_address, 
15378             background_saved_highest_address));
15379     }
15380
15381     gc_lh_block_event.Reset();
15382 }
15383
15384 #endif //BACKGROUND_GC
15385
15386 inline
15387 void fire_drain_mark_list_event (size_t mark_list_objects)
15388 {
15389     FIRE_EVENT(BGCDrainMark, mark_list_objects);
15390 }
15391
15392 inline
15393 void fire_revisit_event (size_t dirtied_pages, 
15394                          size_t marked_objects,
15395                          BOOL large_objects_p)
15396 {
15397     FIRE_EVENT(BGCRevisit, dirtied_pages, marked_objects, large_objects_p);
15398 }
15399
15400 inline
15401 void fire_overflow_event (uint8_t* overflow_min,
15402                           uint8_t* overflow_max,
15403                           size_t marked_objects, 
15404                           int large_objects_p)
15405 {
15406     FIRE_EVENT(BGCOverflow, (uint64_t)overflow_min, (uint64_t)overflow_max, marked_objects, large_objects_p);
15407 }
15408
15409 void gc_heap::concurrent_print_time_delta (const char* msg)
15410 {
15411 #ifdef TRACE_GC
15412     size_t current_time = GetHighPrecisionTimeStamp();
15413     size_t elapsed_time = current_time - time_bgc_last;
15414     time_bgc_last = current_time;
15415
15416     dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time));
15417 #else
15418     UNREFERENCED_PARAMETER(msg);
15419 #endif //TRACE_GC
15420 }
15421
15422 void gc_heap::free_list_info (int gen_num, const char* msg)
15423 {
15424     UNREFERENCED_PARAMETER(gen_num);
15425 #if defined (BACKGROUND_GC) && defined (TRACE_GC)
15426     dprintf (3, ("h%d: %s", heap_number, msg));
15427     for (int i = 0; i <= (max_generation + 1); i++)
15428     {
15429         generation* gen = generation_of (i);
15430         if ((generation_allocation_size (gen) == 0) && 
15431             (generation_free_list_space (gen) == 0) && 
15432             (generation_free_obj_space (gen) == 0))
15433         {
15434             // don't print if everything is 0.
15435         }
15436         else
15437         {
15438             dprintf (3, ("h%d: g%d: a-%Id, fl-%Id, fo-%Id",
15439                 heap_number, i, 
15440                 generation_allocation_size (gen), 
15441                 generation_free_list_space (gen), 
15442                 generation_free_obj_space (gen)));
15443         }
15444     }
15445 #else
15446     UNREFERENCED_PARAMETER(msg);
15447 #endif // BACKGROUND_GC && TRACE_GC
15448 }
15449
15450 void gc_heap::update_collection_counts_for_no_gc()
15451 {
15452     assert (settings.pause_mode == pause_no_gc);
15453
15454     settings.condemned_generation = max_generation;
15455 #ifdef MULTIPLE_HEAPS
15456     for (int i = 0; i < n_heaps; i++)
15457         g_heaps[i]->update_collection_counts();
15458 #else //MULTIPLE_HEAPS
15459     update_collection_counts();
15460 #endif //MULTIPLE_HEAPS
15461
15462     full_gc_counts[gc_type_blocking]++;
15463 }
15464
15465 BOOL gc_heap::should_proceed_with_gc()
15466 {
15467     if (gc_heap::settings.pause_mode == pause_no_gc)
15468     {
15469         if (current_no_gc_region_info.started)
15470         {
15471             // The no_gc mode was already in progress, yet we triggered another GC;
15472             // this effectively exits the no_gc mode.
15473             restore_data_for_no_gc();
15474         }
15475         else
15476             return should_proceed_for_no_gc();
15477     }
15478
15479     return TRUE;
15480 }
15481
15482 // internal part of GC, used by both the serial and concurrent versions
15483 void gc_heap::gc1()
15484 {
15485 #ifdef BACKGROUND_GC
15486     assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15487 #endif //BACKGROUND_GC
15488
15489 #ifdef TIME_GC
15490     mark_time = plan_time = reloc_time = compact_time = sweep_time = 0;
15491 #endif //TIME_GC
15492
15493     verify_soh_segment_list();
15494
15495     int n = settings.condemned_generation;
15496
15497     if (settings.reason == reason_pm_full_gc)
15498     {
15499         assert (n == max_generation);
15500         init_records();
15501
15502         gen_to_condemn_tuning* local_condemn_reasons = &(get_gc_data_per_heap()->gen_to_condemn_reasons);
15503         local_condemn_reasons->init();
15504         local_condemn_reasons->set_gen (gen_initial, n);
15505         local_condemn_reasons->set_gen (gen_final_per_heap, n);
15506     }
15507
15508     update_collection_counts ();
15509
15510 #ifdef BACKGROUND_GC
15511     bgc_alloc_lock->check();
15512 #endif //BACKGROUND_GC
15513
15514     free_list_info (max_generation, "beginning");
15515
15516     vm_heap->GcCondemnedGeneration = settings.condemned_generation;
15517
15518     assert (g_gc_card_table == card_table);
15519
15520 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
15521     assert (g_gc_card_bundle_table == card_bundle_table);
15522 #endif    
15523
15524     {
15525         if (n == max_generation)
15526         {
15527             gc_low = lowest_address;
15528             gc_high = highest_address;
15529         }
15530         else
15531         {
15532             gc_low = generation_allocation_start (generation_of (n));
15533             gc_high = heap_segment_reserved (ephemeral_heap_segment);
15534         }   
15535 #ifdef BACKGROUND_GC
15536         if (settings.concurrent)
15537         {
15538 #ifdef TRACE_GC
15539             time_bgc_last = GetHighPrecisionTimeStamp();
15540 #endif //TRACE_GC
15541
15542             FIRE_EVENT(BGCBegin);
15543
15544             concurrent_print_time_delta ("BGC");
15545
15546 //#ifdef WRITE_WATCH
15547             //reset_write_watch (FALSE);
15548 //#endif //WRITE_WATCH
15549
15550             concurrent_print_time_delta ("RW");
15551             background_mark_phase();
15552             free_list_info (max_generation, "after mark phase");
15553             
15554             background_sweep();
15555             free_list_info (max_generation, "after sweep phase");
15556         }
15557         else
15558 #endif //BACKGROUND_GC
15559         {
15560             mark_phase (n, FALSE);
15561
15562             GCScan::GcRuntimeStructuresValid (FALSE);
15563             plan_phase (n);
15564             GCScan::GcRuntimeStructuresValid (TRUE);
15565         }
15566     }
15567
15568     size_t end_gc_time = GetHighPrecisionTimeStamp();
15569 //    printf ("generation: %d, elapsed time: %Id\n", n,  end_gc_time - dd_time_clock (dynamic_data_of (0)));
15570
15571     // adjust the allocation size to account for the pinned quantities.
15572     for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++)
15573     {
15574         generation* gn = generation_of (gen_number);
15575         if (settings.compaction)
15576         {
15577             generation_pinned_allocated (gn) += generation_pinned_allocation_compact_size (gn);
15578             generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_compact_size (gn);
15579         }
15580         else
15581         {
15582             generation_pinned_allocated (gn) += generation_pinned_allocation_sweep_size (gn);
15583             generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_sweep_size (gn);
15584         }
15585         generation_pinned_allocation_sweep_size (gn) = 0;
15586         generation_pinned_allocation_compact_size (gn) = 0;
15587     }
15588
15589 #ifdef BACKGROUND_GC
15590     if (settings.concurrent)
15591     {
15592         dynamic_data* dd = dynamic_data_of (n);
15593         dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);
15594
15595         free_list_info (max_generation, "after computing new dynamic data");
15596
15597         gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
15598
15599         for (int gen_number = 0; gen_number < max_generation; gen_number++)
15600         {
15601             dprintf (2, ("end of BGC: gen%d new_alloc: %Id", 
15602                          gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
15603             current_gc_data_per_heap->gen_data[gen_number].size_after = generation_size (gen_number);
15604             current_gc_data_per_heap->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
15605             current_gc_data_per_heap->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
15606         }
15607     }
15608     else
15609 #endif //BACKGROUND_GC
15610     {
15611         free_list_info (max_generation, "end");
15612         for (int gen_number = 0; gen_number <= n; gen_number++)
15613         {
15614             dynamic_data* dd = dynamic_data_of (gen_number);
15615             dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);
15616             compute_new_dynamic_data (gen_number);
15617         }
15618
15619         if (n != max_generation)
15620         {
15621             int gen_num_for_data = ((n < (max_generation - 1)) ? (n + 1) : (max_generation + 1));
15622             for (int gen_number = (n + 1); gen_number <= gen_num_for_data; gen_number++)
15623             {
15624                 get_gc_data_per_heap()->gen_data[gen_number].size_after = generation_size (gen_number);
15625                 get_gc_data_per_heap()->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
15626                 get_gc_data_per_heap()->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
15627             }
15628         }
15629
15630         get_gc_data_per_heap()->maxgen_size_info.running_free_list_efficiency = (uint32_t)(generation_allocator_efficiency (generation_of (max_generation)) * 100);
15631
15632         free_list_info (max_generation, "after computing new dynamic data");
15633         
15634         if (heap_number == 0)
15635         {
15636             dprintf (GTC_LOG, ("GC#%d(gen%d) took %Idms", 
15637                 dd_collection_count (dynamic_data_of (0)), 
15638                 settings.condemned_generation,
15639                 dd_gc_elapsed_time (dynamic_data_of (0))));
15640         }
15641
15642         for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
15643         {
15644             dprintf (2, ("end of FGC/NGC: gen%d new_alloc: %Id", 
15645                          gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
15646         }
15647     }
15648
15649     if (n < max_generation)
15650     {
15651         compute_promoted_allocation (1 + n);
15652
15653         dynamic_data* dd = dynamic_data_of (1 + n);
15654         size_t new_fragmentation = generation_free_list_space (generation_of (1 + n)) + 
15655                                    generation_free_obj_space (generation_of (1 + n));
15656
15657 #ifdef BACKGROUND_GC
15658         if (current_c_gc_state != c_gc_state_planning)
15659 #endif //BACKGROUND_GC
15660         {
15661             if (settings.promotion)
15662             {
15663                 dd_fragmentation (dd) = new_fragmentation;
15664             }
15665             else
15666             {
15667                 //assert (dd_fragmentation (dd) == new_fragmentation);
15668             }
15669         }
15670     }
15671
15672 #ifdef BACKGROUND_GC
15673     if (!settings.concurrent)
15674 #endif //BACKGROUND_GC
15675     {
15676 #ifndef FEATURE_REDHAWK
15677         // GCToEEInterface::IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR.
15678         assert(GCToEEInterface::IsGCThread());
15679 #endif // FEATURE_REDHAWK
15680         adjust_ephemeral_limits();
15681     }
15682
15683 #ifdef BACKGROUND_GC
15684     assert (ephemeral_low == generation_allocation_start (generation_of ( max_generation -1)));
15685     assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment));
15686 #endif //BACKGROUND_GC
15687
15688     if (fgn_maxgen_percent)
15689     {
15690         if (settings.condemned_generation == (max_generation - 1))
15691         {
15692             check_for_full_gc (max_generation - 1, 0);
15693         }
15694         else if (settings.condemned_generation == max_generation)
15695         {
15696             if (full_gc_approach_event_set 
15697 #ifdef MULTIPLE_HEAPS
15698                 && (heap_number == 0)
15699 #endif //MULTIPLE_HEAPS
15700                 )
15701             {
15702                 dprintf (2, ("FGN-GC: setting gen2 end event"));
15703
15704                 full_gc_approach_event.Reset();
15705 #ifdef BACKGROUND_GC
15706                 // By definition WaitForFullGCComplete only succeeds if it's a full, *blocking* GC; otherwise we need to return N/A.
15707                 fgn_last_gc_was_concurrent = settings.concurrent ? TRUE : FALSE;
15708 #endif //BACKGROUND_GC
15709                 full_gc_end_event.Set();
15710                 full_gc_approach_event_set = false;            
15711             }
15712         }
15713     }
15714
15715 #ifdef BACKGROUND_GC
15716     if (!settings.concurrent)
15717 #endif //BACKGROUND_GC
15718     {
15719         //decide on the next allocation quantum
15720         if (alloc_contexts_used >= 1)
15721         {
15722             allocation_quantum = Align (min ((size_t)CLR_SIZE,
15723                                             (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))),
15724                                             get_alignment_constant(FALSE));
15725             dprintf (3, ("New allocation quantum: %d(0x%Ix)", allocation_quantum, allocation_quantum));
15726         }
15727     }
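    // Worked example of the quantum computation above (illustrative numbers): with a
    // gen0 budget of get_new_allocation (0) = 4 MB and 16 in-use allocation contexts,
    // 4 MB / 32 = 128 KB, which the outer min() then clamps down to CLR_SIZE - each
    // allocation context carves memory out of the heap in quanta of that size. The
    // inner max (1024, ...) keeps the quantum sane when the budget is tiny or the
    // context count is large.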
15728
15729     descr_generations (FALSE);
15730
15731     verify_soh_segment_list();
15732
15733 #ifdef BACKGROUND_GC
15734     add_to_history_per_heap();
15735     if (heap_number == 0)
15736     {
15737         add_to_history();
15738     }
15739 #endif // BACKGROUND_GC
15740
15741 #ifdef GC_STATS
15742     if (GCStatistics::Enabled() && heap_number == 0)
15743         g_GCStatistics.AddGCStats(settings, 
15744             dd_gc_elapsed_time(dynamic_data_of(settings.condemned_generation)));
15745 #endif // GC_STATS
15746
15747 #ifdef TIME_GC
15748     fprintf (stdout, "%d,%d,%d,%d,%d,%d\n",
15749              n, mark_time, plan_time, reloc_time, compact_time, sweep_time);
15750 #endif //TIME_GC
15751
15752 #ifdef BACKGROUND_GC
15753     assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15754 #endif //BACKGROUND_GC
15755
15756 #if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
15757     if (FALSE 
15758 #ifdef VERIFY_HEAP
15759         // Note that right now g_pConfig->GetHeapVerifyLevel always returns the same
15760         // value. If we ever allow randomly adjusting this as the process runs,
15761         // we cannot call it this way as joins need to match - we must have the same
15762         // value for all heaps like we do with bgc_heap_walk_for_etw_p.
15763         || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
15764 #endif
15765 #if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC)
15766         || (bgc_heap_walk_for_etw_p && settings.concurrent)
15767 #endif
15768         )
15769     {
15770 #ifdef BACKGROUND_GC
15771         bool cooperative_mode = true;
15772
15773         if (settings.concurrent)
15774         {
15775             cooperative_mode = enable_preemptive ();
15776
15777 #ifdef MULTIPLE_HEAPS
15778             bgc_t_join.join(this, gc_join_suspend_ee_verify);
15779             if (bgc_t_join.joined())
15780             {
15781                 bgc_threads_sync_event.Reset();
15782
15783                 dprintf(2, ("Joining BGC threads to suspend EE for verify heap"));
15784                 bgc_t_join.restart();
15785             }
15786             if (heap_number == 0)
15787             {
15788                 suspend_EE();
15789                 bgc_threads_sync_event.Set();
15790             }
15791             else
15792             {
15793                 bgc_threads_sync_event.Wait(INFINITE, FALSE);
15794                 dprintf (2, ("bgc_threads_sync_event is signalled"));
15795             }
15796 #else
15797             suspend_EE();
15798 #endif //MULTIPLE_HEAPS
15799
15800             //fix the allocation area so verify_heap can proceed.
15801             fix_allocation_contexts (FALSE);
15802         }
15803 #endif //BACKGROUND_GC
15804
15805 #ifdef BACKGROUND_GC
15806         assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15807 #ifdef FEATURE_EVENT_TRACE
15808         if (bgc_heap_walk_for_etw_p && settings.concurrent)
15809         {
15810             GCToEEInterface::DiagWalkBGCSurvivors(__this);
15811
15812 #ifdef MULTIPLE_HEAPS
15813             bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
15814             if (bgc_t_join.joined())
15815             {
15816                 bgc_t_join.restart();
15817             }
15818 #endif // MULTIPLE_HEAPS
15819         }
15820 #endif // FEATURE_EVENT_TRACE
15821 #endif //BACKGROUND_GC
15822
15823 #ifdef VERIFY_HEAP
15824         if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
15825             verify_heap (FALSE);
15826 #endif // VERIFY_HEAP
15827
15828 #ifdef BACKGROUND_GC
15829         if (settings.concurrent)
15830         {
15831             repair_allocation_contexts (TRUE);
15832
15833 #ifdef MULTIPLE_HEAPS
15834             bgc_t_join.join(this, gc_join_restart_ee_verify);
15835             if (bgc_t_join.joined())
15836             {
15837                 bgc_threads_sync_event.Reset();
15838
15839                 dprintf(2, ("Joining BGC threads to restart EE after verify heap"));
15840                 bgc_t_join.restart();
15841             }
15842             if (heap_number == 0)
15843             {
15844                 restart_EE();
15845                 bgc_threads_sync_event.Set();
15846             }
15847             else
15848             {
15849                 bgc_threads_sync_event.Wait(INFINITE, FALSE);
15850                 dprintf (2, ("bgc_threads_sync_event is signalled"));
15851             }
15852 #else
15853             restart_EE();
15854 #endif //MULTIPLE_HEAPS
15855
15856             disable_preemptive (cooperative_mode);
15857         }
15858 #endif //BACKGROUND_GC
15859     }
15860 #endif // defined(VERIFY_HEAP) || (defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
15861
15862 #ifdef MULTIPLE_HEAPS
15863     if (!settings.concurrent)
15864     {
15865         gc_t_join.join(this, gc_join_done);
15866         if (gc_t_join.joined ())
15867         {
15868             gc_heap::internal_gc_done = false;
15869
15870             //equalize the new desired size of the generations
15871             int limit = settings.condemned_generation;
15872             if (limit == max_generation)
15873             {
15874                 limit = max_generation+1;
15875             }
15876             for (int gen = 0; gen <= limit; gen++)
15877             {
15878                 size_t total_desired = 0;
15879
15880                 for (int i = 0; i < gc_heap::n_heaps; i++)
15881                 {
15882                     gc_heap* hp = gc_heap::g_heaps[i];
15883                     dynamic_data* dd = hp->dynamic_data_of (gen);
15884                     size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
15885                     if (temp_total_desired < total_desired)
15886                     {
15887                         // we overflowed.
15888                         total_desired = (size_t)MAX_PTR;
15889                         break;
15890                     }
15891                     total_desired = temp_total_desired;
15892                 }
15893
15894                 size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps,
15895                                                     get_alignment_constant ((gen != (max_generation+1))));
15896
15897                 if (gen == 0)
15898                 {
15899 #if 1 //subsumed by the linear allocation model
15900                     // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
15901                     // apply some smoothing.
15902                     static size_t smoothed_desired_per_heap = 0;
15903                     size_t smoothing = 3; // exponential smoothing factor
15904                     if (smoothing > VolatileLoad(&settings.gc_index))
15905                         smoothing = VolatileLoad(&settings.gc_index);
15906                     smoothed_desired_per_heap = desired_per_heap / smoothing + ((smoothed_desired_per_heap / smoothing) * (smoothing-1));
15907                     dprintf (1, ("sn = %Id  n = %Id", smoothed_desired_per_heap, desired_per_heap));
15908                     desired_per_heap = Align(smoothed_desired_per_heap, get_alignment_constant (true));
15909 #endif //1
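                    // Worked example of the smoothing update above (illustrative
                    // numbers): with smoothing = 3 the update is s' = d/3 + s*2/3,
                    // an exponential moving average. If the previous smoothed value
                    // s = 60 MB and this GC's desired_per_heap d = 120 MB, then
                    // s' = 40 MB + 40 MB = 80 MB, so a one-off survivorship spike
                    // only moves the gen0 budget a third of the way.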
15910
15911                     // if desired_per_heap is close to min_gc_size, trim it
15912                     // down to min_gc_size to stay in the cache
15913                     gc_heap* hp = gc_heap::g_heaps[0];
15914                     dynamic_data* dd = hp->dynamic_data_of (gen);
15915                     size_t min_gc_size = dd_min_size(dd);
15916                     // if the min GC size is larger than the true on-die cache size, then don't bother
15917                     // limiting the desired size
15918                     if ((min_gc_size <= GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)) &&
15919                         desired_per_heap <= 2*min_gc_size)
15920                     {
15921                         desired_per_heap = min_gc_size;
15922                     }
15923 #ifdef BIT64
15924                     desired_per_heap = joined_youngest_desired (desired_per_heap);
15925                     dprintf (2, ("final gen0 new_alloc: %Id", desired_per_heap));
15926 #endif // BIT64
15927
15928                     gc_data_global.final_youngest_desired = desired_per_heap;
15929                 }
15930 #if 1 //subsumed by the linear allocation model
15931                 if (gen == (max_generation + 1))
15932                 {
15933                     // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
15934                     // apply some smoothing.
15935                     static size_t smoothed_desired_per_heap_loh = 0;
15936                     size_t smoothing = 3; // exponential smoothing factor
15937                     size_t loh_count = dd_collection_count (dynamic_data_of (max_generation));
15938                     if (smoothing > loh_count)
15939                         smoothing = loh_count;
15940                     smoothed_desired_per_heap_loh = desired_per_heap / smoothing + ((smoothed_desired_per_heap_loh / smoothing) * (smoothing-1));
15941                     dprintf (2, ("smoothed_desired_per_heap_loh = %Id  desired_per_heap = %Id", smoothed_desired_per_heap_loh, desired_per_heap));
15942                     desired_per_heap = Align(smoothed_desired_per_heap_loh, get_alignment_constant (false));
15943                 }
15944 #endif //1
15945                 for (int i = 0; i < gc_heap::n_heaps; i++)
15946                 {
15947                     gc_heap* hp = gc_heap::g_heaps[i];
15948                     dynamic_data* dd = hp->dynamic_data_of (gen);
15949                     dd_desired_allocation (dd) = desired_per_heap;
15950                     dd_gc_new_allocation (dd) = desired_per_heap;
15951                     dd_new_allocation (dd) = desired_per_heap;
15952
15953                     if (gen == 0)
15954                     {
15955                         hp->fgn_last_alloc = desired_per_heap;
15956                     }
15957                 }
15958             }
15959
15960 #ifdef FEATURE_LOH_COMPACTION
15961             BOOL all_heaps_compacted_p = TRUE;
15962 #endif //FEATURE_LOH_COMPACTION
15963             for (int i = 0; i < gc_heap::n_heaps; i++)
15964             {
15965                 gc_heap* hp = gc_heap::g_heaps[i];
15966                 hp->decommit_ephemeral_segment_pages();
15967                 hp->rearrange_large_heap_segments();
15968 #ifdef FEATURE_LOH_COMPACTION
15969                 all_heaps_compacted_p &= hp->loh_compacted_p;
15970 #endif //FEATURE_LOH_COMPACTION
15971             }
15972
15973 #ifdef FEATURE_LOH_COMPACTION
15974             check_loh_compact_mode (all_heaps_compacted_p);
15975 #endif //FEATURE_LOH_COMPACTION
15976
15977             fire_pevents();
15978             pm_full_gc_init_or_clear();
15979
15980             gc_t_join.restart();
15981         }
15982         alloc_context_count = 0;
15983         heap_select::mark_heap (heap_number);
15984     }
15985
15986 #else
15987     gc_data_global.final_youngest_desired = 
15988         dd_desired_allocation (dynamic_data_of (0));
15989
15990     check_loh_compact_mode (loh_compacted_p);
15991
15992     decommit_ephemeral_segment_pages();
15993     fire_pevents();
15994
15995     if (!(settings.concurrent))
15996     {
15997         rearrange_large_heap_segments();
15998         do_post_gc();
15999     }
16000
16001     pm_full_gc_init_or_clear();
16002
16003 #ifdef BACKGROUND_GC
16004     recover_bgc_settings();
16005 #endif //BACKGROUND_GC
16006 #endif //MULTIPLE_HEAPS
16007 }
16008
16009 void gc_heap::save_data_for_no_gc()
16010 {
16011     current_no_gc_region_info.saved_pause_mode = settings.pause_mode;
16012 #ifdef MULTIPLE_HEAPS
16013     // This is to affect heap balancing. 
16014     for (int i = 0; i < n_heaps; i++)
16015     {
16016         current_no_gc_region_info.saved_gen0_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (0));
16017         dd_min_size (g_heaps[i]->dynamic_data_of (0)) = min_balance_threshold;
16018         current_no_gc_region_info.saved_gen3_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1));
16019         dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = 0;
16020     }
16021 #endif //MULTIPLE_HEAPS
16022 }
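// Note: the loop above overwrites saved_gen0_min_size and saved_gen3_min_size on every
// iteration; this assumes all heaps enter the region with identical gen0/gen3 min sizes,
// so saving the last heap's value suffices.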
16023
16024 void gc_heap::restore_data_for_no_gc()
16025 {
16026     gc_heap::settings.pause_mode = current_no_gc_region_info.saved_pause_mode;
16027 #ifdef MULTIPLE_HEAPS
16028     for (int i = 0; i < n_heaps; i++)
16029     {
16030         dd_min_size (g_heaps[i]->dynamic_data_of (0)) = current_no_gc_region_info.saved_gen0_min_size;
16031         dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = current_no_gc_region_info.saved_gen3_min_size;
16032     }
16033 #endif //MULTIPLE_HEAPS
16034 }
16035
16036 start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size,
16037                                                              BOOL loh_size_known, 
16038                                                              uint64_t loh_size,
16039                                                              BOOL disallow_full_blocking)
16040 {
16041     if (current_no_gc_region_info.started)
16042     {
16043         return start_no_gc_in_progress;
16044     }
16045
16046     start_no_gc_region_status status = start_no_gc_success;
16047
16048     save_data_for_no_gc();
16049     settings.pause_mode = pause_no_gc;
16050     current_no_gc_region_info.start_status = start_no_gc_success;
16051
16052     uint64_t allocation_no_gc_loh = 0;
16053     uint64_t allocation_no_gc_soh = 0;
16054     assert(total_size != 0);
16055     if (loh_size_known)
16056     {
16057         assert(loh_size != 0);
16058         assert(loh_size <= total_size);
16059         allocation_no_gc_loh = loh_size;
16060         allocation_no_gc_soh = total_size - loh_size;
16061     }
16062     else
16063     {
16064         allocation_no_gc_soh = total_size;
16065         allocation_no_gc_loh = total_size;
16066     }
16067
16068     int soh_align_const = get_alignment_constant (TRUE);
16069     size_t max_soh_allocated = soh_segment_size - segment_info_size - eph_gen_starts_size;
16070     size_t size_per_heap = 0;
16071     const double scale_factor = 1.05;
16072
16073     int num_heaps = 1;
16074 #ifdef MULTIPLE_HEAPS
16075     num_heaps = n_heaps;
16076 #endif // MULTIPLE_HEAPS
16077
16078     uint64_t total_allowed_soh_allocation = max_soh_allocated * num_heaps;
16079     // [LOCALGC TODO]
16080     // In theory, the upper limit here is the physical memory of the machine, not
16081     // SIZE_T_MAX. This is not true today because total_physical_mem can be
16082     // larger than SIZE_T_MAX if running in wow64 on a machine with more than
16083     // 4GB of RAM. Once Local GC code divergence is resolved and code is flowing
16084     // more freely between branches, it would be good to clean this up to use
16085     // total_physical_mem instead of SIZE_T_MAX.
16086     assert(total_allowed_soh_allocation <= SIZE_T_MAX);
16087     uint64_t total_allowed_loh_allocation = SIZE_T_MAX;
16088     uint64_t total_allowed_soh_alloc_scaled = allocation_no_gc_soh > 0 ? static_cast<uint64_t>(total_allowed_soh_allocation / scale_factor) : 0;
16089     uint64_t total_allowed_loh_alloc_scaled = allocation_no_gc_loh > 0 ? static_cast<uint64_t>(total_allowed_loh_allocation / scale_factor) : 0;
16090
16091     if (allocation_no_gc_soh > total_allowed_soh_alloc_scaled ||
16092         allocation_no_gc_loh > total_allowed_loh_alloc_scaled)
16093     {
16094         status = start_no_gc_too_large;
16095         goto done;
16096     }
16097
16098     if (allocation_no_gc_soh > 0)
16099     {
16100         allocation_no_gc_soh = static_cast<uint64_t>(allocation_no_gc_soh * scale_factor);
16101         allocation_no_gc_soh = min (allocation_no_gc_soh, total_allowed_soh_alloc_scaled);
16102     }
16103
16104     if (allocation_no_gc_loh > 0)
16105     {
16106         allocation_no_gc_loh = static_cast<uint64_t>(allocation_no_gc_loh * scale_factor);
16107         allocation_no_gc_loh = min (allocation_no_gc_loh, total_allowed_loh_alloc_scaled);
16108     }
16109
16110     if (disallow_full_blocking)
16111         current_no_gc_region_info.minimal_gc_p = TRUE;
16112
16113     if (allocation_no_gc_soh != 0)
16114     {
16115         current_no_gc_region_info.soh_allocation_size = static_cast<size_t>(allocation_no_gc_soh);
16116         size_per_heap = current_no_gc_region_info.soh_allocation_size;
16117 #ifdef MULTIPLE_HEAPS
16118         size_per_heap /= n_heaps;
16119         for (int i = 0; i < n_heaps; i++)
16120         {
16121             // due to heap balancing we need to allow some room before we even look to balance to another heap.
16122             g_heaps[i]->soh_allocation_no_gc = min (Align ((size_per_heap + min_balance_threshold), soh_align_const), max_soh_allocated);
16123         }
16124 #else //MULTIPLE_HEAPS
16125         soh_allocation_no_gc = min (Align (size_per_heap, soh_align_const), max_soh_allocated);
16126 #endif //MULTIPLE_HEAPS
16127     }
16128
16129     if (allocation_no_gc_loh != 0)
16130     {
16131         current_no_gc_region_info.loh_allocation_size = static_cast<size_t>(allocation_no_gc_loh);
16132         size_per_heap = current_no_gc_region_info.loh_allocation_size;
16133 #ifdef MULTIPLE_HEAPS
16134         size_per_heap /= n_heaps;
16135         for (int i = 0; i < n_heaps; i++)
16136             g_heaps[i]->loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
16137 #else //MULTIPLE_HEAPS
16138         loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
16139 #endif //MULTIPLE_HEAPS
16140     }
16141
16142 done:
16143     if (status != start_no_gc_success)
16144         restore_data_for_no_gc();
16145     return status;
16146 }
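// Worked example of the budgeting above (illustrative numbers only): a request of
// total_size = 100 MB with loh_size_known and loh_size = 30 MB splits into
// allocation_no_gc_soh = 70 MB and allocation_no_gc_loh = 30 MB; each is then padded
// by scale_factor to 73.5 MB and 31.5 MB, divided across the heaps (with
// min_balance_threshold of head room on SOH), and capped at max_soh_allocated.
// Managed callers typically reach this path via GC.TryStartNoGCRegion.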
16147
16148 void gc_heap::handle_failure_for_no_gc()
16149 {
16150     gc_heap::restore_data_for_no_gc();
16151     // sets current_no_gc_region_info.started to FALSE here.
16152     memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
16153 }
16154
16155 start_no_gc_region_status gc_heap::get_start_no_gc_region_status()
16156 {
16157     return current_no_gc_region_info.start_status;
16158 }
16159
16160 void gc_heap::record_gcs_during_no_gc()
16161 {
16162     if (current_no_gc_region_info.started)
16163     {
16164         current_no_gc_region_info.num_gcs++;
16165         if (is_induced (settings.reason))
16166             current_no_gc_region_info.num_gcs_induced++;
16167     }
16168 }
16169
16170 BOOL gc_heap::find_loh_free_for_no_gc()
16171 {
16172     allocator* loh_allocator = generation_allocator (generation_of (max_generation + 1));
16173     size_t sz_list = loh_allocator->first_bucket_size();
16174     size_t size = loh_allocation_no_gc;
16175     for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
16176     {
16177         if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
16178         {
16179             uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
16180             while (free_list)
16181             {
16182                 size_t free_list_size = unused_array_size(free_list);
16183
16184                 if (free_list_size > loh_allocation_no_gc)
16185                 {
16186                     dprintf (3, ("free item %Ix(%Id) for no gc", (size_t)free_list, free_list_size));
16187                     return TRUE;
16188                 }
16189
16190                 free_list = free_list_slot (free_list); 
16191             }
16192         }
16193         sz_list = sz_list * 2;
16194     }
16195
16196     return FALSE;
16197 }
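// Bucket invariant assumed by the walk above: bucket a_l_idx holds free items smaller
// than sz_list (which doubles per bucket), so buckets whose upper bound is below the
// request can be skipped wholesale, and only the last, unbounded bucket must always be
// scanned. Items within a candidate bucket still need the individual free_list_size
// check, because a bucket only bounds its items' sizes from above.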
16198
16199 BOOL gc_heap::find_loh_space_for_no_gc()
16200 {
16201     saved_loh_segment_no_gc = 0;
16202
16203     if (find_loh_free_for_no_gc())
16204         return TRUE;
16205
16206     heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16207
16208     while (seg)
16209     {
16210         size_t remaining = heap_segment_reserved (seg) - heap_segment_allocated (seg);
16211         if (remaining >= loh_allocation_no_gc)
16212         {
16213             saved_loh_segment_no_gc = seg;
16214             break;
16215         }
16216         seg = heap_segment_next (seg);
16217     }
16218
16219     if (!saved_loh_segment_no_gc && current_no_gc_region_info.minimal_gc_p)
16220     {
16221         // If no full GC is allowed, we try to get a new seg right away.
16222         saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc)
16223 #ifdef MULTIPLE_HEAPS
16224                                                       , this
16225 #endif //MULTIPLE_HEAPS
16226                                                       );
16227     }
16228
16229     return (saved_loh_segment_no_gc != 0);
16230 }
16231
16232 BOOL gc_heap::loh_allocated_for_no_gc()
16233 {
16234     if (!saved_loh_segment_no_gc)
16235         return FALSE;
16236
16237     heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16238     do 
16239     {
16240         if (seg == saved_loh_segment_no_gc)
16241         {
16242             return FALSE;
16243         }
16244         seg = heap_segment_next (seg);
16245     } while (seg);
16246
16247     return TRUE;
16248 }
16249
16250 BOOL gc_heap::commit_loh_for_no_gc (heap_segment* seg)
16251 {
16252     uint8_t* end_committed = heap_segment_allocated (seg) + loh_allocation_no_gc;
16253     assert (end_committed <= heap_segment_reserved (seg));
16254     return (grow_heap_segment (seg, end_committed));
16255 }
16256
16257 void gc_heap::thread_no_gc_loh_segments()
16258 {
16259 #ifdef MULTIPLE_HEAPS
16260     for (int i = 0; i < n_heaps; i++)
16261     {
16262         gc_heap* hp = g_heaps[i];
16263         if (hp->loh_allocated_for_no_gc())
16264         {
16265             hp->thread_loh_segment (hp->saved_loh_segment_no_gc);
16266             hp->saved_loh_segment_no_gc = 0;
16267         }
16268     }
16269 #else //MULTIPLE_HEAPS
16270     if (loh_allocated_for_no_gc())
16271     {
16272         thread_loh_segment (saved_loh_segment_no_gc);
16273         saved_loh_segment_no_gc = 0;
16274     }
16275 #endif //MULTIPLE_HEAPS    
16276 }
16277
16278 void gc_heap::set_loh_allocations_for_no_gc()
16279 {
16280     if (current_no_gc_region_info.loh_allocation_size != 0)
16281     {
16282         dynamic_data* dd = dynamic_data_of (max_generation + 1);
16283         dd_new_allocation (dd) = loh_allocation_no_gc;
16284         dd_gc_new_allocation (dd) = dd_new_allocation (dd);
16285     }
16286 }
16287
16288 void gc_heap::set_soh_allocations_for_no_gc()
16289 {
16290     if (current_no_gc_region_info.soh_allocation_size != 0)
16291     {
16292         dynamic_data* dd = dynamic_data_of (0);
16293         dd_new_allocation (dd) = soh_allocation_no_gc;
16294         dd_gc_new_allocation (dd) = dd_new_allocation (dd);
16295 #ifdef MULTIPLE_HEAPS
16296         alloc_context_count = 0;
16297 #endif //MULTIPLE_HEAPS
16298     }
16299 }
16300
16301 void gc_heap::set_allocations_for_no_gc()
16302 {
16303 #ifdef MULTIPLE_HEAPS
16304     for (int i = 0; i < n_heaps; i++)
16305     {
16306         gc_heap* hp = g_heaps[i];
16307         hp->set_loh_allocations_for_no_gc();
16308         hp->set_soh_allocations_for_no_gc();
16309     }
16310 #else //MULTIPLE_HEAPS
16311     set_loh_allocations_for_no_gc();
16312     set_soh_allocations_for_no_gc();
16313 #endif //MULTIPLE_HEAPS
16314 }
16315
16316 BOOL gc_heap::should_proceed_for_no_gc()
16317 {
16318     BOOL gc_requested = FALSE;
16319     BOOL loh_full_gc_requested = FALSE;
16320     BOOL soh_full_gc_requested = FALSE;
16321     BOOL no_gc_requested = FALSE;
16322     BOOL get_new_loh_segments = FALSE;
16323
16324     if (current_no_gc_region_info.soh_allocation_size)
16325     {
16326 #ifdef MULTIPLE_HEAPS
16327         for (int i = 0; i < n_heaps; i++)
16328         {
16329             gc_heap* hp = g_heaps[i];
16330             if ((size_t)(heap_segment_reserved (hp->ephemeral_heap_segment) - hp->alloc_allocated) < hp->soh_allocation_no_gc)
16331             {
16332                 gc_requested = TRUE;
16333                 break;
16334             }
16335         }
16336 #else //MULTIPLE_HEAPS
16337         if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated) < soh_allocation_no_gc)
16338             gc_requested = TRUE;
16339 #endif //MULTIPLE_HEAPS
16340
16341         if (!gc_requested)
16342         {
16343 #ifdef MULTIPLE_HEAPS
16344             for (int i = 0; i < n_heaps; i++)
16345             {
16346                 gc_heap* hp = g_heaps[i];
16347                 if (!(hp->grow_heap_segment (hp->ephemeral_heap_segment, (hp->alloc_allocated + hp->soh_allocation_no_gc))))
16348                 {
16349                     soh_full_gc_requested = TRUE;
16350                     break;
16351                 }
16352             }
16353 #else //MULTIPLE_HEAPS
16354             if (!grow_heap_segment (ephemeral_heap_segment, (alloc_allocated + soh_allocation_no_gc)))
16355                 soh_full_gc_requested = TRUE;
16356 #endif //MULTIPLE_HEAPS
16357         }
16358     }
16359
16360     if (!current_no_gc_region_info.minimal_gc_p && gc_requested)
16361     {
16362         soh_full_gc_requested = TRUE;
16363     }
16364
16365     no_gc_requested = !(soh_full_gc_requested || gc_requested);
16366
16367     if (soh_full_gc_requested && current_no_gc_region_info.minimal_gc_p)
16368     {
16369         current_no_gc_region_info.start_status = start_no_gc_no_memory;
16370         goto done;
16371     }
16372
16373     if (!soh_full_gc_requested && current_no_gc_region_info.loh_allocation_size)
16374     {
16375         // Check to see if we have enough reserved space. 
16376 #ifdef MULTIPLE_HEAPS
16377         for (int i = 0; i < n_heaps; i++)
16378         {
16379             gc_heap* hp = g_heaps[i];
16380             if (!hp->find_loh_space_for_no_gc())
16381             {
16382                 loh_full_gc_requested = TRUE;
16383                 break;
16384             }
16385         }
16386 #else //MULTIPLE_HEAPS
16387         if (!find_loh_space_for_no_gc())
16388             loh_full_gc_requested = TRUE;
16389 #endif //MULTIPLE_HEAPS
16390
16391         // Check to see if we have committed space.
16392         if (!loh_full_gc_requested)
16393         {
16394 #ifdef MULTIPLE_HEAPS
16395             for (int i = 0; i < n_heaps; i++)
16396             {
16397                 gc_heap* hp = g_heaps[i];
16398                 if (hp->saved_loh_segment_no_gc && !hp->commit_loh_for_no_gc (hp->saved_loh_segment_no_gc))
16399                 {
16400                     loh_full_gc_requested = TRUE;
16401                     break;
16402                 }
16403             }
16404 #else //MULTIPLE_HEAPS
16405             if (saved_loh_segment_no_gc && !commit_loh_for_no_gc (saved_loh_segment_no_gc))
16406                 loh_full_gc_requested = TRUE;
16407 #endif //MULTIPLE_HEAPS
16408         }
16409     }
16410
16411     if (loh_full_gc_requested || soh_full_gc_requested)
16412     {
16413         if (current_no_gc_region_info.minimal_gc_p)
16414             current_no_gc_region_info.start_status = start_no_gc_no_memory;
16415     }
16416
16417     no_gc_requested = !(loh_full_gc_requested || soh_full_gc_requested || gc_requested);
16418
16419     if (current_no_gc_region_info.start_status == start_no_gc_success)
16420     {
16421         if (no_gc_requested)
16422             set_allocations_for_no_gc();
16423     }
16424
16425 done:
16426
16427     if ((current_no_gc_region_info.start_status == start_no_gc_success) && !no_gc_requested)
16428         return TRUE;
16429     else
16430     {
16431         // We are done with starting the no_gc_region.
16432         current_no_gc_region_info.started = TRUE;
16433         return FALSE;
16434     }
16435 }
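// Net effect of the checks above: returning TRUE means a GC must still run to satisfy
// the region (a full blocking one if soh/loh_full_gc_requested, otherwise the minimal
// GC); returning FALSE means the region is considered started - either all space was
// reserved and committed up front (start_no_gc_success, with the budgets installed by
// set_allocations_for_no_gc), or we failed with start_no_gc_no_memory and the caller
// just reports that status.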
16436
16437 end_no_gc_region_status gc_heap::end_no_gc_region()
16438 {
16439     dprintf (1, ("end no gc called"));
16440
16441     end_no_gc_region_status status = end_no_gc_success;
16442
16443     if (!(current_no_gc_region_info.started))
16444         status = end_no_gc_not_in_progress;
16445     if (current_no_gc_region_info.num_gcs_induced)
16446         status = end_no_gc_induced;
16447     else if (current_no_gc_region_info.num_gcs)
16448         status = end_no_gc_alloc_exceeded;
16449
16450     if (settings.pause_mode == pause_no_gc)
16451         restore_data_for_no_gc();
16452
16453     // sets current_no_gc_region_info.started to FALSE here.
16454     memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
16455
16456     return status;
16457 }
16458
16459 //update counters
16460 void gc_heap::update_collection_counts ()
16461 {
16462     dynamic_data* dd0 = dynamic_data_of (0);
16463     dd_gc_clock (dd0) += 1;
16464
16465     size_t now = GetHighPrecisionTimeStamp();
16466
16467     for (int i = 0; i <= settings.condemned_generation;i++)
16468     {
16469         dynamic_data* dd = dynamic_data_of (i);
16470         dd_collection_count (dd)++;
16471         //this is needed by the linear allocation model
16472         if (i == max_generation)
16473             dd_collection_count (dynamic_data_of (max_generation+1))++;
16474         dd_gc_clock (dd) = dd_gc_clock (dd0);
16475         dd_time_clock (dd) = now;
16476     }
16477 }
16478
16479 BOOL gc_heap::expand_soh_with_minimal_gc()
16480 {
16481     if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) >= soh_allocation_no_gc)
16482         return TRUE;
16483
16484     heap_segment* new_seg = soh_get_segment_to_expand();
16485     if (new_seg)
16486     {
16487         if (g_gc_card_table != card_table)
16488             copy_brick_card_table();
16489
16490         settings.promotion = TRUE;
16491         settings.demotion = FALSE;
16492         ephemeral_promotion = TRUE;
16493         int condemned_gen_number = max_generation - 1;
16494
16495         generation* gen = 0;
16496         int align_const = get_alignment_constant (TRUE);
16497
16498         for (int i = 0; i <= condemned_gen_number; i++)
16499         {
16500             gen = generation_of (i);
16501             saved_ephemeral_plan_start[i] = generation_allocation_start (gen);
16502             saved_ephemeral_plan_start_size[i] = Align (size (generation_allocation_start (gen)), align_const);
16503         }
16504
16505         // We do need to clear the bricks here as we are converting a bunch of ephemeral objects to gen2
16506         // and need to make sure that there are no left over bricks from the previous GCs for the space 
16507         // we just used for gen0 allocation. We will need to go through the bricks for these objects for 
16508         // ephemeral GCs later.
16509         for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
16510              b < brick_of (align_on_brick (heap_segment_allocated (ephemeral_heap_segment)));
16511              b++)
16512         {
16513             set_brick (b, -1);
16514         }
16515
16516         size_t ephemeral_size = (heap_segment_allocated (ephemeral_heap_segment) - 
16517                                 generation_allocation_start (generation_of (max_generation - 1)));
16518         heap_segment_next (ephemeral_heap_segment) = new_seg;
16519         ephemeral_heap_segment = new_seg;
16520         uint8_t*  start = heap_segment_mem (ephemeral_heap_segment);
16521
16522         for (int i = condemned_gen_number; i >= 0; i--)
16523         {
16524             gen = generation_of (i);
16525             size_t gen_start_size = Align (min_obj_size);
16526             make_generation (generation_table[i], ephemeral_heap_segment, start, 0);
16527             generation_plan_allocation_start (gen) = start;
16528             generation_plan_allocation_start_size (gen) = gen_start_size;
16529             start += gen_start_size;
16530         }
16531         heap_segment_used (ephemeral_heap_segment) = start - plug_skew;
16532         heap_segment_plan_allocated (ephemeral_heap_segment) = start;
16533
16534         fix_generation_bounds (condemned_gen_number, generation_of (0));
16535
16536         dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size;
16537         dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation));
16538
16539         adjust_ephemeral_limits();
16540         return TRUE;
16541     }
16542     else
16543         return FALSE;
16544 }
16545
16546 // Only to be done on the thread that calls restart in a join for server GC;
16547 // it also resets the per-heap OOM status.
16548 void gc_heap::check_and_set_no_gc_oom()
16549 {
16550 #ifdef MULTIPLE_HEAPS
16551     for (int i = 0; i < n_heaps; i++)
16552     {
16553         gc_heap* hp = g_heaps[i];
16554         if (hp->no_gc_oom_p)
16555         {
16556             current_no_gc_region_info.start_status = start_no_gc_no_memory;
16557             hp->no_gc_oom_p = false;
16558         }
16559     }
16560 #else
16561     if (no_gc_oom_p)
16562     {
16563         current_no_gc_region_info.start_status = start_no_gc_no_memory;
16564         no_gc_oom_p = false;
16565     }
16566 #endif //MULTIPLE_HEAPS
16567 }
16568
16569 void gc_heap::allocate_for_no_gc_after_gc()
16570 {
16571     if (current_no_gc_region_info.minimal_gc_p)
16572         repair_allocation_contexts (TRUE);
16573
16574     no_gc_oom_p = false;
16575
16576     if (current_no_gc_region_info.start_status != start_no_gc_no_memory)
16577     {
16578         if (current_no_gc_region_info.soh_allocation_size != 0)
16579         {
16580             if (((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) ||
16581                 (!grow_heap_segment (ephemeral_heap_segment, (heap_segment_allocated (ephemeral_heap_segment) + soh_allocation_no_gc))))
16582             {
16583                 no_gc_oom_p = true;
16584             }
16585
16586 #ifdef MULTIPLE_HEAPS
16587             gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
16588             if (gc_t_join.joined())
16589             {
16590 #endif //MULTIPLE_HEAPS
16591
16592                 check_and_set_no_gc_oom();
16593
16594 #ifdef MULTIPLE_HEAPS
16595                 gc_t_join.restart();
16596             }
16597 #endif //MULTIPLE_HEAPS
16598         }
16599
16600         if ((current_no_gc_region_info.start_status == start_no_gc_success) &&
16601             !(current_no_gc_region_info.minimal_gc_p) && 
16602             (current_no_gc_region_info.loh_allocation_size != 0))
16603         {
16604             gc_policy = policy_compact;
16605             saved_loh_segment_no_gc = 0;
16606
16607             if (!find_loh_free_for_no_gc())
16608             {
16609                 heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16610                 BOOL found_seg_p = FALSE;
16611                 while (seg)
16612                 {
16613                     if ((size_t)(heap_segment_reserved (seg) - heap_segment_allocated (seg)) >= loh_allocation_no_gc)
16614                     {
16615                         found_seg_p = TRUE;
16616                         if (!commit_loh_for_no_gc (seg))
16617                         {
16618                             no_gc_oom_p = true;
16619                             break;
16620                         }
16621                     }
16622                     seg = heap_segment_next (seg);
16623                 }
16624
16625                 if (!found_seg_p)
16626                     gc_policy = policy_expand;
16627             }
16628
16629 #ifdef MULTIPLE_HEAPS
16630             gc_t_join.join(this, gc_join_expand_loh_no_gc);
16631             if (gc_t_join.joined())
16632             {
16633                 check_and_set_no_gc_oom();
16634
16635                 if (current_no_gc_region_info.start_status == start_no_gc_success)
16636                 {
16637                     for (int i = 0; i < n_heaps; i++)
16638                     {
16639                         gc_heap* hp = g_heaps[i];
16640                         if (hp->gc_policy == policy_expand)
16641                         {
16642                             hp->saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc), hp);
16643                             if (!(hp->saved_loh_segment_no_gc))
16644                             {
16645                                 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16646                                 break;
16647                             }
16648                         }
16649                     }
16650                 }
16651
16652                 gc_t_join.restart();
16653             }
16654 #else //MULTIPLE_HEAPS
16655             check_and_set_no_gc_oom();
16656
16657             if ((current_no_gc_region_info.start_status == start_no_gc_success) && (gc_policy == policy_expand))
16658             {
16659                 saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc));
16660                 if (!saved_loh_segment_no_gc)
16661                     current_no_gc_region_info.start_status = start_no_gc_no_memory;
16662             }
16663 #endif //MULTIPLE_HEAPS
16664
16665             if ((current_no_gc_region_info.start_status == start_no_gc_success) && saved_loh_segment_no_gc)
16666             {
16667                 if (!commit_loh_for_no_gc (saved_loh_segment_no_gc))
16668                 {
16669                     no_gc_oom_p = true;
16670                 }
16671             }
16672         }
16673     }
16674
16675 #ifdef MULTIPLE_HEAPS
16676     gc_t_join.join(this, gc_join_final_no_gc);
16677     if (gc_t_join.joined())
16678     {
16679 #endif //MULTIPLE_HEAPS
16680
16681         check_and_set_no_gc_oom();
16682
16683         if (current_no_gc_region_info.start_status == start_no_gc_success)
16684         {
16685             set_allocations_for_no_gc();
16686             current_no_gc_region_info.started = TRUE;
16687         }
16688
16689 #ifdef MULTIPLE_HEAPS
16690         gc_t_join.restart();
16691     }
16692 #endif //MULTIPLE_HEAPS
16693 }
16694
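// The SOH branch above succeeds only when the ephemeral segment still has
// soh_allocation_no_gc bytes between heap_segment_allocated and
// heap_segment_reserved, and those bytes can actually be committed. A
// standalone sketch of just that decision; commit_succeeds stands in for the
// grow_heap_segment call:
#if 0 // illustrative sketch only; not part of the build
#include <cstddef>
#include <cstdint>

bool can_reserve_soh_no_gc (uint8_t* allocated, uint8_t* reserved,
                            size_t requested, bool commit_succeeds)
{
    if ((size_t)(reserved - allocated) < requested)
        return false;        // not enough reserved address space left
    return commit_succeeds;  // reservation alone isn't enough; pages must commit
}
#endif
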
16695 void gc_heap::init_records()
16696 {
16697     // An option is to move this to after we figure out which gen to condemn, so we wouldn't
16698     // need to clear some generations' data because we know they don't change; but that also
16699     // means we can't simply call memset here.
16700     memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap));
16701     gc_data_per_heap.heap_index = heap_number;
16702     if (heap_number == 0)
16703         memset (&gc_data_global, 0, sizeof (gc_data_global));
16704
16705 #ifdef GC_CONFIG_DRIVEN
16706     memset (interesting_data_per_gc, 0, sizeof (interesting_data_per_gc));
16707 #endif //GC_CONFIG_DRIVEN
16708     memset (&fgm_result, 0, sizeof (fgm_result));
16709
16710     for (int i = 0; i <= (max_generation + 1); i++)
16711     {
16712         gc_data_per_heap.gen_data[i].size_before = generation_size (i);
16713         generation* gen = generation_of (i);
16714         gc_data_per_heap.gen_data[i].free_list_space_before = generation_free_list_space (gen);
16715         gc_data_per_heap.gen_data[i].free_obj_space_before = generation_free_obj_space (gen);
16716     }
16717
16718     sufficient_gen0_space_p = FALSE;
16719 #if defined (_DEBUG) && defined (VERIFY_HEAP)
16720     verify_pinned_queue_p = FALSE;
16721 #endif // _DEBUG && VERIFY_HEAP
16722 }
16723
16724 void gc_heap::pm_full_gc_init_or_clear()
16725 {
16726     // This means the next GC will be a full blocking GC and we need to init.
16727     if (settings.condemned_generation == (max_generation - 1))
16728     {
16729         if (pm_trigger_full_gc)
16730         {
16731 #ifdef MULTIPLE_HEAPS
16732             do_post_gc();
16733 #endif //MULTIPLE_HEAPS
16734             dprintf (GTC_LOG, ("init for PM triggered full GC"));
16735             uint32_t saved_entry_memory_load = settings.entry_memory_load;
16736             settings.init_mechanisms();
16737             settings.reason = reason_pm_full_gc;
16738             settings.condemned_generation = max_generation;
16739             settings.entry_memory_load = saved_entry_memory_load;
16740             // Can't assert this since we only check at the end of gen2 GCs;
16741             // during gen1 GCs the memory load could have already dropped.
16742             // Although arguably we should just turn off PM then...
16743             //assert (settings.entry_memory_load >= high_memory_load_th);
16744             assert (settings.entry_memory_load > 0);
16745             settings.gc_index += 1;
16746             do_pre_gc();
16747         }
16748     }
16749 // This means we are in the middle of a full blocking GC triggered by
16750 // this PM mode.
16751     else if (settings.reason == reason_pm_full_gc)
16752     {
16753         assert (settings.condemned_generation == max_generation);
16754         assert (pm_trigger_full_gc);
16755         pm_trigger_full_gc = false;
16756
16757         dprintf (GTC_LOG, ("PM triggered full GC done"));
16758     }
16759 }
16760
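// Net effect of the two branches above, as a sketch of the state sequence
// (names from this file):
//
//   a gen1 GC ends with pm_trigger_full_gc set
//       -> settings are rewritten into a full blocking gen2 GC with
//          reason_pm_full_gc, and do_pre_gc() starts it right away
//   that gen2 GC ends with reason == reason_pm_full_gc
//       -> pm_trigger_full_gc is cleared; the provisional mode episode is done
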
16761 void gc_heap::garbage_collect_pm_full_gc()
16762 {
16763     assert (settings.condemned_generation == max_generation);
16764     assert (settings.reason == reason_pm_full_gc);
16765     assert (!settings.concurrent);
16766     gc1();
16767 }
16768
16769 void gc_heap::garbage_collect (int n)
16770 {
16771     //reset the number of alloc contexts
16772     alloc_contexts_used = 0;
16773
16774     fix_allocation_contexts (TRUE);
16775 #ifdef MULTIPLE_HEAPS
16776 #ifdef JOIN_STATS
16777     gc_t_join.start_ts(this);
16778 #endif //JOIN_STATS
16779     clear_gen0_bricks();
16780 #endif //MULTIPLE_HEAPS
16781
16782     if ((settings.pause_mode == pause_no_gc) && current_no_gc_region_info.minimal_gc_p)
16783     {
16784 #ifdef MULTIPLE_HEAPS
16785         gc_t_join.join(this, gc_join_minimal_gc);
16786         if (gc_t_join.joined())
16787         {
16788 #endif //MULTIPLE_HEAPS
16789
16790 #ifdef MULTIPLE_HEAPS
16791             // this is serialized because we need to get a segment
16792             for (int i = 0; i < n_heaps; i++)
16793             {
16794                 if (!(g_heaps[i]->expand_soh_with_minimal_gc()))
16795                     current_no_gc_region_info.start_status = start_no_gc_no_memory;
16796             }
16797 #else
16798             if (!expand_soh_with_minimal_gc())
16799                 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16800 #endif //MULTIPLE_HEAPS
16801
16802             update_collection_counts_for_no_gc();
16803
16804 #ifdef MULTIPLE_HEAPS
16805             gc_t_join.restart();
16806         }
16807 #endif //MULTIPLE_HEAPS
16808
16809         goto done;
16810     }
16811
16812     init_records();
16813
16814     settings.reason = gc_trigger_reason;
16815 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
16816     num_pinned_objects = 0;
16817 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
16818
16819 #ifdef STRESS_HEAP
16820     if (settings.reason == reason_gcstress)
16821     {
16822         settings.reason = reason_induced;
16823         settings.stress_induced = TRUE;
16824     }
16825 #endif // STRESS_HEAP
16826
16827 #ifdef MULTIPLE_HEAPS
16828     //align all heaps on the max generation to condemn
16829     dprintf (3, ("Joining for max generation to condemn"));
16830     condemned_generation_num = generation_to_condemn (n, 
16831                                                     &blocking_collection, 
16832                                                     &elevation_requested, 
16833                                                     FALSE);
16834     gc_t_join.join(this, gc_join_generation_determined);
16835     if (gc_t_join.joined())
16836 #endif //MULTIPLE_HEAPS
16837     {
16838 #if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
16839         //delete old slots from the segment table
16840         seg_table->delete_old_slots();
16841 #endif //!SEG_MAPPING_TABLE && !FEATURE_BASICFREEZE
16842
16843 #ifdef MULTIPLE_HEAPS
16844         for (int i = 0; i < n_heaps; i++)
16845         {
16846             gc_heap* hp = g_heaps[i];
16847             // check for card table growth
16848             if (g_gc_card_table != hp->card_table)
16849                 hp->copy_brick_card_table();
16850
16851             hp->rearrange_large_heap_segments();
16852 #ifdef BACKGROUND_GC
16853             hp->background_delay_delete_loh_segments();
16854             if (!recursive_gc_sync::background_running_p())
16855                 hp->rearrange_small_heap_segments();
16856 #endif //BACKGROUND_GC
16857         }
16858 #else //MULTIPLE_HEAPS
16859         if (g_gc_card_table != card_table)
16860             copy_brick_card_table();
16861
16862         rearrange_large_heap_segments();
16863 #ifdef BACKGROUND_GC
16864         background_delay_delete_loh_segments();
16865         if (!recursive_gc_sync::background_running_p())
16866             rearrange_small_heap_segments();
16867 #endif //BACKGROUND_GC
16868 #endif //MULTIPLE_HEAPS
16869
16870     BOOL should_evaluate_elevation = FALSE;
16871     BOOL should_do_blocking_collection = FALSE;
16872
16873 #ifdef MULTIPLE_HEAPS
16874     int gen_max = condemned_generation_num;
16875     for (int i = 0; i < n_heaps; i++)
16876     {
16877         if (gen_max < g_heaps[i]->condemned_generation_num)
16878             gen_max = g_heaps[i]->condemned_generation_num;
16879         if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
16880             should_evaluate_elevation = TRUE;
16881         if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
16882             should_do_blocking_collection = TRUE;
16883     }
16884
16885     settings.condemned_generation = gen_max;
16886 #else //MULTIPLE_HEAPS
16887     settings.condemned_generation = generation_to_condemn (n, 
16888                                                         &blocking_collection, 
16889                                                         &elevation_requested, 
16890                                                         FALSE);
16891     should_evaluate_elevation = elevation_requested;
16892     should_do_blocking_collection = blocking_collection;
16893 #endif //MULTIPLE_HEAPS
16894
16895     settings.condemned_generation = joined_generation_to_condemn (
16896                                         should_evaluate_elevation,
16897                                         n,
16898                                         settings.condemned_generation,
16899                                         &should_do_blocking_collection
16900                                         STRESS_HEAP_ARG(n)
16901                                         );
16902
16903     STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10, 
16904             "condemned generation num: %d\n", settings.condemned_generation);
16905
16906     record_gcs_during_no_gc();
16907
16908     if (settings.condemned_generation > 1)
16909         settings.promotion = TRUE;
16910
16911 #ifdef HEAP_ANALYZE
16912     // At this point we've decided what generation is condemned
16913     // See if we've been requested to analyze survivors after the mark phase
16914     if (GCToEEInterface::AnalyzeSurvivorsRequested(settings.condemned_generation))
16915     {
16916         heap_analyze_enabled = TRUE;
16917     }
16918 #endif // HEAP_ANALYZE
16919
16920         GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced);
16921
16922 #ifdef BACKGROUND_GC
16923         if ((settings.condemned_generation == max_generation) &&
16924             (recursive_gc_sync::background_running_p()))
16925         {
16926             //TODO BACKGROUND_GC If we just wait for the end of the GC, it won't work,
16927             // because we have to collect gen0 and gen1 properly;
16928             // in particular, the allocation contexts are gone.
16929             // For now, it is simpler to collect max_generation-1
16930             settings.condemned_generation = max_generation - 1;
16931             dprintf (GTC_LOG, ("bgc - 1 instead of 2"));
16932         }
16933
16934         if ((settings.condemned_generation == max_generation) &&
16935             (should_do_blocking_collection == FALSE) &&
16936             gc_can_use_concurrent &&
16937             !temp_disable_concurrent_p &&                 
16938             ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)))
16939         {
16940             keep_bgc_threads_p = TRUE;
16941             c_write (settings.concurrent,  TRUE);
16942         }
16943 #endif //BACKGROUND_GC
16944
16945         settings.gc_index = (uint32_t)dd_collection_count (dynamic_data_of (0)) + 1;
16946
16947         // Call the EE for start of GC work
16948         // just one thread for MP GC
16949         GCToEEInterface::GcStartWork (settings.condemned_generation,
16950                                 max_generation);            
16951
16952         // TODO: we could fire an ETW event here saying this GC is a concurrent GC, but later on, due to not
16953         // being able to create threads or whatever, it could become a non concurrent GC. Maybe for concurrent
16954         // GCs we should fire it in do_background_gc, and if it failed to be a CGC, fire it in gc1... in other
16955         // words, this should be fired in gc1.
16956         do_pre_gc();
16957
16958 #ifdef MULTIPLE_HEAPS
16959         gc_start_event.Reset();
16960         //start all threads on the roots.
16961         dprintf(3, ("Starting all gc threads for gc"));
16962         gc_t_join.restart();
16963 #endif //MULTIPLE_HEAPS
16964     }
16965
16966         descr_generations (TRUE);
16967
16968 #ifdef VERIFY_HEAP
16969     if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
16970        !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY))
16971     {
16972         verify_heap (TRUE);
16973     }
16974     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)
16975         checkGCWriteBarrier();
16976
16977 #endif // VERIFY_HEAP
16978
16979 #ifdef BACKGROUND_GC
16980     if (settings.concurrent)
16981     {
16982         // We need to save the settings because we'll need to restore it after each FGC.
16983         assert (settings.condemned_generation == max_generation);
16984         settings.compaction = FALSE;
16985         saved_bgc_settings = settings;
16986
16987 #ifdef MULTIPLE_HEAPS
16988         if (heap_number == 0)
16989         {
16990             for (int i = 0; i < n_heaps; i++)
16991             {
16992                 prepare_bgc_thread (g_heaps[i]);
16993             }
16994             dprintf (2, ("setting bgc_threads_sync_event"));
16995             bgc_threads_sync_event.Set();
16996         }
16997         else
16998         {
16999             bgc_threads_sync_event.Wait(INFINITE, FALSE);
17000             dprintf (2, ("bgc_threads_sync_event is signalled"));
17001         }
17002 #else
17003         prepare_bgc_thread(0);
17004 #endif //MULTIPLE_HEAPS
17005
17006 #ifdef MULTIPLE_HEAPS
17007         gc_t_join.join(this, gc_join_start_bgc);
17008         if (gc_t_join.joined())
17009 #endif //MULTIPLE_HEAPS
17010         {
17011             do_concurrent_p = TRUE;
17012             do_ephemeral_gc_p = FALSE;
17013 #ifdef MULTIPLE_HEAPS
17014             dprintf(2, ("Joined to perform a background GC"));
17015
17016             for (int i = 0; i < n_heaps; i++)
17017             {
17018                 gc_heap* hp = g_heaps[i];
17019                 if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init (hp->mark_array))
17020                 {
17021                     do_concurrent_p = FALSE;
17022                     break;
17023                 }
17024                 else
17025                 {
17026                     hp->background_saved_lowest_address = hp->lowest_address;
17027                     hp->background_saved_highest_address = hp->highest_address;
17028                 }
17029             }
17030 #else
17031             do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init (mark_array));
17032             if (do_concurrent_p)
17033             {
17034                 background_saved_lowest_address = lowest_address;
17035                 background_saved_highest_address = highest_address;
17036             }
17037 #endif //MULTIPLE_HEAPS
17038
17039             if (do_concurrent_p)
17040             {
17041 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
17042                 SoftwareWriteWatch::EnableForGCHeap();
17043 #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
17044
17045 #ifdef MULTIPLE_HEAPS
17046                 for (int i = 0; i < n_heaps; i++)
17047                     g_heaps[i]->current_bgc_state = bgc_initialized;
17048 #else
17049                 current_bgc_state = bgc_initialized;
17050 #endif //MULTIPLE_HEAPS
17051
17052                 int gen = check_for_ephemeral_alloc();
17053                 // always do a gen1 GC before we start BGC.
17054                 // This is temporary, for testing purposes.
17055                 //int gen = max_generation - 1;
17056                 dont_restart_ee_p = TRUE;
17057                 if (gen == -1)
17058                 {
17059                     // If we decide to not do a GC before the BGC we need to 
17060                     // restore the gen0 alloc context.
17061 #ifdef MULTIPLE_HEAPS
17062                     for (int i = 0; i < n_heaps; i++)
17063                     {
17064                         generation_allocation_pointer (g_heaps[i]->generation_of (0)) =  0;
17065                         generation_allocation_limit (g_heaps[i]->generation_of (0)) = 0;
17066                     }
17067 #else
17068                     generation_allocation_pointer (youngest_generation) =  0;
17069                     generation_allocation_limit (youngest_generation) = 0;
17070 #endif //MULTIPLE_HEAPS
17071                 }
17072                 else
17073                 {
17074                     do_ephemeral_gc_p = TRUE;
17075
17076                     settings.init_mechanisms();
17077                     settings.condemned_generation = gen;
17078                     settings.gc_index = (size_t)dd_collection_count (dynamic_data_of (0)) + 2;
17079                     do_pre_gc();
17080
17081                     // TODO BACKGROUND_GC need to add the profiling stuff here.
17082                     dprintf (GTC_LOG, ("doing gen%d before doing a bgc", gen));
17083                 }
17084
17085                 //clear the cards so they don't bleed in gen 1 during collection
17086                 // shouldn't this always be done at the beginning of any GC?
17087                 //clear_card_for_addresses (
17088                 //    generation_allocation_start (generation_of (0)),
17089                 //    heap_segment_allocated (ephemeral_heap_segment));
17090
17091                 if (!do_ephemeral_gc_p)
17092                 {
17093                     do_background_gc();
17094                 }
17095             }
17096             else
17097             {
17098                 settings.compaction = TRUE;
17099                 c_write (settings.concurrent, FALSE);
17100             }
17101
17102 #ifdef MULTIPLE_HEAPS
17103             gc_t_join.restart();
17104 #endif //MULTIPLE_HEAPS
17105         }
17106
17107         if (do_concurrent_p)
17108         {
17109             // At this point we are sure we'll be starting a BGC, so save its per heap data here.
17110             // global data is only calculated at the end of the GC so we don't need to worry about
17111             // FGCs overwriting it.
17112             memset (&bgc_data_per_heap, 0, sizeof (bgc_data_per_heap));
17113             memcpy (&bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap));
17114
17115             if (do_ephemeral_gc_p)
17116             {
17117                 dprintf (2, ("GC threads running, doing gen%d GC", settings.condemned_generation));
17118
17119                 gen_to_condemn_reasons.init();
17120                 gen_to_condemn_reasons.set_condition (gen_before_bgc);
17121                 gc_data_per_heap.gen_to_condemn_reasons.init (&gen_to_condemn_reasons);
17122                 gc1();
17123 #ifdef MULTIPLE_HEAPS
17124                 gc_t_join.join(this, gc_join_bgc_after_ephemeral);
17125                 if (gc_t_join.joined())
17126 #endif //MULTIPLE_HEAPS
17127                 {
17128 #ifdef MULTIPLE_HEAPS
17129                     do_post_gc();
17130 #endif //MULTIPLE_HEAPS
17131                     settings = saved_bgc_settings;
17132                     assert (settings.concurrent);
17133
17134                     do_background_gc();
17135
17136 #ifdef MULTIPLE_HEAPS
17137                     gc_t_join.restart();
17138 #endif //MULTIPLE_HEAPS
17139                 }
17140             }
17141         }
17142         else
17143         {
17144             dprintf (2, ("couldn't create BGC threads, reverting to doing a blocking GC"));
17145             gc1();
17146         }
17147     }
17148     else
17149 #endif //BACKGROUND_GC
17150     {
17151         gc1();
17152     }
17153 #ifndef MULTIPLE_HEAPS
17154     allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp();
17155     allocation_running_amount = dd_new_allocation (dynamic_data_of (0));
17156     fgn_last_alloc = dd_new_allocation (dynamic_data_of (0));
17157 #endif //MULTIPLE_HEAPS
17158
17159 done:
17160     if (settings.pause_mode == pause_no_gc)
17161         allocate_for_no_gc_after_gc();
17162
17163 }
17164
17165 #define mark_stack_empty_p() (mark_stack_base == mark_stack_tos)
17166
17167 inline
17168 size_t& gc_heap::promoted_bytes(int thread)
17169 {
17170 #ifdef MULTIPLE_HEAPS
17171     return g_promoted [thread*16];
17172 #else //MULTIPLE_HEAPS
17173     UNREFERENCED_PARAMETER(thread);
17174     return g_promoted;
17175 #endif //MULTIPLE_HEAPS
17176 }
17177
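// The thread*16 stride above places each heap's promoted counter 16 size_t
// slots apart (128 bytes on 64-bit), presumably so that concurrently updated
// counters live on separate cache lines and don't false-share. A standalone
// sketch of the same padding idea (the 64 below is arbitrary):
#if 0 // illustrative sketch only; not part of the build
#include <cstddef>

const int kStride = 16; // 16 * sizeof(size_t) == 128 bytes on 64-bit

size_t g_counters[64 * kStride]; // one padded slot per heap

inline size_t& counter_for (int heap_number)
{
    // adjacent heaps touch addresses 128 bytes apart, so concurrent
    // increments don't ping-pong a shared cache line between cores
    return g_counters[heap_number * kStride];
}
#endif
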
17178 #ifdef INTERIOR_POINTERS
17179 heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p)
17180 {
17181 #ifdef SEG_MAPPING_TABLE
17182     heap_segment* seg = seg_mapping_table_segment_of (interior);
17183     if (seg)
17184     {
17185         if (small_segment_only_p && heap_segment_loh_p (seg))
17186             return 0;
17187     }
17188     return seg;
17189 #else //SEG_MAPPING_TABLE
17190 #ifdef MULTIPLE_HEAPS
17191     heap_segment* hs = 0;
17192     for (int i = 0; i < gc_heap::n_heaps; i++)
17193     {
17194         gc_heap* h = gc_heap::g_heaps [i];
17195         hs = h->find_segment_per_heap (interior, small_segment_only_p);
17196         if (hs)
17197             break;
17198     }
17199     return hs;
17200 #else
17201     {
17202         gc_heap* h = pGenGCHeap;
17203         return h->find_segment_per_heap (interior, small_segment_only_p);
17204     }
17205 #endif //MULTIPLE_HEAPS
17206 #endif //SEG_MAPPING_TABLE
17207 }
17208
17209 heap_segment* gc_heap::find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p)
17210 {
17211 #ifdef SEG_MAPPING_TABLE
17212     return find_segment (interior, small_segment_only_p);
17213 #else //SEG_MAPPING_TABLE
17214     if (in_range_for_segment (interior, ephemeral_heap_segment))
17215     {
17216         return ephemeral_heap_segment;
17217     }
17218     else
17219     {
17220         heap_segment* found_seg = 0;
17221
17222         {
17223             heap_segment* seg = generation_start_segment (generation_of (max_generation));
17224             do
17225             {
17226                 if (in_range_for_segment (interior, seg))
17227                 {
17228                     found_seg = seg;
17229                     goto end_find_segment;
17230                 }
17231
17232             } while ((seg = heap_segment_next (seg)) != 0);
17233         }
17234         if (!small_segment_only_p)
17235         {
17236 #ifdef BACKGROUND_GC
17237             {
17238                 ptrdiff_t delta = 0;
17239                 heap_segment* seg = segment_of (interior, delta);
17240                 if (seg && in_range_for_segment (interior, seg))
17241                 {
17242                     found_seg = seg;
17243                 }
17244                 goto end_find_segment;
17245             }
17246 #else //BACKGROUND_GC
17247             heap_segment* seg = generation_start_segment (generation_of (max_generation+1));
17248             do
17249             {
17250                 if (in_range_for_segment(interior, seg))
17251                 {
17252                     found_seg = seg;
17253                     goto end_find_segment;
17254                 }
17255
17256             } while ((seg = heap_segment_next (seg)) != 0);
17257 #endif //BACKGROUND_GC
17258         }
17259 end_find_segment:
17260
17261         return found_seg;
17262     }
17263 #endif //SEG_MAPPING_TABLE
17264 }
17265 #endif //INTERIOR_POINTERS
17266
17267 #if !defined(_DEBUG) && !defined(__GNUC__)
17268 inline // This causes link errors if global optimization is off
17269 #endif //!_DEBUG && !__GNUC__
17270 gc_heap* gc_heap::heap_of (uint8_t* o)
17271 {
17272 #ifdef MULTIPLE_HEAPS
17273     if (o == 0)
17274         return g_heaps [0];
17275 #ifdef SEG_MAPPING_TABLE
17276     gc_heap* hp = seg_mapping_table_heap_of (o);
17277     return (hp ? hp : g_heaps[0]);
17278 #else //SEG_MAPPING_TABLE
17279     ptrdiff_t delta = 0;
17280     heap_segment* seg = segment_of (o, delta);
17281     return (seg ? heap_segment_heap (seg) : g_heaps [0]);
17282 #endif //SEG_MAPPING_TABLE
17283 #else //MULTIPLE_HEAPS
17284     UNREFERENCED_PARAMETER(o);
17285     return __this;
17286 #endif //MULTIPLE_HEAPS
17287 }
17288
17289 inline
17290 gc_heap* gc_heap::heap_of_gc (uint8_t* o)
17291 {
17292 #ifdef MULTIPLE_HEAPS
17293     if (o == 0)
17294         return g_heaps [0];
17295 #ifdef SEG_MAPPING_TABLE
17296     gc_heap* hp = seg_mapping_table_heap_of_gc (o);
17297     return (hp ? hp : g_heaps[0]);
17298 #else //SEG_MAPPING_TABLE
17299     ptrdiff_t delta = 0;
17300     heap_segment* seg = segment_of (o, delta);
17301     return (seg ? heap_segment_heap (seg) : g_heaps [0]);
17302 #endif //SEG_MAPPING_TABLE
17303 #else //MULTIPLE_HEAPS
17304     UNREFERENCED_PARAMETER(o);
17305     return __this;
17306 #endif //MULTIPLE_HEAPS
17307 }
17308
17309 #ifdef INTERIOR_POINTERS
17310 // will find all heap objects (large and small)
17311 uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
17312 {
17313     if (!gen0_bricks_cleared)
17314     {
17315 #ifdef MULTIPLE_HEAPS
17316         assert (!"Should have already been done in server GC");
17317 #endif //MULTIPLE_HEAPS
17318         gen0_bricks_cleared = TRUE;
17319         //initialize brick table for gen 0
17320         for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
17321              b < brick_of (align_on_brick
17322                            (heap_segment_allocated (ephemeral_heap_segment)));
17323              b++)
17324         {
17325             set_brick (b, -1);
17326         }
17327     }
17328 #ifdef FFIND_OBJECT
17329     //indicate that in the future this needs to be done during allocation
17330 #ifdef MULTIPLE_HEAPS
17331     gen0_must_clear_bricks = FFIND_DECAY*gc_heap::n_heaps;
17332 #else
17333     gen0_must_clear_bricks = FFIND_DECAY;
17334 #endif //MULTIPLE_HEAPS
17335 #endif //FFIND_OBJECT
17336
17337     int brick_entry = get_brick_entry(brick_of (interior));
17338     if (brick_entry == 0)
17339     {
17340         // this is a pointer to a large object
17341         heap_segment* seg = find_segment_per_heap (interior, FALSE);
17342         if (seg
17343 #ifdef FEATURE_CONSERVATIVE_GC
17344             && (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg))
17345 #endif
17346             )
17347         {
17348             // If interior falls within the first free object at the beginning of a generation,
17349             // we don't have a brick entry for it, and we may incorrectly treat it as being on the large object heap.
17350             int align_const = get_alignment_constant (heap_segment_read_only_p (seg)
17351 #ifdef FEATURE_CONSERVATIVE_GC
17352                                                        || (GCConfig::GetConservativeGC() && !heap_segment_loh_p (seg))
17353 #endif
17354                                                       );
17355             //int align_const = get_alignment_constant (heap_segment_read_only_p (seg));
17356             assert (interior < heap_segment_allocated (seg));
17357
17358             uint8_t* o = heap_segment_mem (seg);
17359             while (o < heap_segment_allocated (seg))
17360             {
17361                 uint8_t* next_o = o + Align (size (o), align_const);
17362                 assert (next_o > o);
17363                 if ((o <= interior) && (interior < next_o))
17364                     return o;
17365                 o = next_o;
17366             }
17367             return 0;
17368         }
17369         else
17370         {
17371             return 0;
17372         }
17373     }
17374     else if (interior >= low)
17375     {
17376         heap_segment* seg = find_segment_per_heap (interior, TRUE);
17377         if (seg)
17378         {
17379 #ifdef FEATURE_CONSERVATIVE_GC
17380             if (interior >= heap_segment_allocated (seg))
17381                 return 0;
17382 #else
17383             assert (interior < heap_segment_allocated (seg));
17384 #endif
17385             uint8_t* o = find_first_object (interior, heap_segment_mem (seg));
17386             return o;
17387         }
17388         else
17389             return 0;
17390     }
17391     else
17392         return 0;
17393 }
17394
17395 uint8_t*
17396 gc_heap::find_object_for_relocation (uint8_t* interior, uint8_t* low, uint8_t* high)
17397 {
17398     uint8_t* old_address = interior;
17399     if (!((old_address >= low) && (old_address < high)))
17400         return 0;
17401     uint8_t* plug = 0;
17402     size_t  brick = brick_of (old_address);
17403     int    brick_entry =  brick_table [ brick ];
17404     if (brick_entry != 0)
17405     {
17406     retry:
17407         {
17408             while (brick_entry < 0)
17409             {
17410                 brick = (brick + brick_entry);
17411                 brick_entry =  brick_table [ brick ];
17412             }
17413             uint8_t* old_loc = old_address;
17414             uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
17415                                       old_loc);
17416             if (node <= old_loc)
17417                 plug = node;
17418             else
17419             {
17420                 brick = brick - 1;
17421                 brick_entry =  brick_table [ brick ];
17422                 goto retry;
17423             }
17424
17425         }
17426         assert (plug);
17427         //find the object by going along the plug
17428         uint8_t* o = plug;
17429         while (o <= interior)
17430         {
17431             uint8_t* next_o = o + Align (size (o));
17432             assert (next_o > o);
17433             if (next_o > interior)
17434             {
17435                 break;
17436             }
17437             o = next_o;
17438         }
17439         assert ((o <= interior) && ((o + Align (size (o))) > interior));
17440         return o;
17441     }
17442     else
17443     {
17444         // this is a pointer to a large object
17445         heap_segment* seg = find_segment_per_heap (interior, FALSE);
17446         if (seg)
17447         {
17448             assert (interior < heap_segment_allocated (seg));
17449
17450             uint8_t* o = heap_segment_mem (seg);
17451             while (o < heap_segment_allocated (seg))
17452             {
17453                 uint8_t* next_o = o + Align (size (o));
17454                 assert (next_o > o);
17455                 if ((o < interior) && (interior < next_o))
17456                     return o;
17457                 o = next_o;
17458             }
17459             return 0;
17460         }
17461         else
17462         {
17463             return 0;
17464         }
17465     }
17466 }
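
// Both lookups above decode the brick table the same way: an entry of 0 means
// there is no small-object plug info for that brick (treat the pointer as
// large object heap), a positive entry is the offset + 1 of the plug tree
// root within the brick, and a negative entry means "step back that many
// bricks" (set_brick (b, -1) seeds exactly such a back pointer). A sketch of
// the walk-back loop in isolation (types simplified):
#if 0 // illustrative sketch only; not part of the build
#include <cstddef>

size_t find_root_brick (const short* brick_table, size_t brick)
{
    int entry = brick_table[brick];
    while (entry < 0)
    {
        brick += entry;             // negative entry: relative back-step
        entry = brick_table[brick];
    }
    // brick_address (brick) + entry - 1 is then the plug tree root
    return brick;
}
#endif
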
17467 #else //INTERIOR_POINTERS
17468 inline
17469 uint8_t* gc_heap::find_object (uint8_t* o, uint8_t* low)
17470 {
17471     return o;
17472 }
17473 #endif //INTERIOR_POINTERS
17474
17475 #ifdef MULTIPLE_HEAPS
17476
17477 #ifdef MARK_LIST
17478 #ifdef GC_CONFIG_DRIVEN
17479 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;}}
17480 #else //GC_CONFIG_DRIVEN
17481 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}}
17482 #endif //GC_CONFIG_DRIVEN
17483 #else //MARK_LIST
17484 #define m_boundary(o) {}
17485 #endif //MARK_LIST
17486
17487 #define m_boundary_fullgc(o) {}
17488
17489 #else //MULTIPLE_HEAPS
17490
17491 #ifdef MARK_LIST
17492 #ifdef GC_CONFIG_DRIVEN
17493 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;} if (slow > o) slow = o; if (shigh < o) shigh = o;}
17494 #else
17495 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}if (slow > o) slow = o; if (shigh < o) shigh = o;}
17496 #endif //GC_CONFIG_DRIVEN
17497 #else //MARK_LIST
17498 #define m_boundary(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
17499 #endif //MARK_LIST
17500
17501 #define m_boundary_fullgc(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
17502
17503 #endif //MULTIPLE_HEAPS
17504
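// The m_boundary variants above do two pieces of per-marked-object
// bookkeeping: append the object to the mark list (a bounded array which,
// when it doesn't overflow, can be sorted and used by the plan phase instead
// of walking whole segments), and, on workstation GC, track slow/shigh, the
// lowest and highest marked addresses. Under GC_CONFIG_DRIVEN the index keeps
// advancing past the end so the needed size can be measured afterwards. A
// sketch of the bounded append in isolation:
#if 0 // illustrative sketch only; not part of the build
#include <cstdint>

static uint8_t** mark_list_index; // next free slot
static uint8_t** mark_list_end;   // last usable slot

inline void mark_list_append (uint8_t* o)
{
    // mirrors m_boundary: entries past the end are silently dropped, and the
    // GC then falls back to not using the list for this collection
    if (mark_list_index <= mark_list_end)
        *mark_list_index++ = o;
}
#endif
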
17505 #define method_table(o) ((CObjectHeader*)(o))->GetMethodTable()
17506
17507 inline
17508 BOOL gc_heap::gc_mark1 (uint8_t* o)
17509 {
17510     BOOL marked = !marked (o);
17511     set_marked (o);
17512     dprintf (3, ("*%Ix*, newly marked: %d", (size_t)o, marked));
17513     return marked;
17514 }
17515
17516 inline
17517 BOOL gc_heap::gc_mark (uint8_t* o, uint8_t* low, uint8_t* high)
17518 {
17519     BOOL marked = FALSE;
17520     if ((o >= low) && (o < high))
17521         marked = gc_mark1 (o);
17522 #ifdef MULTIPLE_HEAPS
17523     else if (o)
17524     {
17525         //find the heap
17526         gc_heap* hp = heap_of_gc (o);
17527         assert (hp);
17528         if ((o >= hp->gc_low) && (o < hp->gc_high))
17529             marked = gc_mark1 (o);
17530     }
17531 #ifdef SNOOP_STATS
17532     snoop_stat.objects_checked_count++;
17533
17534     if (marked)
17535     {
17536         snoop_stat.objects_marked_count++;
17537     }
17538     if (!o)
17539     {
17540         snoop_stat.zero_ref_count++;
17541     }
17542
17543 #endif //SNOOP_STATS
17544 #endif //MULTIPLE_HEAPS
17545     return marked;
17546 }
17547
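// gc_mark1 above is the basic test-and-set: marked()/set_marked() operate on
// a bit in the object header, so "newly marked" is simply "the bit was not
// already set when we set it". A freestanding sketch of that idiom; the bit
// position is illustrative (the real accessors live on CObjectHeader):
#if 0 // illustrative sketch only; not part of the build
#include <cstddef>

const size_t kMarkBit = 1; // assumption: some low bit of a header word

inline bool test_and_set_mark (size_t* header_word)
{
    bool was_marked = (*header_word & kMarkBit) != 0;
    *header_word |= kMarkBit; // unconditional set, like set_marked (o)
    return !was_marked;       // TRUE only the first time, like gc_mark1
}
#endif
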
17548 #ifdef BACKGROUND_GC
17549
17550 inline
17551 BOOL gc_heap::background_marked (uint8_t* o)
17552 {
17553     return mark_array_marked (o);
17554 }
17555 inline
17556 BOOL gc_heap::background_mark1 (uint8_t* o)
17557 {
17558     BOOL to_mark = !mark_array_marked (o);
17559
17560     dprintf (3, ("b*%Ix*b(%d)", (size_t)o, (to_mark ? 1 : 0)));
17561     if (to_mark)
17562     {
17563         mark_array_set_marked (o);
17564         dprintf (4, ("n*%Ix*n", (size_t)o));
17565         return TRUE;
17566     }
17567     else
17568         return FALSE;
17569 }
17570
17571 // TODO: we could consider filtering out NULLs here instead of going to
17572 // look for them on other heaps
17573 inline
17574 BOOL gc_heap::background_mark (uint8_t* o, uint8_t* low, uint8_t* high)
17575 {
17576     BOOL marked = FALSE;
17577     if ((o >= low) && (o < high))
17578         marked = background_mark1 (o);
17579 #ifdef MULTIPLE_HEAPS
17580     else if (o)
17581     {
17582         //find the heap
17583         gc_heap* hp = heap_of (o);
17584         assert (hp);
17585         if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address))
17586             marked = background_mark1 (o);
17587     }
17588 #endif //MULTIPLE_HEAPS
17589     return marked;
17590 }
17591
17592 #endif //BACKGROUND_GC
17593
17594 inline
17595 uint8_t* gc_heap::next_end (heap_segment* seg, uint8_t* f)
17596 {
17597     if (seg == ephemeral_heap_segment)
17598         return  f;
17599     else
17600         return  heap_segment_allocated (seg);
17601 }
17602
17603 #define new_start() {if (ppstop <= start) {break;} else {parm = start;}}
17604 #define ignore_start 0
17605 #define use_start 1
17606
17607 #define go_through_object(mt,o,size,parm,start,start_useful,limit,exp)      \
17608 {                                                                           \
17609     CGCDesc* map = CGCDesc::GetCGCDescFromMT((MethodTable*)(mt));           \
17610     CGCDescSeries* cur = map->GetHighestSeries();                           \
17611     ptrdiff_t cnt = (ptrdiff_t) map->GetNumSeries();                        \
17612                                                                             \
17613     if (cnt >= 0)                                                           \
17614     {                                                                       \
17615         CGCDescSeries* last = map->GetLowestSeries();                       \
17616         uint8_t** parm = 0;                                                 \
17617         do                                                                  \
17618         {                                                                   \
17619             assert (parm <= (uint8_t**)((o) + cur->GetSeriesOffset()));     \
17620             parm = (uint8_t**)((o) + cur->GetSeriesOffset());               \
17621             uint8_t** ppstop =                                              \
17622                 (uint8_t**)((uint8_t*)parm + cur->GetSeriesSize() + (size));\
17623             if (!start_useful || (uint8_t*)ppstop > (start))                \
17624             {                                                               \
17625                 if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);\
17626                 while (parm < ppstop)                                       \
17627                 {                                                           \
17628                    {exp}                                                    \
17629                    parm++;                                                  \
17630                 }                                                           \
17631             }                                                               \
17632             cur--;                                                          \
17633                                                                             \
17634         } while (cur >= last);                                              \
17635     }                                                                       \
17636     else                                                                    \
17637     {                                                                       \
17638         /* Handle the repeating case - array of valuetypes */               \
17639         uint8_t** parm = (uint8_t**)((o) + cur->startoffset);               \
17640         if (start_useful && start > (uint8_t*)parm)                         \
17641         {                                                                   \
17642             ptrdiff_t cs = mt->RawGetComponentSize();                         \
17643             parm = (uint8_t**)((uint8_t*)parm + (((start) - (uint8_t*)parm)/cs)*cs); \
17644         }                                                                   \
17645         while ((uint8_t*)parm < ((o)+(size)-plug_skew))                     \
17646         {                                                                   \
17647             for (ptrdiff_t __i = 0; __i > cnt; __i--)                         \
17648             {                                                               \
17649                 HALF_SIZE_T skip =  cur->val_serie[__i].skip;               \
17650                 HALF_SIZE_T nptrs = cur->val_serie[__i].nptrs;              \
17651                 uint8_t** ppstop = parm + nptrs;                            \
17652                 if (!start_useful || (uint8_t*)ppstop > (start))            \
17653                 {                                                           \
17654                     if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);      \
17655                     do                                                      \
17656                     {                                                       \
17657                        {exp}                                                \
17658                        parm++;                                              \
17659                     } while (parm < ppstop);                                \
17660                 }                                                           \
17661                 parm = (uint8_t**)((uint8_t*)ppstop + skip);                \
17662             }                                                               \
17663         }                                                                   \
17664     }                                                                       \
17665 }
17666
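// Worked example for the cnt >= 0 path above: take an object o with two
// reference fields at offsets 8 and 16 and total size 32 (offsets are
// illustrative). Its GCDesc carries one series with offset 8 and a stored
// series size of 16 - 32 = -16, because series sizes are biased by the total
// object size; that bias is why the macro adds (size) back. So
// ppstop = o + 8 + (-16) + 32 = o + 24, and parm visits exactly the two
// slots at o+8 and o+16. The cnt < 0 path handles arrays of valuetypes,
// where each val_serie entry gives a (nptrs, skip) pair repeated per element.
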
17667 #define go_through_object_nostart(mt,o,size,parm,exp) {go_through_object(mt,o,size,parm,o,ignore_start,(o + size),exp); }
17668
17669 // One thing to note about this macro:
17670 // you can use *parm safely, but in general you don't want to use parm itself
17671 // because for collectible types it's not an address on the managed heap.
17672 #ifndef COLLECTIBLE_CLASS
17673 #define go_through_object_cl(mt,o,size,parm,exp)                            \
17674 {                                                                           \
17675     if (header(o)->ContainsPointers())                                      \
17676     {                                                                       \
17677         go_through_object_nostart(mt,o,size,parm,exp);                      \
17678     }                                                                       \
17679 }
17680 #else //COLLECTIBLE_CLASS
17681 #define go_through_object_cl(mt,o,size,parm,exp)                            \
17682 {                                                                           \
17683     if (header(o)->Collectible())                                           \
17684     {                                                                       \
17685         uint8_t* class_obj = get_class_object (o);                             \
17686         uint8_t** parm = &class_obj;                                           \
17687         do {exp} while (false);                                             \
17688     }                                                                       \
17689     if (header(o)->ContainsPointers())                                      \
17690     {                                                                       \
17691         go_through_object_nostart(mt,o,size,parm,exp);                      \
17692     }                                                                       \
17693 }
17694 #endif //COLLECTIBLE_CLASS
17695
17696 // This starts a plug. But mark_stack_tos isn't increased until set_pinned_info is called.
17697 void gc_heap::enque_pinned_plug (uint8_t* plug,
17698                                  BOOL save_pre_plug_info_p, 
17699                                  uint8_t* last_object_in_last_plug)
17700 {
17701     if (mark_stack_array_length <= mark_stack_tos)
17702     {
17703         if (!grow_mark_stack (mark_stack_array, mark_stack_array_length, MARK_STACK_INITIAL_LENGTH))
17704         {
17705             // we don't want to continue here due to security
17706             // risks. This happens very rarely, and fixing it in a
17707             // way that lets us continue is a bit involved and will
17708             // not be done in Dev10.
17709             GCToEEInterface::HandleFatalError(CORINFO_EXCEPTION_GC);
17710         }
17711     }
17712
17713     dprintf (3, ("enqueuing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d", 
17714         mark_stack_tos, &mark_stack_array[mark_stack_tos], plug, mark_stack_bos, last_object_in_last_plug, (save_pre_plug_info_p ? 1 : 0)));
17715     mark& m = mark_stack_array[mark_stack_tos];
17716     m.first = plug;
17717     // Must be set now because if we have a short object we'll need the value of saved_pre_p.
17718     m.saved_pre_p = save_pre_plug_info_p;
17719
17720     if (save_pre_plug_info_p)
17721     {
17722 #ifdef SHORT_PLUGS
17723         BOOL is_padded = is_plug_padded (last_object_in_last_plug);
17724         if (is_padded)
17725             clear_plug_padded (last_object_in_last_plug);
17726 #endif //SHORT_PLUGS
17727         memcpy (&(m.saved_pre_plug), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));
17728 #ifdef SHORT_PLUGS
17729         if (is_padded)
17730             set_plug_padded (last_object_in_last_plug);
17731 #endif //SHORT_PLUGS
17732
17733         memcpy (&(m.saved_pre_plug_reloc), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));
17734
17735         // If the last object in the last plug is too short, it requires special handling.
17736         size_t last_obj_size = plug - last_object_in_last_plug;
17737         if (last_obj_size < min_pre_pin_obj_size)
17738         {
17739             record_interesting_data_point (idp_pre_short);
17740 #ifdef SHORT_PLUGS
17741             if (is_padded)
17742                 record_interesting_data_point (idp_pre_short_padded);
17743 #endif //SHORT_PLUGS
17744             dprintf (3, ("encountered a short object %Ix right before pinned plug %Ix!", 
17745                          last_object_in_last_plug, plug));
17746             // Need to set the short bit regardless of having refs or not because we need to 
17747             // indicate that this object is not walkable.
17748             m.set_pre_short();
17749
17750 #ifdef COLLECTIBLE_CLASS
17751             if (is_collectible (last_object_in_last_plug))
17752             {
17753                 m.set_pre_short_collectible();
17754             }
17755 #endif //COLLECTIBLE_CLASS
17756
17757             if (contain_pointers (last_object_in_last_plug))
17758             {
17759                 dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));
17760
17761                 go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
17762                     {
17763                         size_t gap_offset = (((size_t)pval - (size_t)(plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
17764                         dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
17765                         m.set_pre_short_bit (gap_offset);
17766                     }
17767                 );
17768             }
17769         }
17770     }
17771
17772     m.saved_post_p = FALSE;
17773 }
17774
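// What enque_pinned_plug saves, as a sketch: planning will later write a
// gap_reloc_pair (gap size plus reloc info) into the bytes immediately before
// the pinned plug, clobbering the tail of the last object of the previous
// plug. saved_pre_plug keeps a pristine copy and saved_pre_plug_reloc a
// second copy that can be relocated independently, so those bytes can be
// restored afterwards:
//
//   ... last object of previous plug |<- gap_reloc_pair ->|<- pinned plug ...
//                                    ^ &((plug_and_gap*)plug)[-1]
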
17775 void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug)
17776 {
17777     UNREFERENCED_PARAMETER(last_pinned_plug);
17778
17779     mark& m = mark_stack_array[mark_stack_tos - 1];
17780     assert (last_pinned_plug == m.first);
17781     m.saved_post_plug_info_start = (uint8_t*)&(((plug_and_gap*)post_plug)[-1]);
17782
17783 #ifdef SHORT_PLUGS
17784     BOOL is_padded = is_plug_padded (last_object_in_last_plug);
17785     if (is_padded)
17786         clear_plug_padded (last_object_in_last_plug);
17787 #endif //SHORT_PLUGS
17788     memcpy (&(m.saved_post_plug), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));
17789 #ifdef SHORT_PLUGS
17790     if (is_padded)
17791         set_plug_padded (last_object_in_last_plug);
17792 #endif //SHORT_PLUGS
17793
17794     memcpy (&(m.saved_post_plug_reloc), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));
17795
17796     // This is important - we need to clear all bits here except the last one.
17797     m.saved_post_p = TRUE;
17798
17799 #ifdef _DEBUG
17800     m.saved_post_plug_debug.gap = 1;
17801 #endif //_DEBUG
17802
17803     dprintf (3, ("PP %Ix has NP %Ix right after", last_pinned_plug, post_plug));
17804
17805     size_t last_obj_size = post_plug - last_object_in_last_plug;
17806     if (last_obj_size < min_pre_pin_obj_size)
17807     {
17808         dprintf (3, ("PP %Ix last obj %Ix is too short", last_pinned_plug, last_object_in_last_plug));
17809         record_interesting_data_point (idp_post_short);
17810 #ifdef SHORT_PLUGS
17811         if (is_padded)
17812             record_interesting_data_point (idp_post_short_padded);
17813 #endif //SHORT_PLUGS
17814         m.set_post_short();
17815 #if defined (_DEBUG) && defined (VERIFY_HEAP)
17816         verify_pinned_queue_p = TRUE;
17817 #endif // _DEBUG && VERIFY_HEAP
17818
17819 #ifdef COLLECTIBLE_CLASS
17820         if (is_collectible (last_object_in_last_plug))
17821         {
17822             m.set_post_short_collectible();
17823         }
17824 #endif //COLLECTIBLE_CLASS
17825
17826         if (contain_pointers (last_object_in_last_plug))
17827         {
17828             dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));
17829
17830             // TODO: since we won't be able to walk this object in relocation, we still need to
17831             // take care of collectible assemblies here.
17832             go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
17833                 {
17834                     size_t gap_offset = (((size_t)pval - (size_t)(post_plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
17835                     dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
17836                     m.set_post_short_bit (gap_offset);
17837                 }
17838             );
17839         }
17840     }
17841 }
17842
17843 //#define PREFETCH
17844 #ifdef PREFETCH
17845 __declspec(naked) void __fastcall Prefetch(void* addr)
17846 {
17847    __asm {
17848        PREFETCHT0 [ECX]
17849         ret
17850     };
17851 }
17852 #else //PREFETCH
17853 inline void Prefetch (void* addr)
17854 {
17855     UNREFERENCED_PARAMETER(addr);
17856 }
17857 #endif //PREFETCH
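
// The enabled version above is 32-bit MSVC-specific (a naked __fastcall with
// inline asm). If prefetching were ever enabled elsewhere, the usual portable
// spellings are the compiler intrinsics, sketched here as an assumption about
// how one might do it, not as part of this file's build:
#if 0 // illustrative sketch only; not part of the build
#if defined(_MSC_VER)
#include <xmmintrin.h>
inline void Prefetch (void* addr) { _mm_prefetch ((const char*)addr, _MM_HINT_T0); }
#elif defined(__GNUC__)
inline void Prefetch (void* addr) { __builtin_prefetch (addr); }
#endif
#endif
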
17858 #ifdef MH_SC_MARK
17859 inline
17860 VOLATILE(uint8_t*)& gc_heap::ref_mark_stack (gc_heap* hp, int index)
17861 {
17862     return ((VOLATILE(uint8_t*)*)(hp->mark_stack_array))[index];
17863 }
17864
17865 #endif //MH_SC_MARK
17866
17867 #define stolen 2
17868 #define partial 1
17869 #define partial_object 3
17870 inline 
17871 uint8_t* ref_from_slot (uint8_t* r)
17872 {
17873     return (uint8_t*)((size_t)r & ~(stolen | partial));
17874 }
17875 inline
17876 BOOL stolen_p (uint8_t* r)
17877 {
17878     return (((size_t)r & stolen) && !((size_t)r & partial));
17879 }
17880 inline 
17881 BOOL ready_p (uint8_t* r)
17882 {
17883     return ((size_t)r != 1);
17884 }
17885 inline
17886 BOOL partial_p (uint8_t* r)
17887 {
17888     return (((size_t)r & partial) && !((size_t)r & stolen));
17889 }
17890 inline 
17891 BOOL straight_ref_p (uint8_t* r)
17892 {
17893     return (!stolen_p (r) && !partial_p (r));
17894 }
17895 inline 
17896 BOOL partial_object_p (uint8_t* r)
17897 {
17898     return (((size_t)r & partial_object) == partial_object);
17899 }
17900 inline
17901 BOOL ref_p (uint8_t* r)
17902 {
17903     return (straight_ref_p (r) || partial_object_p (r));
17904 }
17905
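// Decoder ring for the low two bits used by the helpers above (object
// pointers are at least 4-byte aligned, so both bits are free for tagging):
//
//   bits 00 -> straight ref    (a plain object pointer)
//   bits 01 -> partial         (a slot inside an object still being scanned)
//   bits 10 -> stolen          (the entry was taken by another marking thread)
//   bits 11 -> partial_object  (the partially scanned object itself)
//
// ref_from_slot() masks both bits off to recover the real address.
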
17906 void gc_heap::mark_object_simple1 (uint8_t* oo, uint8_t* start THREAD_NUMBER_DCL)
17907 {
17908     SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_tos = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)mark_stack_array;
17909     SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_limit = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)&mark_stack_array[mark_stack_array_length];
17910     SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_base = mark_stack_tos;
17911 #ifdef SORT_MARK_STACK
17912     SERVER_SC_MARK_VOLATILE(uint8_t*)* sorted_tos = mark_stack_base;
17913 #endif //SORT_MARK_STACK
17914
17915     // If we are doing a full GC we don't use the mark list anyway, so use m_boundary_fullgc,
17916     // which doesn't update the mark list.
17917     BOOL  full_p = (settings.condemned_generation == max_generation);
17918
17919     assert ((start >= oo) && (start < oo+size(oo)));
17920
17921 #ifndef MH_SC_MARK
17922     *mark_stack_tos = oo;
17923 #endif //!MH_SC_MARK
17924
17925     while (1)
17926     {
17927 #ifdef MULTIPLE_HEAPS
17928 #else  //MULTIPLE_HEAPS
17929         const int thread = 0;
17930 #endif //MULTIPLE_HEAPS
17931
17932         if (oo && ((size_t)oo != 4))
17933         {
17934             size_t s = 0; 
17935             if (stolen_p (oo))
17936             {
17937                 --mark_stack_tos;
17938                 goto next_level;
17939             }
17940             else if (!partial_p (oo) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
17941             {
17942                 BOOL overflow_p = FALSE;
17943
17944                 if (mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit  - 1))
17945                 {
17946                     size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
17947                     if (mark_stack_tos + CGCDesc::GetNumPointers(method_table(oo), s, num_components) >= (mark_stack_limit - 1))
17948                     {
17949                         overflow_p = TRUE;
17950                     }
17951                 }
17952                 
17953                 if (overflow_p == FALSE)
17954                 {
17955                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
17956
17957                     go_through_object_cl (method_table(oo), oo, s, ppslot,
17958                                           {
17959                                               uint8_t* o = *ppslot;
17960                                               Prefetch(o);
17961                                               if (gc_mark (o, gc_low, gc_high))
17962                                               {
17963                                                   if (full_p)
17964                                                   {
17965                                                       m_boundary_fullgc (o);
17966                                                   }
17967                                                   else
17968                                                   {
17969                                                       m_boundary (o);
17970                                                   }
17971                                                   size_t obj_size = size (o);
17972                                                   promoted_bytes (thread) += obj_size;
17973                                                   if (contain_pointers_or_collectible (o))
17974                                                   {
17975                                                       *(mark_stack_tos++) = o;
17976                                                   }
17977                                               }
17978                                           }
17979                         );
17980                 }
17981                 else
17982                 {
17983                     dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
17984                     min_overflow_address = min (min_overflow_address, oo);
17985                     max_overflow_address = max (max_overflow_address, oo);
17986                 }
17987             }
17988             else
17989             {
17990                 if (partial_p (oo))
17991                 {
17992                     start = ref_from_slot (oo);
17993                     oo = ref_from_slot (*(--mark_stack_tos));
17994                     dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
17995                     assert ((oo < start) && (start < (oo + size (oo))));
17996                 }
17997 #ifdef COLLECTIBLE_CLASS
17998                 else
17999                 {
18000                     // If there's a class object, push it now. We are guaranteed to have the slot since
18001                     // we just popped one object off.
18002                     if (is_collectible (oo))
18003                     {
18004                         uint8_t* class_obj = get_class_object (oo);
18005                         if (gc_mark (class_obj, gc_low, gc_high))
18006                         {
18007                             if (full_p)
18008                             {
18009                                 m_boundary_fullgc (class_obj);
18010                             }
18011                             else
18012                             {
18013                                 m_boundary (class_obj);
18014                             }
18015
18016                             size_t obj_size = size (class_obj);
18017                             promoted_bytes (thread) += obj_size;
18018                             *(mark_stack_tos++) = class_obj;
18019                             // The code below expects that the oo is still stored in the stack slot that was
18020                             // just popped and it "pushes" it back just by incrementing the mark_stack_tos. 
18021                             // But the class_obj has just overwritten that stack slot and so the oo needs to
18022                             // be stored to the new slot that's pointed to by the mark_stack_tos.
18023                             *mark_stack_tos = oo;
18024                         }
18025                     }
18026
18027                     if (!contain_pointers (oo))
18028                     {
18029                         goto next_level;
18030                     }
18031                 }
18032 #endif //COLLECTIBLE_CLASS
18033
18034                 s = size (oo);
18035                 
18036                 BOOL overflow_p = FALSE;
18037             
18038                 if (mark_stack_tos + (num_partial_refs + 2)  >= mark_stack_limit)
18039                 {
18040                     overflow_p = TRUE;
18041                 }
18042                 if (overflow_p == FALSE)
18043                 {
18044                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18045
18046                     //push the object and its current scan position (start)
18047                     SERVER_SC_MARK_VOLATILE(uint8_t*)* place = ++mark_stack_tos;
18048                     mark_stack_tos++;
18049 #ifdef MH_SC_MARK
18050                     *(place-1) = 0;
18051                     *(place) = (uint8_t*)partial;
18052 #endif //MH_SC_MARK
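                    // Together these two slots form a partial-mark tuple:
                    // (place-1) later receives (oo | partial_object), naming
                    // the parent being scanned, and *place receives either the
                    // bare 'partial' tag (scan in flight) or ref_to_continue,
                    // the tagged slot address where scanning should resume.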
18053                     int i = num_partial_refs; 
18054                     uint8_t* ref_to_continue = 0;
18055
18056                     go_through_object (method_table(oo), oo, s, ppslot,
18057                                        start, use_start, (oo + s),
18058                                        {
18059                                            uint8_t* o = *ppslot;
18060                                            Prefetch(o);
18061                                            if (gc_mark (o, gc_low, gc_high))
18062                                            {
18063                                                 if (full_p)
18064                                                 {
18065                                                     m_boundary_fullgc (o);
18066                                                 }
18067                                                 else
18068                                                 {
18069                                                     m_boundary (o);
18070                                                 }
18071                                                 size_t obj_size = size (o);
18072                                                 promoted_bytes (thread) += obj_size;
18073                                                 if (contain_pointers_or_collectible (o))
18074                                                 {
18075                                                     *(mark_stack_tos++) = o;
18076                                                     if (--i == 0)
18077                                                     {
18078                                                         ref_to_continue = (uint8_t*)((size_t)(ppslot+1) | partial);
18079                                                         goto more_to_do;
18080                                                     }
18081
18082                                                 }
18083                                            }
18084
18085                                        }
18086                         );
18087                     //we are finished with this object
18088                     assert (ref_to_continue == 0);
18089 #ifdef MH_SC_MARK
18090                     assert ((*(place-1)) == (uint8_t*)0);
18091 #else //MH_SC_MARK
18092                     *(place-1) = 0;
18093 #endif //MH_SC_MARK
18094                     *place = 0; 
18095                     // shouldn't we decrease tos by 2 here??
18096
18097 more_to_do:
18098                     if (ref_to_continue)
18099                     {
18100                         //update the start
18101 #ifdef MH_SC_MARK
18102                         assert ((*(place-1)) == (uint8_t*)0);
18103                         *(place-1) = (uint8_t*)((size_t)oo | partial_object);
18104                         assert (((*place) == (uint8_t*)1) || ((*place) == (uint8_t*)2));
18105 #endif //MH_SC_MARK
18106                         *place = ref_to_continue;
18107                     }
18108                 }
18109                 else
18110                 {
18111                     dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
18112                     min_overflow_address = min (min_overflow_address, oo);
18113                     max_overflow_address = max (max_overflow_address, oo);
18114                 }
18115             }
18116 #ifdef SORT_MARK_STACK
18117             if (mark_stack_tos > sorted_tos + mark_stack_array_length/8)
18118             {
18119                 rqsort1 (sorted_tos, mark_stack_tos-1);
18120                 sorted_tos = mark_stack_tos-1;
18121             }
18122 #endif //SORT_MARK_STACK
18123         }
18124     next_level:
18125         if (!(mark_stack_empty_p()))
18126         {
18127             oo = *(--mark_stack_tos);
18128             start = oo;
18129
18130 #ifdef SORT_MARK_STACK
18131             sorted_tos = min ((size_t)sorted_tos, (size_t)mark_stack_tos);
18132 #endif //SORT_MARK_STACK
18133         }
18134         else
18135             break;
18136     }
18137 }
18138
18139 #ifdef MH_SC_MARK
18140 BOOL same_numa_node_p (int hn1, int hn2)
18141 {
18142     return (heap_select::find_numa_node_from_heap_no (hn1) == heap_select::find_numa_node_from_heap_no (hn2));
18143 }
18144
18145 int find_next_buddy_heap (int this_heap_number, int current_buddy, int n_heaps)
18146 {
18147     int hn = (current_buddy+1)%n_heaps;
18148     while (hn != current_buddy)
18149     {
18150         if ((this_heap_number != hn) && (same_numa_node_p (this_heap_number, hn)))
18151             return hn;
18152         hn = (hn+1)%n_heaps;
18153     }
18154     return current_buddy;
18155 }
18156
18157 void 
18158 gc_heap::mark_steal()
18159 {
18160     mark_stack_busy() = 0;
18161     //clear the mark stack in the snooping range
18162     for (int i = 0; i < max_snoop_level; i++)
18163     {
18164         ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
18165     }
18166
18167     //pick the next heap as our buddy
18168     int thpn = find_next_buddy_heap (heap_number, heap_number, n_heaps);
18169
18170 #ifdef SNOOP_STATS
18171         dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps));
18172         uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
18173 #endif //SNOOP_STATS
18174
18175     int idle_loop_count = 0; 
18176     int first_not_ready_level = 0;
18177
18178     while (1)
18179     {
18180         gc_heap* hp = g_heaps [thpn];
18181         int level = first_not_ready_level;
18182         first_not_ready_level = 0; 
18183
18184         while (check_next_mark_stack (hp) && (level < (max_snoop_level-1)))
18185         {
18186             idle_loop_count = 0; 
18187 #ifdef SNOOP_STATS
18188             snoop_stat.busy_count++;
18189             dprintf (SNOOP_LOG, ("heap%d: looking at next heap level %d stack contents: %Ix", 
18190                                  heap_number, level, (int)((uint8_t**)(hp->mark_stack_array))[level]));
18191 #endif //SNOOP_STATS
18192
18193             uint8_t* o = ref_mark_stack (hp, level);
18194
18195             uint8_t* start = o;
18196             if (ref_p (o))
18197             {
18198                 mark_stack_busy() = 1;
18199
18200                 BOOL success = TRUE;
18201                 uint8_t* next = (ref_mark_stack (hp, level+1));
18202                 if (ref_p (next))
18203                 {
18204                     if (((size_t)o > 4) && !partial_object_p (o))
18205                     {
18206                         //this is a normal object, not a partial mark tuple
18207                         //success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), 0, o)==o);
18208                         success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), (uint8_t*)4, o)==o);
18209 #ifdef SNOOP_STATS
18210                         snoop_stat.interlocked_count++;
18211                         if (success)
18212                             snoop_stat.normal_count++;
18213 #endif //SNOOP_STATS
18214                     }
18215                     else
18216                     {
18217                         //it is a stolen entry, or beginning/ending of a partial mark
18218                         level++;
18219 #ifdef SNOOP_STATS
18220                         snoop_stat.stolen_or_pm_count++;
18221 #endif //SNOOP_STATS
18222                         success = FALSE;
18223                     }
18224                 }
18225                 else if (stolen_p (next))
18226                 {
18227                     //ignore the stolen guy and go to the next level
18228                     success = FALSE;
18229                     level+=2;
18230 #ifdef SNOOP_STATS
18231                     snoop_stat.stolen_entry_count++;
18232 #endif //SNOOP_STATS
18233                 }
18234                 else
18235                 {
18236                     assert (partial_p (next));
18237                     start = ref_from_slot (next);
18238                     //re-read the object
18239                     o = ref_from_slot (ref_mark_stack (hp, level));
18240                     if (o && start)
18241                     {
18242                         //steal the object
18243                         success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level+1), (uint8_t*)stolen, next)==next);
18244 #ifdef SNOOP_STATS
18245                         snoop_stat.interlocked_count++;
18246                         if (success)
18247                         {
18248                             snoop_stat.partial_mark_parent_count++;                    
18249                         }
18250 #endif //SNOOP_STATS
18251                     }
18252                     else
18253                     {
18254                         // stack is not ready, or o is completely different from the last time we read from this stack level.
18255                         // go up 2 levels to steal children or totally unrelated objects.
18256                         success = FALSE;
18257                         if (first_not_ready_level == 0)
18258                         {
18259                             first_not_ready_level = level;
18260                         }
18261                         level+=2;
18262 #ifdef SNOOP_STATS
18263                         snoop_stat.pm_not_ready_count++;
18264 #endif //SNOOP_STATS                        
18265                     }
18266                 }
18267                 if (success)
18268                 {
18269
18270 #ifdef SNOOP_STATS
18271                     dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms",
18272                             heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
18273                             (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
18274                     uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
18275 #endif //SNOOP_STATS
18276
18277                     mark_object_simple1 (o, start, heap_number);
18278
18279 #ifdef SNOOP_STATS
18280                     dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms",
18281                             heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
18282                             (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
18283 #endif //SNOOP_STATS
18284
18285                     mark_stack_busy() = 0;
18286
18287                     //clear the mark stack in snooping range
18288                     for (int i = 0; i < max_snoop_level; i++)
18289                     {
18290                         if (((uint8_t**)mark_stack_array)[i] != 0)
18291                         {
18292                             ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
18293 #ifdef SNOOP_STATS
18294                             snoop_stat.stack_bottom_clear_count++;
18295 #endif //SNOOP_STATS
18296                         }
18297                     }
18298
18299                     level = 0; 
18300                 }
18301                 mark_stack_busy() = 0;
18302             }
18303             else
18304             {
18305                 //slot is either partial or stolen
18306                 level++;
18307             }
18308         }
18309         if ((first_not_ready_level != 0) && hp->mark_stack_busy())
18310         {
18311             continue;
18312         } 
18313         if (!hp->mark_stack_busy())
18314         {
18315             first_not_ready_level = 0; 
18316             idle_loop_count++;
18317
18318             if ((idle_loop_count % 6) == 1)
18319             {
18320 #ifdef SNOOP_STATS
18321                 snoop_stat.switch_to_thread_count++;
18322 #endif //SNOOP_STATS
18323                 GCToOSInterface::Sleep(1);
18324             }
18325             int free_count = 1;
18326 #ifdef SNOOP_STATS
18327             snoop_stat.stack_idle_count++;
18328             //dprintf (SNOOP_LOG, ("heap%d: counting idle threads", heap_number));
18329 #endif //SNOOP_STATS
18330             for (int hpn = (heap_number+1)%n_heaps; hpn != heap_number;)
18331             {
18332                 if (!((g_heaps [hpn])->mark_stack_busy()))
18333                 {
18334                     free_count++;
18335 #ifdef SNOOP_STATS
18336                 dprintf (SNOOP_LOG, ("heap%d: %d idle", heap_number, free_count));
18337 #endif //SNOOP_STATS
18338                 }
18339                 else if (same_numa_node_p (hpn, heap_number) || ((idle_loop_count % 1000) == 999))
18340                 {
18341                     thpn = hpn;
18342                     break;
18343                 }
18344                 hpn = (hpn+1)%n_heaps;
18345                 YieldProcessor();
18346             }
18347             if (free_count == n_heaps)
18348             {
18349                 break;
18350             }
18351         }
18352     }
18353 }
18354
18355 inline
18356 BOOL gc_heap::check_next_mark_stack (gc_heap* next_heap)
18357 {
18358 #ifdef SNOOP_STATS
18359     snoop_stat.check_level_count++;
18360 #endif //SNOOP_STATS
18361     return (next_heap->mark_stack_busy()>=1);
18362 }
18363 #endif //MH_SC_MARK
18364
18365 #ifdef SNOOP_STATS
18366 void gc_heap::print_snoop_stat()
18367 {
18368     dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", 
18369         "heap", "check", "zero", "mark", "stole", "pstack", "nstack", "nonsk"));
18370     dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d",
18371         snoop_stat.heap_index,
18372         snoop_stat.objects_checked_count,
18373         snoop_stat.zero_ref_count,
18374         snoop_stat.objects_marked_count,
18375         snoop_stat.stolen_stack_count,
18376         snoop_stat.partial_stack_count,
18377         snoop_stat.normal_stack_count,
18378         snoop_stat.non_stack_count));
18379     dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", 
18380         "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "clear"));
18381     dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
18382         snoop_stat.heap_index,
18383         snoop_stat.check_level_count,
18384         snoop_stat.busy_count,
18385         snoop_stat.interlocked_count,
18386         snoop_stat.partial_mark_parent_count,
18387         snoop_stat.stolen_or_pm_count,
18388         snoop_stat.stolen_entry_count,
18389         snoop_stat.pm_not_ready_count,
18390         snoop_stat.normal_count,
18391         snoop_stat.stack_bottom_clear_count));
18392
18393     printf ("\n%4s | %8s | %8s | %8s | %8s | %8s\n", 
18394         "heap", "check", "zero", "mark", "idle", "switch");
18395     printf ("%4d | %8d | %8d | %8d | %8d | %8d\n",
18396         snoop_stat.heap_index,
18397         snoop_stat.objects_checked_count,
18398         snoop_stat.zero_ref_count,
18399         snoop_stat.objects_marked_count,
18400         snoop_stat.stack_idle_count,
18401         snoop_stat.switch_to_thread_count);
18402     printf ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", 
18403         "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
18404     printf ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
18405         snoop_stat.heap_index,
18406         snoop_stat.check_level_count,
18407         snoop_stat.busy_count,
18408         snoop_stat.interlocked_count,
18409         snoop_stat.partial_mark_parent_count,
18410         snoop_stat.stolen_or_pm_count,
18411         snoop_stat.stolen_entry_count,
18412         snoop_stat.pm_not_ready_count,
18413         snoop_stat.normal_count,
18414         snoop_stat.stack_bottom_clear_count);
18415 }
18416 #endif //SNOOP_STATS
18417
18418 #ifdef HEAP_ANALYZE
18419 void
18420 gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
18421 {
18422     if (!internal_root_array)
18423     {
18424         internal_root_array = new (nothrow) uint8_t* [internal_root_array_length];
18425         if (!internal_root_array)
18426         {
18427             heap_analyze_success = FALSE;
18428         }
18429     }
18430
18431     if (heap_analyze_success && (internal_root_array_length <= internal_root_array_index))
18432     {
18433         size_t new_size = 2*internal_root_array_length;
18434
18435         uint64_t available_physical = 0;
18436         get_memory_info (NULL, &available_physical);
18437         if (new_size > (size_t)(available_physical / 10))
18438         {
18439             heap_analyze_success = FALSE;
18440         }
18441         else
18442         {
18443             uint8_t** tmp = new (nothrow) uint8_t* [new_size];
18444             if (tmp)
18445             {
18446                 memcpy (tmp, internal_root_array,
18447                         internal_root_array_length*sizeof (uint8_t*));
18448                 delete[] internal_root_array;
18449                 internal_root_array = tmp;
18450                 internal_root_array_length = new_size;
18451             }
18452             else
18453             {
18454                 heap_analyze_success = FALSE;
18455             }
18456         }
18457     }
18458
18459     if (heap_analyze_success)
18460     {
18461         PREFIX_ASSUME(internal_root_array_index < internal_root_array_length);
18462
18463         uint8_t* ref = (uint8_t*)po;
18464         if (!current_obj || 
18465             !((ref >= current_obj) && (ref < (current_obj + current_obj_size))))
18466         {
18467             gc_heap* hp = gc_heap::heap_of (ref);
18468             current_obj = hp->find_object (ref, hp->lowest_address);
18469             current_obj_size = size (current_obj);
18470
18471             internal_root_array[internal_root_array_index] = current_obj;
18472             internal_root_array_index++;
18473         }
18474     }
18475
18476     mark_object_simple (po THREAD_NUMBER_ARG);
18477 }
18478 #endif //HEAP_ANALYZE
18479
18480 //this method assumes that *po is in the [low, high[ range
18481 void
18482 gc_heap::mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
18483 {
18484     uint8_t* o = *po;
18485 #ifdef MULTIPLE_HEAPS
18486 #else  //MULTIPLE_HEAPS
18487     const int thread = 0;
18488 #endif //MULTIPLE_HEAPS
18489     {
18490 #ifdef SNOOP_STATS
18491         snoop_stat.objects_checked_count++;
18492 #endif //SNOOP_STATS
18493
18494         if (gc_mark1 (o))
18495         {
18496             m_boundary (o);
18497             size_t s = size (o);
18498             promoted_bytes (thread) += s;
18499             {
18500                 go_through_object_cl (method_table(o), o, s, poo,
18501                                         {
18502                                             uint8_t* oo = *poo;
18503                                             if (gc_mark (oo, gc_low, gc_high))
18504                                             {
18505                                                 m_boundary (oo);
18506                                                 size_t obj_size = size (oo);
18507                                                 promoted_bytes (thread) += obj_size;
18508
18509                                                 if (contain_pointers_or_collectible (oo))
18510                                                     mark_object_simple1 (oo, oo THREAD_NUMBER_ARG);
18511                                             }
18512                                         }
18513                     );
18514             }
18515         }
18516     }
18517 }
18518
18519 inline
18520 uint8_t* gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
18521 {
18522     if ((o >= gc_low) && (o < gc_high))
18523         mark_object_simple (&o THREAD_NUMBER_ARG);
18524 #ifdef MULTIPLE_HEAPS
18525     else if (o)
18526     {
18527         //find the heap
18528         gc_heap* hp = heap_of (o);
18529         assert (hp);
18530         if ((o >= hp->gc_low) && (o < hp->gc_high))
18531             mark_object_simple (&o THREAD_NUMBER_ARG);
18532     }
18533 #endif //MULTIPLE_HEAPS
18534
18535     return o;
18536 }
18537
18538 #ifdef BACKGROUND_GC
18539
18540 void gc_heap::background_mark_simple1 (uint8_t* oo THREAD_NUMBER_DCL)
18541 {
18542     uint8_t** mark_stack_limit = &background_mark_stack_array[background_mark_stack_array_length];
18543
18544 #ifdef SORT_MARK_STACK
18545     uint8_t** sorted_tos = background_mark_stack_array;
18546 #endif //SORT_MARK_STACK
18547
18548     background_mark_stack_tos = background_mark_stack_array;
18549
18550     while (1)
18551     {
18552 #ifdef MULTIPLE_HEAPS
18553 #else  //MULTIPLE_HEAPS
18554         const int thread = 0;
18555 #endif //MULTIPLE_HEAPS
18556         if (oo)
18557         {
18558             size_t s = 0; 
18559             if ((((size_t)oo & 1) == 0) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
18560             {
18561                 BOOL overflow_p = FALSE;
18562             
18563                 if (background_mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1))
18564                 {
18565                     size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
18566                     size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);
18567                     if (background_mark_stack_tos + num_pointers >= (mark_stack_limit - 1))
18568                     {
18569                         dprintf (2, ("h%d: %Id left, obj (mt: %Ix) %Id ptrs", 
18570                             heap_number,
18571                             (size_t)(mark_stack_limit - 1 - background_mark_stack_tos),
18572                             method_table(oo), 
18573                             num_pointers));
18574
18575                         bgc_overflow_count++;
18576                         overflow_p = TRUE;
18577                     }
18578                 }
18579             
18580                 if (overflow_p == FALSE)
18581                 {
18582                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18583
18584                     go_through_object_cl (method_table(oo), oo, s, ppslot,
18585                     {
18586                         uint8_t* o = *ppslot;
18587                         Prefetch(o);
18588                         if (background_mark (o, 
18589                                              background_saved_lowest_address, 
18590                                              background_saved_highest_address))
18591                         {
18592                             //m_boundary (o);
18593                             size_t obj_size = size (o);
18594                             bpromoted_bytes (thread) += obj_size;
18595                             if (contain_pointers_or_collectible (o))
18596                             {
18597                                 *(background_mark_stack_tos++) = o;
18598
18599                             }
18600                         }
18601                     }
18602                         );
18603                 }
18604                 else
18605                 {
18606                     dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
18607                     background_min_overflow_address = min (background_min_overflow_address, oo);
18608                     background_max_overflow_address = max (background_max_overflow_address, oo);
18609                 }
18610             }
18611             else 
18612             {
18613                 uint8_t* start = oo;
18614                 if ((size_t)oo & 1)
18615                 {
18616                     oo = (uint8_t*)((size_t)oo & ~1);
18617                     start = *(--background_mark_stack_tos);
18618                     dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
18619                 }
18620 #ifdef COLLECTIBLE_CLASS
18621                 else
18622                 {
18623                     // If there's a class object, push it now. We are guaranteed to have the slot since
18624                     // we just popped one object off.
18625                     if (is_collectible (oo))
18626                     {
18627                         uint8_t* class_obj = get_class_object (oo);
18628                         if (background_mark (class_obj, 
18629                                             background_saved_lowest_address, 
18630                                             background_saved_highest_address))
18631                         {
18632                             size_t obj_size = size (class_obj);
18633                             bpromoted_bytes (thread) += obj_size;
18634
18635                             *(background_mark_stack_tos++) = class_obj;
18636                         }
18637                     }
18638
18639                     if (!contain_pointers (oo))
18640                     {
18641                         goto next_level;
18642                     }                    
18643                 }
18644 #endif //COLLECTIBLE_CLASS
18645
18646                 s = size (oo);
18647                 
18648                 BOOL overflow_p = FALSE;
18649             
18650                 if (background_mark_stack_tos + (num_partial_refs + 2)  >= mark_stack_limit)
18651                 {
18652                     size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
18653                     size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);
18654
18655                     dprintf (2, ("h%d: PM: %Id left, obj %Ix (mt: %Ix) start: %Ix, total: %Id", 
18656                         heap_number,
18657                         (size_t)(mark_stack_limit - background_mark_stack_tos),
18658                         oo,
18659                         method_table(oo), 
18660                         start,
18661                         num_pointers));
18662
18663                     bgc_overflow_count++;
18664                     overflow_p = TRUE;
18665                 }
18666                 if (overflow_p == FALSE)
18667                 {
18668                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18669
18670                     //push the object and its current scan position (start)
18671                     uint8_t** place = background_mark_stack_tos++;
18672                     *(place) = start;
18673                     *(background_mark_stack_tos++) = (uint8_t*)((size_t)oo | 1);
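                    // The background tuple is simpler than the foreground one:
                    // *place holds the resume address (start) and the slot
                    // above it holds the parent with bit 0 set, which is how
                    // scan_background_roots recognizes a partial-mark pair.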
18674
18675                     int i = num_partial_refs; 
18676
18677                     go_through_object (method_table(oo), oo, s, ppslot,
18678                                        start, use_start, (oo + s),
18679                     {
18680                         uint8_t* o = *ppslot;
18681                         Prefetch(o);
18682
18683                         if (background_mark (o, 
18684                                             background_saved_lowest_address, 
18685                                             background_saved_highest_address))
18686                         {
18687                             //m_boundary (o);
18688                             size_t obj_size = size (o);
18689                             bpromoted_bytes (thread) += obj_size;
18690                             if (contain_pointers_or_collectible (o))
18691                             {
18692                                 *(background_mark_stack_tos++) = o;
18693                                 if (--i == 0)
18694                                 {
18695                                     //update the start
18696                                     *place = (uint8_t*)(ppslot+1);
18697                                     goto more_to_do;
18698                                 }
18699
18700                             }
18701                         }
18702
18703                     }
18704                         );
18705                     //we are finished with this object
18706                     *place = 0; 
18707                     *(place+1) = 0;
18708
18709                 more_to_do:;
18710                 }
18711                 else
18712                 {
18713                     dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
18714                     background_min_overflow_address = min (background_min_overflow_address, oo);
18715                     background_max_overflow_address = max (background_max_overflow_address, oo);
18716                 }
18717             }
18718         }
18719 #ifdef SORT_MARK_STACK
18720         if (background_mark_stack_tos > sorted_tos + mark_stack_array_length/8)
18721         {
18722             rqsort1 (sorted_tos, background_mark_stack_tos-1);
18723             sorted_tos = background_mark_stack_tos-1;
18724         }
18725 #endif //SORT_MARK_STACK
18726
18727 #ifdef COLLECTIBLE_CLASS
18728 next_level:
18729 #endif // COLLECTIBLE_CLASS
18730         allow_fgc();
18731
18732         if (!(background_mark_stack_tos == background_mark_stack_array))
18733         {
18734             oo = *(--background_mark_stack_tos);
18735
18736 #ifdef SORT_MARK_STACK
18737             sorted_tos = (uint8_t**)min ((size_t)sorted_tos, (size_t)background_mark_stack_tos);
18738 #endif //SORT_MARK_STACK
18739         }
18740         else
18741             break;
18742     }
18743
18744     assert (background_mark_stack_tos == background_mark_stack_array);
18745
18746
18747 }
18748
18749 //this version is different from the foreground GC because
18750 //it can't keep pointers to the inside of an object
18751 //while calling background_mark_simple1. The object could be moved
18752 //by an intervening foreground gc.
18753 //this method assumes that o is in the [low, high[ range
18754 void
18755 gc_heap::background_mark_simple (uint8_t* o THREAD_NUMBER_DCL)
18756 {
18757 #ifdef MULTIPLE_HEAPS
18758 #else  //MULTIPLE_HEAPS
18759     const int thread = 0;
18760 #endif //MULTIPLE_HEAPS
18761     {
18762         dprintf (3, ("bmarking %Ix", o));
18763         
18764         if (background_mark1 (o))
18765         {
18766             //m_boundary (o);
18767             size_t s = size (o);
18768             bpromoted_bytes (thread) += s;
18769
18770             if (contain_pointers_or_collectible (o))
18771             {
18772                 background_mark_simple1 (o THREAD_NUMBER_ARG);
18773             }
18774         }
18775     }
18776 }
18777
18778 inline
18779 uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL)
18780 {
18781     if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
18782     {
18783         background_mark_simple (o THREAD_NUMBER_ARG);
18784     }
18785     else
18786     {
18787         if (o)
18788         {
18789             dprintf (3, ("or-%Ix", o));
18790         }
18791     }
18792     return o;
18793 }
18794
18795 void gc_heap::background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags)
18796 {
18797     UNREFERENCED_PARAMETER(sc);
18798
18799     assert (settings.concurrent);
18800     uint8_t* o = (uint8_t*)object;
18801
18802     gc_heap* hp = gc_heap::heap_of (o);
18803 #ifdef INTERIOR_POINTERS
18804     if (flags & GC_CALL_INTERIOR)
18805     {
18806         o = hp->find_object (o, background_saved_lowest_address);
18807     }
18808 #endif //INTERIOR_POINTERS
18809
18810     if (!background_object_marked (o, FALSE))
18811     {
18812         FATAL_GC_ERROR();
18813     }
18814 }
18815
18816 void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags)
18817 {
18818     UNREFERENCED_PARAMETER(sc);
18819     //in order to save space on the array, mark the object,
18820     //knowing that it will be visited later
18821     assert (settings.concurrent);
18822
18823     THREAD_NUMBER_FROM_CONTEXT;
18824 #ifndef MULTIPLE_HEAPS
18825     const int thread = 0;
18826 #endif //!MULTIPLE_HEAPS
18827
18828     uint8_t* o = (uint8_t*)*ppObject;
18829
18830     if (o == 0)
18831         return;
18832
18833 #ifdef DEBUG_DestroyedHandleValue
18834     // we can race with destroy handle during concurrent scan
18835     if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
18836         return;
18837 #endif //DEBUG_DestroyedHandleValue
18838
18839     HEAP_FROM_THREAD;
18840
18841     gc_heap* hp = gc_heap::heap_of (o);
18842
18843     if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
18844     {
18845         return;
18846     }
18847
18848 #ifdef INTERIOR_POINTERS
18849     if (flags & GC_CALL_INTERIOR)
18850     {
18851         o = hp->find_object (o, hp->background_saved_lowest_address);
18852         if (o == 0)
18853             return;
18854     }
18855 #endif //INTERIOR_POINTERS
18856
18857 #ifdef FEATURE_CONSERVATIVE_GC
18858     // For conservative GC, a value on stack may point to middle of a free object.
18859     // In this case, we don't need to promote the pointer.
18860     if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
18861     {
18862         return;
18863     }
18864 #endif //FEATURE_CONSERVATIVE_GC
18865
18866 #ifdef _DEBUG
18867     ((CObjectHeader*)o)->Validate();
18868 #endif //_DEBUG
18869
18870     dprintf (BGC_LOG, ("Background Promote %Ix", (size_t)o));
18871
18872     //needs to be called before the marking because it is possible for a foreground
18873     //gc to take place during the mark and move the object
18874     STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, "    GCHeap::Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);
18875
18876     hpt->background_mark_simple (o THREAD_NUMBER_ARG);
18877 }
18878
18879 //used by the ephemeral collection to scan the local background structures
18880 //containing references.
18881 void
18882 gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC)
18883 {
18884     ScanContext sc;
18885     if (pSC == 0)
18886         pSC = &sc;
18887
18888     pSC->thread_number = hn;
18889
18890 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
18891     pSC->pCurrentDomain = 0;
18892 #endif
18893
18894     BOOL relocate_p = (fn == &GCHeap::Relocate);
18895
18896     dprintf (3, ("Scanning background mark list"));
18897
18898     //scan mark_list
18899     size_t mark_list_finger = 0;
18900     while (mark_list_finger < c_mark_list_index)
18901     {
18902         uint8_t** o = &c_mark_list [mark_list_finger];
18903         if (!relocate_p)
18904         {
18905             // We may not be able to calculate the size during relocate as POPO
18906             // may have written over the object.
18907             size_t s = size (*o);
18908             assert (Align (s) >= Align (min_obj_size));
18909             dprintf(3,("background root %Ix", (size_t)*o));
18910         }
18911         (*fn) ((Object**)o, pSC, 0);
18912         mark_list_finger++;
18913     }
18914
18915     //scan the mark stack
18916     dprintf (3, ("Scanning background mark stack"));
18917
18918     uint8_t** finger = background_mark_stack_array;
18919     while (finger < background_mark_stack_tos)
18920     {
18921         if ((finger + 1) < background_mark_stack_tos)
18922         {
18923             // We need to check for the partial mark case here.
18924             uint8_t* parent_obj = *(finger + 1);
18925             if ((size_t)parent_obj & 1)
18926             {
18927                 uint8_t* place = *finger;
18928                 size_t place_offset = 0;
18929                 uint8_t* real_parent_obj = (uint8_t*)((size_t)parent_obj & ~1);
18930
18931                 if (relocate_p)
18932                 {
18933                     *(finger + 1) = real_parent_obj;
18934                     place_offset = place - real_parent_obj;
18935                     dprintf(3,("relocating background root %Ix", (size_t)real_parent_obj));
18936                     (*fn) ((Object**)(finger + 1), pSC, 0);
18937                     real_parent_obj = *(finger + 1);
18938                     *finger = real_parent_obj + place_offset;
18939                     *(finger + 1) = (uint8_t*)((size_t)real_parent_obj | 1);
18940                     dprintf(3,("roots changed to %Ix, %Ix", *finger, *(finger + 1)));
18941                 }
18942                 else
18943                 {
18944                     uint8_t** temp = &real_parent_obj;
18945                     dprintf(3,("marking background root %Ix", (size_t)real_parent_obj));
18946                     (*fn) ((Object**)temp, pSC, 0);
18947                 }
18948
18949                 finger += 2;
18950                 continue;
18951             }
18952         }
18953         dprintf(3,("background root %Ix", (size_t)*finger));
18954         (*fn) ((Object**)finger, pSC, 0);
18955         finger++;
18956     }
18957 }
18958
18959 inline
18960 void gc_heap::background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL)
18961 {
18962     if (contain_pointers (oo))
18963     {
18964         size_t total_refs = 0;
18965         size_t s = size (oo);
18966         go_through_object_nostart (method_table(oo), oo, s, po,
18967                           {
18968                             uint8_t* o = *po;
18969                             total_refs++;
18970                             background_mark_object (o THREAD_NUMBER_ARG);
18971                           }
18972             );
18973
18974         dprintf (3,("Background marking through %Ix went through %Id refs", 
18975                           (size_t)oo,
18976                            total_refs));
18977     }
18978 }
18979
18980 uint8_t* gc_heap::background_seg_end (heap_segment* seg, BOOL concurrent_p)
18981 {
18982     if (concurrent_p && (seg == saved_overflow_ephemeral_seg))
18983     {
18984         // for now we stop at the point where gen1 started when we began processing
18985         return background_min_soh_overflow_address;
18986     }
18987     else
18988     {
18989         return heap_segment_allocated (seg);
18990     }
18991 }
18992
18993 uint8_t* gc_heap::background_first_overflow (uint8_t* min_add,
18994                                           heap_segment* seg,
18995                                           BOOL concurrent_p, 
18996                                           BOOL small_object_p)
18997 {
18998     uint8_t* o = 0;
18999
19000     if (small_object_p)
19001     {
19002         if (in_range_for_segment (min_add, seg))
19003         {
19004             // min_add was the beginning of gen1 when we did the concurrent
19005             // overflow. Now we could be in a situation where min_add is
19006             // actually the same as allocated for that segment (because
19007             // we expanded the heap), in which case we cannot call
19008             // find_first_object on this address or we will AV.
19009             if (min_add >= heap_segment_allocated (seg))
19010             {
19011                 return min_add;
19012             }
19013             else
19014             {
19015                 if (concurrent_p && 
19016                     ((seg == saved_overflow_ephemeral_seg) && (min_add >= background_min_soh_overflow_address)))
19017                 {
19018                     return background_min_soh_overflow_address;
19019                 }
19020                 else
19021                 {
19022                     o = find_first_object (min_add, heap_segment_mem (seg));
19023                     return o;
19024                 }
19025             }
19026         }
19027     }
19028
19029     o = max (heap_segment_mem (seg), min_add);
19030     return o;
19031 }
19032
19033 void gc_heap::background_process_mark_overflow_internal (int condemned_gen_number,
19034                                                          uint8_t* min_add, uint8_t* max_add,
19035                                                          BOOL concurrent_p)
19036 {
19037     if (concurrent_p)
19038     {
19039         current_bgc_state = bgc_overflow_soh;
19040     }
19041
19042     size_t total_marked_objects = 0;
19043
19044 #ifdef MULTIPLE_HEAPS
19045     int thread = heap_number;
19046 #endif //MULTIPLE_HEAPS
19047
19048     exclusive_sync* loh_alloc_lock = 0;
19049
19050     dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
19051 #ifdef MULTIPLE_HEAPS
19052     // We don't have each heap scan all heaps concurrently because we are worried about
19053     // multiple threads calling things like find_first_object.
19054     int h_start = (concurrent_p ? heap_number : 0);
19055     int h_end = (concurrent_p ? (heap_number + 1) : n_heaps);
19056     for (int hi = h_start; hi < h_end; hi++)
19057     {
19058         gc_heap*  hp = (concurrent_p ? this : g_heaps [(heap_number + hi) % n_heaps]);
19059
19060 #else
19061     {
19062         gc_heap*  hp = 0;
19063
19064 #endif //MULTIPLE_HEAPS
19065         BOOL small_object_segments = TRUE;
19066         int align_const = get_alignment_constant (small_object_segments);
19067         generation* gen = hp->generation_of (condemned_gen_number);
19068         heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
19069         PREFIX_ASSUME(seg != NULL);
19070         loh_alloc_lock = hp->bgc_alloc_lock;
19071
19072         uint8_t* o = hp->background_first_overflow (min_add,
19073                                                     seg, 
19074                                                     concurrent_p, 
19075                                                     small_object_segments);
19076
19077         while (1)
19078         {
19079             while ((o < hp->background_seg_end (seg, concurrent_p)) && (o <= max_add))
19080             {
19081                 dprintf (3, ("considering %Ix", (size_t)o));
19082
19083                 size_t s;
19084
19085                 if (concurrent_p && !small_object_segments)
19086                 {
19087                     loh_alloc_lock->bgc_mark_set (o);
19088
19089                     if (((CObjectHeader*)o)->IsFree())
19090                     {
19091                         s = unused_array_size (o);
19092                     }
19093                     else
19094                     {
19095                         s = size (o);
19096                     }
19097                 }
19098                 else
19099                 {
19100                     s = size (o);
19101                 }
19102
19103                 if (background_object_marked (o, FALSE) && contain_pointers_or_collectible (o))
19104                 {
19105                     total_marked_objects++;
19106                     go_through_object_cl (method_table(o), o, s, poo,
19107                                           uint8_t* oo = *poo;
19108                                           background_mark_object (oo THREAD_NUMBER_ARG);
19109                                          );
19110                 }
19111
19112                 if (concurrent_p && !small_object_segments)
19113                 {
19114                     loh_alloc_lock->bgc_mark_done ();
19115                 }
19116
19117                 o = o + Align (s, align_const);
19118
19119                 if (concurrent_p)
19120                 {
19121                     allow_fgc();
19122                 }
19123             }
19124
19125             dprintf (2, ("went through overflow objects in segment %Ix (%d) (so far %Id marked)", 
19126                 heap_segment_mem (seg), (small_object_segments ? 0 : 1), total_marked_objects));
19127
19128             if ((concurrent_p && (seg == hp->saved_overflow_ephemeral_seg)) ||
19129                 (seg = heap_segment_next_in_range (seg)) == 0)
19130             {
19131                 if (small_object_segments)
19132                 {
19133                     if (concurrent_p)
19134                     {
19135                         current_bgc_state = bgc_overflow_loh;
19136                     }
19137
19138                     dprintf (2, ("h%d: SOH: ov-mo: %Id", heap_number, total_marked_objects));
19139                     fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
19140                     concurrent_print_time_delta (concurrent_p ? "Cov SOH" : "Nov SOH");
19141                     total_marked_objects = 0;
19142                     small_object_segments = FALSE;
19143                     align_const = get_alignment_constant (small_object_segments);
19144                     seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));
19145
19146                     PREFIX_ASSUME(seg != NULL);
19147
19148                     o = max (heap_segment_mem (seg), min_add);
19149                     continue;
19150                 }
19151                 else
19152                 {
19153                     dprintf (GTC_LOG, ("h%d: LOH: ov-mo: %Id", heap_number, total_marked_objects));
19154                     fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
19155                     break;
19156                 }
19157             } 
19158             else
19159             {
19160                 o = hp->background_first_overflow (min_add, 
19161                                                    seg, 
19162                                                    concurrent_p, 
19163                                                    small_object_segments);
19164                 continue;
19165             }
19166         }
19167     }
19168 }
19169
19170 BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p)
19171 {
19172     BOOL grow_mark_array_p = TRUE;
19173
19174     if (concurrent_p)
19175     {
19176         assert (!processed_soh_overflow_p);
19177
19178         if ((background_max_overflow_address != 0) &&
19179             (background_min_overflow_address != MAX_PTR))
19180         {
19181             // We have overflow to process but we know we can't process the ephemeral generations
19182             // now (we actually could process up to the current gen1 start, but since we are going to
19183             // make overflow per segment, for now we just stop at the saved gen1 start).
19184             saved_overflow_ephemeral_seg = ephemeral_heap_segment;
19185             background_max_soh_overflow_address = heap_segment_reserved (saved_overflow_ephemeral_seg);
19186             background_min_soh_overflow_address = generation_allocation_start (generation_of (max_generation-1));
19187         }
19188     }
19189     else
19190     {
19191         assert ((saved_overflow_ephemeral_seg == 0) || 
19192                 ((background_max_soh_overflow_address != 0) &&
19193                  (background_min_soh_overflow_address != MAX_PTR)));
19194         
19195         if (!processed_soh_overflow_p)
19196         {
19197             // if there was no more overflow we just need to process what we didn't process 
19198             // on the saved ephemeral segment.
19199             if ((background_max_overflow_address == 0) && (background_min_overflow_address == MAX_PTR))
19200             {
19201                 dprintf (2, ("final processing mark overflow - no more overflow since last time"));
19202                 grow_mark_array_p = FALSE;
19203             }
19204
19205             background_min_overflow_address = min (background_min_overflow_address, 
19206                                                 background_min_soh_overflow_address);
19207             background_max_overflow_address = max (background_max_overflow_address,
19208                                                 background_max_soh_overflow_address);
19209             processed_soh_overflow_p = TRUE;
19210         }
19211     }
19212
19213     BOOL  overflow_p = FALSE;
19214 recheck:
19215     if ((background_max_overflow_address != 0) ||
19216         (background_min_overflow_address != MAX_PTR))
19217     {
19218         overflow_p = TRUE;
19219
19220         if (grow_mark_array_p)
19221         {
19222             // Try to grow the array.
19223             size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length);
19224
19225             if ((new_size * sizeof(mark)) > 100*1024)
19226             {
19227                 size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);
19228
19229                 new_size = min(new_max_size, new_size);
19230             }
19231
19232             if ((background_mark_stack_array_length < new_size) && 
19233                 ((new_size - background_mark_stack_array_length) > (background_mark_stack_array_length / 2)))
19234             {
19235                 dprintf (2, ("h%d: ov grow to %Id", heap_number, new_size));
19236
19237                 uint8_t** tmp = new (nothrow) uint8_t* [new_size];
19238                 if (tmp)
19239                 {
19240                     delete[] background_mark_stack_array;
19241                     background_mark_stack_array = tmp;
19242                     background_mark_stack_array_length = new_size;
19243                     background_mark_stack_tos = background_mark_stack_array;
19244                 }
19245             }
19246         }
19247         else
19248         {
19249             grow_mark_array_p = TRUE;
19250         }
19251
19252         uint8_t*  min_add = background_min_overflow_address;
19253         uint8_t*  max_add = background_max_overflow_address;
19254
19255         background_max_overflow_address = 0;
19256         background_min_overflow_address = MAX_PTR;
19257
19258         background_process_mark_overflow_internal (max_generation, min_add, max_add, concurrent_p);
19259         if (!concurrent_p)
19260         {        
19261             goto recheck;
19262         }
19263     }
19264
19265     return overflow_p;
19266 }
19267
19268 #endif //BACKGROUND_GC
19269
19270 inline
19271 void gc_heap::mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL)
19272 {
19273 #ifndef COLLECTIBLE_CLASS
19274     UNREFERENCED_PARAMETER(mark_class_object_p);
19275     BOOL to_mark_class_object = FALSE;
19276 #else //COLLECTIBLE_CLASS
19277     BOOL to_mark_class_object = (mark_class_object_p && (is_collectible(oo)));
19278 #endif //COLLECTIBLE_CLASS
19279     if (contain_pointers (oo) || to_mark_class_object)
19280     {
19281         dprintf(3,( "Marking through %Ix", (size_t)oo));
19282         size_t s = size (oo);
19283
19284 #ifdef COLLECTIBLE_CLASS
19285         if (to_mark_class_object)
19286         {
19287             uint8_t* class_obj = get_class_object (oo);
19288             mark_object (class_obj THREAD_NUMBER_ARG);
19289         }
19290 #endif //COLLECTIBLE_CLASS
19291
19292         if (contain_pointers (oo))
19293         {
19294             go_through_object_nostart (method_table(oo), oo, s, po,
19295                                 uint8_t* o = *po;
19296                                 mark_object (o THREAD_NUMBER_ARG);
19297                                 );
19298         }
19299     }
19300 }
19301
19302 size_t gc_heap::get_total_heap_size()
19303 {
19304     size_t total_heap_size = 0;
19305
19306 #ifdef MULTIPLE_HEAPS
19307     int hn = 0;
19308
19309     for (hn = 0; hn < gc_heap::n_heaps; hn++)
19310     {
19311         gc_heap* hp2 = gc_heap::g_heaps [hn];
19312         total_heap_size += hp2->generation_size (max_generation + 1) + hp2->generation_sizes (hp2->generation_of (max_generation));
19313     }
19314 #else
19315     total_heap_size = generation_size (max_generation + 1) + generation_sizes (generation_of (max_generation));
19316 #endif //MULTIPLE_HEAPS
19317
19318     return total_heap_size;
19319 }
19320
19321 size_t gc_heap::get_total_fragmentation()
19322 {
19323     size_t total_fragmentation = 0;
19324
19325 #ifdef MULTIPLE_HEAPS
19326     for (int i = 0; i < gc_heap::n_heaps; i++)
19327     {
19328         gc_heap* hp = gc_heap::g_heaps[i];
19329 #else //MULTIPLE_HEAPS
19330     {
19331         gc_heap* hp = pGenGCHeap;
19332 #endif //MULTIPLE_HEAPS
19333         for (int i = 0; i <= (max_generation + 1); i++)
19334         {
19335             generation* gen = hp->generation_of (i);
19336             total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen));
19337         }
19338     }
19339
19340     return total_fragmentation;
19341 }
19342
19343 size_t gc_heap::committed_size()
19344 {
19345     generation* gen = generation_of (max_generation);
19346     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
19347     size_t total_committed = 0;
19348
19349     while (1)
19350     {
19351         total_committed += heap_segment_committed (seg) - (uint8_t*)seg;
19352
19353         seg = heap_segment_next (seg);
19354         if (!seg)
19355         {
19356             if (gen != large_object_generation)
19357             {
19358                 gen = generation_of (max_generation + 1);
19359                 seg = generation_start_segment (gen);
19360             }
19361             else
19362                 break;
19363         }
19364     }
19365
19366     return total_committed;
19367 }
19368
19369 size_t gc_heap::get_total_committed_size()
19370 {
19371     size_t total_committed = 0;
19372
19373 #ifdef MULTIPLE_HEAPS
19374     int hn = 0;
19375
19376     for (hn = 0; hn < gc_heap::n_heaps; hn++)
19377     {
19378         gc_heap* hp = gc_heap::g_heaps [hn];
19379         total_committed += hp->committed_size();
19380     }
19381 #else
19382     total_committed = committed_size();
19383 #endif //MULTIPLE_HEAPS
19384
19385     return total_committed;
19386 }
19387
19388 void gc_heap::get_memory_info (uint32_t* memory_load, 
19389                                uint64_t* available_physical,
19390                                uint64_t* available_page_file)
19391 {
19392     GCToOSInterface::GetMemoryStatus(memory_load, available_physical, available_page_file);
19393 }
19394
19395 void fire_mark_event (int heap_num, int root_type, size_t bytes_marked)
19396 {
19397     dprintf (DT_LOG_0, ("-----------[%d]mark %d: %Id", heap_num, root_type, bytes_marked));
19398     FIRE_EVENT(GCMarkWithType, heap_num, root_type, bytes_marked);
19399 }
19400
19401 //returns TRUE if an overflow happened.
19402 BOOL gc_heap::process_mark_overflow(int condemned_gen_number)
19403 {
19404     size_t last_promoted_bytes = promoted_bytes (heap_number);
19405     BOOL  overflow_p = FALSE;
19406 recheck:
19407     if ((max_overflow_address != 0) ||
19408         (min_overflow_address != MAX_PTR))
19409     {
19410         overflow_p = TRUE;
19411         // Try to grow the array.
19412         size_t new_size =
19413             max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length);
19414
19415         if ((new_size * sizeof(mark)) > 100*1024)
19416         {
19417             size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);
19418
19419             new_size = min(new_max_size, new_size);
19420         }
19421
19422         if ((mark_stack_array_length < new_size) && 
19423             ((new_size - mark_stack_array_length) > (mark_stack_array_length / 2)))
19424         {
19425             mark* tmp = new (nothrow) mark [new_size];
19426             if (tmp)
19427             {
19428                 delete mark_stack_array;
19429                 mark_stack_array = tmp;
19430                 mark_stack_array_length = new_size;
19431             }
19432         }
19433
19434         uint8_t*  min_add = min_overflow_address;
19435         uint8_t*  max_add = max_overflow_address;
19436         max_overflow_address = 0;
19437         min_overflow_address = MAX_PTR;
19438         process_mark_overflow_internal (condemned_gen_number, min_add, max_add);
19439         goto recheck;
19440     }
19441
19442     size_t current_promoted_bytes = promoted_bytes (heap_number);
19443
19444     if (current_promoted_bytes != last_promoted_bytes)
19445         fire_mark_event (heap_number, ETW::GC_ROOT_OVERFLOW, (current_promoted_bytes - last_promoted_bytes));
19446     return overflow_p;
19447 }
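
// Illustrative walk-through of the growth policy above (all numbers are
// hypothetical; sizeof(mark) varies by target): with a current length of
// 8192 entries, a 32-byte mark entry and a 200 MB total heap,
// new_size = max (MARK_STACK_INITIAL_LENGTH, 2 * 8192) = 16384. Since
// 16384 * 32 bytes exceeds 100 KB, the cap (200 MB / 10) / 32 = 655360
// entries is computed but does not bind. The increase (8192 entries) is
// more than half the current length, so the array is reallocated;
// otherwise the modest gain would not justify throwing the old array away.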
19448
19449 void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
19450                                               uint8_t* min_add, uint8_t* max_add)
19451 {
19452 #ifdef MULTIPLE_HEAPS
19453     int thread = heap_number;
19454 #endif //MULTIPLE_HEAPS
19455     BOOL  full_p = (condemned_gen_number == max_generation);
19456
19457     dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
19458 #ifdef MULTIPLE_HEAPS
19459     for (int hi = 0; hi < n_heaps; hi++)
19460     {
19461         gc_heap*  hp = g_heaps [(heap_number + hi) % n_heaps];
19462
19463 #else
19464     {
19465         gc_heap*  hp = 0;
19466
19467 #endif //MULTIPLE_HEAPS
19468         BOOL small_object_segments = TRUE;
19469         int align_const = get_alignment_constant (small_object_segments);
19470         generation* gen = hp->generation_of (condemned_gen_number);
19471         heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
19472         
19473         PREFIX_ASSUME(seg != NULL);
19474         uint8_t*  o = max (heap_segment_mem (seg), min_add);
19475         while (1)
19476         {
19477             uint8_t*  end = heap_segment_allocated (seg);
19478
19479             while ((o < end) && (o <= max_add))
19480             {
19481                 assert ((min_add <= o) && (max_add >= o));
19482                 dprintf (3, ("considering %Ix", (size_t)o));
19483                 if (marked (o))
19484                 {
19485                     mark_through_object (o, TRUE THREAD_NUMBER_ARG);
19486                 }
19487
19488                 o = o + Align (size (o), align_const);
19489             }
19490
19491             if (( seg = heap_segment_next_in_range (seg)) == 0)
19492             {
19493                 if (small_object_segments && full_p)
19494                 {
19495                     small_object_segments = FALSE;
19496                     align_const = get_alignment_constant (small_object_segments);
19497                     seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));
19498
19499                     PREFIX_ASSUME(seg != NULL);
19500
19501                     o = max (heap_segment_mem (seg), min_add);
19502                     continue;
19503                 }
19504                 else
19505                 {
19506                     break;
19507                 } 
19508             } 
19509             else
19510             {
19511                 o = max (heap_segment_mem (seg), min_add);
19512                 continue;
19513             }
19514         }
19515     }
19516 }
19517
19518 // Scanning dependent handles for promotion needs special handling: because the primary holds a strong
19519 // reference to the secondary (when the primary itself is reachable), promotions can cascade (the
19520 // secondary of one handle is, or promotes, the primary of another), so we might need to perform the
19521 // promotion scan multiple times.
19522 // This helper encapsulates the logic to complete all dependent handle promotions when running a server GC. It
19523 // also has the effect of processing any mark stack overflow.
19524
19525 #ifdef MULTIPLE_HEAPS
19526 // When multiple heaps are enabled we must use a more complex algorithm to keep all the GC worker
19527 // threads synchronized. The algorithms are sufficiently divergent that we have different
19528 // implementations based on whether MULTIPLE_HEAPS is defined or not.
19529 //
19530 // Define some static variables used for synchronization in the method below. These should really be defined
19531 // locally but MSVC complains when the VOLATILE macro is expanded into an instantiation of the Volatile class.
19532 //
19533 // A note about the synchronization used within this method. Communication between the worker threads is
19534 // achieved via two shared booleans (defined below). These both act as latches that are transitioned only from
19535 // false -> true by unsynchronized code. They are only read or reset to false by a single thread under the
19536 // protection of a join.
19537 static VOLATILE(BOOL) s_fUnpromotedHandles = FALSE;
19538 static VOLATILE(BOOL) s_fUnscannedPromotions = FALSE;
19539 static VOLATILE(BOOL) s_fScanRequired;
19540 void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
19541 {
19542     // Whenever we call this method there may have been preceding object promotions. So set
19543     // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
19544     // based on how the scanning proceeded).
19545     s_fUnscannedPromotions = TRUE;
19546
19547     // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
19548     // the state of this thread's portion of the dependent handle table. That's because promotions on other
19549     // threads could cause handle promotions to become necessary here. Even if there are definitely no more
19550     // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
19551     // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
19552     // as all the others or they'll get out of step).
19553     while (true)
19554     {
19555         // The various worker threads are all currently racing in this code. We need to work out if at least
19556         // one of them thinks it has work to do this cycle. Each thread needs to rescan its portion of the
19557         // dependent handle table when both of the following conditions apply:
19558         //  1) At least one (arbitrary) object might have been promoted since the last scan (because if this
19559         //     object happens to correspond to a primary in one of our handles we might potentially have to
19560         //     promote the associated secondary).
19561         //  2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
19562         //
19563         // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
19564         // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
19565         // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
19566         // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
19567         // follows below. Note that we can't read this outside of the join since on any iteration apart from
19568         // the first threads will be racing between reading this value and completing their previous
19569         // iteration's table scan.
19570         //
19571         // The second condition is tracked by the dependent handle code itself on a per worker thread basis
19572         // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
19573         // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
19574         // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
19575         // we're safely joined.
19576         if (GCScan::GcDhUnpromotedHandlesExist(sc))
19577             s_fUnpromotedHandles = TRUE;
19578
19579         // Synchronize all the threads so we can read our state variables safely. The shared variable
19580         // s_fScanRequired, indicating whether we should scan the tables or terminate the loop, will be set by
19581         // a single thread inside the join.
19582         gc_t_join.join(this, gc_join_scan_dependent_handles);
19583         if (gc_t_join.joined())
19584         {
19585             // We're synchronized so it's safe to read our shared state variables. We update another shared
19586             // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
19587             // the loop. We scan if there has been at least one object promotion since last time and at least
19588             // one thread has a dependent handle table with a potential handle promotion possible.
19589             s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;
19590
19591             // Reset our shared state variables (ready to be set again on this scan or with a good initial
19592             // value for the next call if we're terminating the loop).
19593             s_fUnscannedPromotions = FALSE;
19594             s_fUnpromotedHandles = FALSE;
19595
19596             if (!s_fScanRequired)
19597             {
19598                 // We're terminating the loop. Perform any last operations that require single threaded access.
19599                 if (!initial_scan_p)
19600                 {
19601                     // On the second invocation we reconcile all mark overflow ranges across the heaps. This can help
19602                     // balance the load if some of the heaps have an abnormally large workload.
19603                     uint8_t* all_heaps_max = 0;
19604                     uint8_t* all_heaps_min = MAX_PTR;
19605                     int i;
19606                     for (i = 0; i < n_heaps; i++)
19607                     {
19608                         if (all_heaps_max < g_heaps[i]->max_overflow_address)
19609                             all_heaps_max = g_heaps[i]->max_overflow_address;
19610                         if (all_heaps_min > g_heaps[i]->min_overflow_address)
19611                             all_heaps_min = g_heaps[i]->min_overflow_address;
19612                     }
19613                     for (i = 0; i < n_heaps; i++)
19614                     {
19615                         g_heaps[i]->max_overflow_address = all_heaps_max;
19616                         g_heaps[i]->min_overflow_address = all_heaps_min;
19617                     }
19618                 }
19619             }
19620
19621             // Restart all the workers.
19622             dprintf(3, ("Starting all gc thread mark stack overflow processing"));
19623             gc_t_join.restart();
19624         }
19625
19626         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
19627         // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
19628         // global flag indicating that at least one object promotion may have occurred (the usual comment
19629         // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
19630         // exit the method since we unconditionally set this variable on method entry anyway).
19631         if (process_mark_overflow(condemned_gen_number))
19632             s_fUnscannedPromotions = TRUE;
19633
19634         // If we decided that no scan was required we can terminate the loop now.
19635         if (!s_fScanRequired)
19636             break;
19637
19638         // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
19639         // processed before we start scanning dependent handle tables (if overflows remain while we scan we
19640         // could miss noting the promotion of some primary objects).
19641         gc_t_join.join(this, gc_join_rescan_dependent_handles);
19642         if (gc_t_join.joined())
19643         {
19644             // Restart all the workers.
19645             dprintf(3, ("Starting all gc thread for dependent handle promotion"));
19646             gc_t_join.restart();
19647         }
19648
19649         // If the portion of the dependent handle table managed by this worker has handles that could still be
19650         // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
19651         // could require a rescan of handles on this or other workers.
19652         if (GCScan::GcDhUnpromotedHandlesExist(sc))
19653             if (GCScan::GcDhReScan(sc))
19654                 s_fUnscannedPromotions = TRUE;
19655     }
19656 }
19657 #else //MULTIPLE_HEAPS
19658 // Non-multiple heap version of scan_dependent_handles: much simpler without the need to keep multiple worker
19659 // threads synchronized.
19660 void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
19661 {
19662     UNREFERENCED_PARAMETER(initial_scan_p);
19663
19664     // Whenever we call this method there may have been preceding object promotions. So set
19665     // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
19666     // based on how the scanning proceeded).
19667     bool fUnscannedPromotions = true;
19668
19669     // Loop until there are either no more dependent handles that can have their secondary promoted or we've
19670     // managed to perform a scan without promoting anything new.
19671     while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
19672     {
19673         // On each iteration of the loop start with the assumption that no further objects have been promoted.
19674         fUnscannedPromotions = false;
19675
19676         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
19677         // being visible. If there was an overflow (process_mark_overflow returned true) then additional
19678         // objects now appear to be promoted and we should set the flag.
19679         if (process_mark_overflow(condemned_gen_number))
19680             fUnscannedPromotions = true;
19681
19682         // Perform the scan and set the flag if any promotions resulted.
19683         if (GCScan::GcDhReScan(sc))
19684             fUnscannedPromotions = true;
19685     }
19686
19687     // Process any mark stack overflow that may have resulted from scanning handles (or if we didn't need to
19688     // scan any handles at all this is the processing of overflows that may have occurred prior to this method
19689     // invocation).
19690     process_mark_overflow(condemned_gen_number);
19691 }
19692 #endif //MULTIPLE_HEAPS
19693
19694 void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
19695 {
19696     assert (settings.concurrent == FALSE);
19697
19698     ScanContext sc;
19699     sc.thread_number = heap_number;
19700     sc.promotion = TRUE;
19701     sc.concurrent = FALSE;
19702
19703     dprintf(2,("---- Mark Phase condemning %d ----", condemned_gen_number));
19704     BOOL  full_p = (condemned_gen_number == max_generation);
19705
19706 #ifdef TIME_GC
19707     unsigned start;
19708     unsigned finish;
19709     start = GetCycleCount32();
19710 #endif //TIME_GC
19711
19712     int gen_to_init = condemned_gen_number;
19713     if (condemned_gen_number == max_generation)
19714     {
19715         gen_to_init = max_generation + 1;
19716     }
19717     for (int gen_idx = 0; gen_idx <= gen_to_init; gen_idx++)
19718     {
19719         dynamic_data* dd = dynamic_data_of (gen_idx);
19720         dd_begin_data_size (dd) = generation_size (gen_idx) - 
19721                                    dd_fragmentation (dd) -
19722                                    Align (size (generation_allocation_start (generation_of (gen_idx))));
19723         dprintf (2, ("begin data size for gen%d is %Id", gen_idx, dd_begin_data_size (dd)));
19724         dd_survived_size (dd) = 0;
19725         dd_pinned_survived_size (dd) = 0;
19726         dd_artificial_pinned_survived_size (dd) = 0;
19727         dd_added_pinned_size (dd) = 0;
19728 #ifdef SHORT_PLUGS
19729         dd_padding_size (dd) = 0;
19730 #endif //SHORT_PLUGS
19731 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
19732         dd_num_npinned_plugs (dd) = 0;
19733 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
19734     }
19735
19736 #ifdef FFIND_OBJECT
19737     if (gen0_must_clear_bricks > 0)
19738         gen0_must_clear_bricks--;
19739 #endif //FFIND_OBJECT
19740
19741     size_t last_promoted_bytes = 0;
19742
19743     promoted_bytes (heap_number) = 0;
19744     reset_mark_stack();
19745
19746 #ifdef SNOOP_STATS
19747     memset (&snoop_stat, 0, sizeof(snoop_stat));
19748     snoop_stat.heap_index = heap_number;
19749 #endif //SNOOP_STATS
19750
19751 #ifdef MH_SC_MARK
19752     if (full_p)
19753     {
19754         //initialize the mark stack
19755         for (int i = 0; i < max_snoop_level; i++)
19756         {
19757             ((uint8_t**)(mark_stack_array))[i] = 0;
19758         }
19759
19760         mark_stack_busy() = 1;
19761     }
19762 #endif //MH_SC_MARK
19763
19764     static uint32_t num_sizedrefs = 0;
19765
19766 #ifdef MH_SC_MARK
19767     static BOOL do_mark_steal_p = FALSE;
19768 #endif //MH_SC_MARK
19769
19770 #ifdef MULTIPLE_HEAPS
19771     gc_t_join.join(this, gc_join_begin_mark_phase);
19772     if (gc_t_join.joined())
19773     {
19774 #endif //MULTIPLE_HEAPS
19775
19776         maxgen_size_inc_p = false;
19777
19778         num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
19779
19780 #ifdef MULTIPLE_HEAPS
19781
19782 #ifdef MH_SC_MARK
19783         if (full_p)
19784         {
19785             size_t total_heap_size = get_total_heap_size();
19786
19787             if (total_heap_size > (100 * 1024 * 1024))
19788             {
19789                 do_mark_steal_p = TRUE;
19790             }
19791             else
19792             {
19793                 do_mark_steal_p = FALSE;
19794             }
19795         }
19796         else
19797         {
19798             do_mark_steal_p = FALSE;
19799         }
19800 #endif //MH_SC_MARK
19801
19802         gc_t_join.restart();
19803     }
19804 #endif //MULTIPLE_HEAPS
19805
19806     {
19807
19808 #ifdef MARK_LIST
19809         //set up the mark lists from g_mark_list
19810         assert (g_mark_list);
19811 #ifdef MULTIPLE_HEAPS
19812         mark_list = &g_mark_list [heap_number*mark_list_size];
19813 #else
19814         mark_list = g_mark_list;
19815 #endif //MULTIPLE_HEAPS
19816         //don't use the mark list for full GC
19817         //because multiple segments are more complex to handle and the list
19818         //is likely to overflow
19819         if (condemned_gen_number != max_generation)
19820             mark_list_end = &mark_list [mark_list_size-1];
19821         else
19822             mark_list_end = &mark_list [0];
19823         mark_list_index = &mark_list [0];
19824 #endif //MARK_LIST
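
        // Hedged note on the trick above: with mark_list_end pointing at the
        // first slot the list looks (almost immediately) full, so marking
        // simply stops recording entries. This is safe because the mark list
        // is only an optimization for locating marked objects later; when it
        // is full or overflowed the planning code falls back to walking the
        // heap segments instead.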
19825
19826 #ifndef MULTIPLE_HEAPS
19827         shigh = (uint8_t*) 0;
19828         slow  = MAX_PTR;
19829 #endif //MULTIPLE_HEAPS
19830
19831         //%type%  category = quote (mark);
19832
19833         if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
19834         {
19835             GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
19836             fire_mark_event (heap_number, ETW::GC_ROOT_SIZEDREF, (promoted_bytes (heap_number) - last_promoted_bytes));
19837             last_promoted_bytes = promoted_bytes (heap_number);
19838
19839 #ifdef MULTIPLE_HEAPS
19840             gc_t_join.join(this, gc_join_scan_sizedref_done);
19841             if (gc_t_join.joined())
19842             {
19843                 dprintf(3, ("Done with marking all sized refs. Starting all gc thread for marking other strong roots"));
19844                 gc_t_join.restart();
19845             }
19846 #endif //MULTIPLE_HEAPS
19847         }
19848     
19849         dprintf(3,("Marking Roots"));
19850
19851         GCScan::GcScanRoots(GCHeap::Promote,
19852                                 condemned_gen_number, max_generation,
19853                                 &sc);
19854
19855         fire_mark_event (heap_number, ETW::GC_ROOT_STACK, (promoted_bytes (heap_number) - last_promoted_bytes));
19856         last_promoted_bytes = promoted_bytes (heap_number);
19857
19858 #ifdef BACKGROUND_GC
19859         if (recursive_gc_sync::background_running_p())
19860         {
19861             scan_background_roots (GCHeap::Promote, heap_number, &sc);
19862         }
19863 #endif //BACKGROUND_GC
19864
19865 #ifdef FEATURE_PREMORTEM_FINALIZATION
19866         dprintf(3, ("Marking finalization data"));
19867         finalize_queue->GcScanRoots(GCHeap::Promote, heap_number, 0);
19868 #endif // FEATURE_PREMORTEM_FINALIZATION
19869
19870         fire_mark_event (heap_number, ETW::GC_ROOT_FQ, (promoted_bytes (heap_number) - last_promoted_bytes));
19871         last_promoted_bytes = promoted_bytes (heap_number);
19872
19873 // MTHTS
19874         {
19875
19876             dprintf(3,("Marking handle table"));
19877             GCScan::GcScanHandles(GCHeap::Promote,
19878                                       condemned_gen_number, max_generation,
19879                                       &sc);
19880             fire_mark_event (heap_number, ETW::GC_ROOT_HANDLES, (promoted_bytes (heap_number) - last_promoted_bytes));
19881             last_promoted_bytes = promoted_bytes (heap_number);
19882         }
19883
19884 #ifdef TRACE_GC
19885         size_t promoted_before_cards = promoted_bytes (heap_number);
19886 #endif //TRACE_GC
19887
19888         dprintf (3, ("before cards: %Id", promoted_before_cards));
19889         if (!full_p)
19890         {
19891 #ifdef CARD_BUNDLE
19892 #ifdef MULTIPLE_HEAPS
19893             if (gc_t_join.r_join(this, gc_r_join_update_card_bundle))
19894             {
19895 #endif //MULTIPLE_HEAPS
19896
19897 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
19898                 // If we are manually managing card bundles, every write to the card table should already be
19899                 // accounted for in the card bundle table so there's nothing to update here.
19900                 update_card_table_bundle();
19901 #endif
19902                 if (card_bundles_enabled())
19903                 {
19904                     verify_card_bundles();
19905                 }
19906
19907 #ifdef MULTIPLE_HEAPS
19908                 gc_t_join.r_restart();
19909             }
19910 #endif //MULTIPLE_HEAPS
19911 #endif //CARD_BUNDLE
19912
19913             card_fn mark_object_fn = &gc_heap::mark_object_simple;
19914 #ifdef HEAP_ANALYZE
19915             heap_analyze_success = TRUE;
19916             if (heap_analyze_enabled)
19917             {
19918                 internal_root_array_index = 0;
19919                 current_obj = 0;
19920                 current_obj_size = 0;
19921                 mark_object_fn = &gc_heap::ha_mark_object_simple;
19922             }
19923 #endif //HEAP_ANALYZE
19924
19925             dprintf(3,("Marking cross generation pointers"));
19926             mark_through_cards_for_segments (mark_object_fn, FALSE);
19927
19928             dprintf(3,("Marking cross generation pointers for large objects"));
19929             mark_through_cards_for_large_objects (mark_object_fn, FALSE);
19930
19931             dprintf (3, ("marked by cards: %Id", 
19932                 (promoted_bytes (heap_number) - promoted_before_cards)));
19933             fire_mark_event (heap_number, ETW::GC_ROOT_OLDER, (promoted_bytes (heap_number) - last_promoted_bytes));
19934             last_promoted_bytes = promoted_bytes (heap_number);
19935         }
19936     }
19937
19938 #ifdef MH_SC_MARK
19939     if (do_mark_steal_p)
19940     {
19941         mark_steal();
19942     }
19943 #endif //MH_SC_MARK
19944
19945     // Dependent handles need to be scanned with a special algorithm (see the header comment on
19946     // scan_dependent_handles for more detail). We perform an initial scan without synchronizing with other
19947     // worker threads or processing any mark stack overflow. This is not guaranteed to complete the operation
19948     // but in a common case (where there are no dependent handles that are due to be collected) it allows us
19949     // to optimize away further scans. The call to scan_dependent_handles is what will cycle through more
19950     // iterations if required and will also perform processing of any mark stack overflow once the dependent
19951     // handle table has been fully promoted.
19952     GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
19953     scan_dependent_handles(condemned_gen_number, &sc, true);
19954
19955 #ifdef MULTIPLE_HEAPS
19956     dprintf(3, ("Joining for short weak handle scan"));
19957     gc_t_join.join(this, gc_join_null_dead_short_weak);
19958     if (gc_t_join.joined())
19959 #endif //MULTIPLE_HEAPS
19960     {
19961 #ifdef HEAP_ANALYZE
19962         heap_analyze_enabled = FALSE;
19963         GCToEEInterface::AnalyzeSurvivorsFinished(condemned_gen_number);
19964 #endif // HEAP_ANALYZE
19965         GCToEEInterface::AfterGcScanRoots (condemned_gen_number, max_generation, &sc);
19966
19967 #ifdef MULTIPLE_HEAPS
19968         if (!full_p)
19969         {
19970             // we used r_join and need to reinitialize states for it here.
19971             gc_t_join.r_init();
19972         }
19973
19974         //start all threads on the roots.
19975         dprintf(3, ("Starting all gc thread for short weak handle scan"));
19976         gc_t_join.restart();
19977 #endif //MULTIPLE_HEAPS
19978
19979     }
19980
19981     // null out the targets of short weakrefs that were not promoted.
19982     GCScan::GcShortWeakPtrScan(GCHeap::Promote, condemned_gen_number, max_generation,&sc);
19983
19984 // MTHTS: keep by single thread
19985 #ifdef MULTIPLE_HEAPS
19986     dprintf(3, ("Joining for finalization"));
19987     gc_t_join.join(this, gc_join_scan_finalization);
19988     if (gc_t_join.joined())
19989 #endif //MULTIPLE_HEAPS
19990
19991     {
19992 #ifdef MULTIPLE_HEAPS
19993         //start all threads on the roots.
19994         dprintf(3, ("Starting all gc thread for Finalization"));
19995         gc_t_join.restart();
19996 #endif //MULTIPLE_HEAPS
19997     }
19998
19999     //Handle finalization.
20000     size_t promoted_bytes_live = promoted_bytes (heap_number);
20001
20002 #ifdef FEATURE_PREMORTEM_FINALIZATION
20003     dprintf (3, ("Finalize marking"));
20004     finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this);
20005
20006     GCToEEInterface::DiagWalkFReachableObjects(__this);
20007 #endif // FEATURE_PREMORTEM_FINALIZATION
20008
20009     // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
20010     // for finalization. As before scan_dependent_handles will also process any mark stack overflow.
20011     scan_dependent_handles(condemned_gen_number, &sc, false);
20012
20013 #ifdef MULTIPLE_HEAPS
20014     dprintf(3, ("Joining for weak pointer deletion"));
20015     gc_t_join.join(this, gc_join_null_dead_long_weak);
20016     if (gc_t_join.joined())
20017     {
20018         //start all threads on the roots.
20019         dprintf(3, ("Starting all gc thread for weak pointer deletion"));
20020         gc_t_join.restart();
20021     }
20022 #endif //MULTIPLE_HEAPS
20023
20024     // null out the targets of long weakrefs that were not promoted.
20025     GCScan::GcWeakPtrScan (GCHeap::Promote, condemned_gen_number, max_generation, &sc);
20026
20027 // MTHTS: keep by single thread
20028 #ifdef MULTIPLE_HEAPS
20029 #ifdef MARK_LIST
20030 #ifdef PARALLEL_MARK_LIST_SORT
20031 //    unsigned long start = GetCycleCount32();
20032     sort_mark_list();
20033 //    printf("sort_mark_list took %u cycles\n", GetCycleCount32() - start);
20034 #endif //PARALLEL_MARK_LIST_SORT
20035 #endif //MARK_LIST
20036
20037     dprintf (3, ("Joining for sync block cache entry scanning"));
20038     gc_t_join.join(this, gc_join_null_dead_syncblk);
20039     if (gc_t_join.joined())
20040 #endif //MULTIPLE_HEAPS
20041     {
20042         // scan for deleted entries in the syncblk cache
20043         GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
20044
20045 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
20046         if (g_fEnableAppDomainMonitoring)
20047         {
20048             size_t promoted_all_heaps = 0;
20049 #ifdef MULTIPLE_HEAPS
20050             for (int i = 0; i < n_heaps; i++)
20051             {
20052                 promoted_all_heaps += promoted_bytes (i);
20053             }
20054 #else
20055             promoted_all_heaps = promoted_bytes (heap_number);
20056 #endif //MULTIPLE_HEAPS
20057             GCToEEInterface::RecordTotalSurvivedBytes(promoted_all_heaps);
20058         }
20059 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
20060
20061 #ifdef MULTIPLE_HEAPS
20062
20063 #ifdef MARK_LIST
20064 #ifndef PARALLEL_MARK_LIST_SORT
20065         //compact g_mark_list and sort it.
20066         combine_mark_lists();
20067 #endif //PARALLEL_MARK_LIST_SORT
20068 #endif //MARK_LIST
20069
20070         //decide on promotion
20071         if (!settings.promotion)
20072         {
20073             size_t m = 0;
20074             for (int n = 0; n <= condemned_gen_number;n++)
20075             {
20076                 m +=  (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.1);
20077             }
20078
20079             for (int i = 0; i < n_heaps; i++)
20080             {
20081                 dynamic_data* dd = g_heaps[i]->dynamic_data_of (min (condemned_gen_number +1,
20082                                                                      max_generation));
20083                 size_t older_gen_size = (dd_current_size (dd) +
20084                                          (dd_desired_allocation (dd) -
20085                                          dd_new_allocation (dd)));
20086
20087                 if ((m > (older_gen_size)) ||
20088                     (promoted_bytes (i) > m))
20089                 {
20090                     settings.promotion = TRUE;
20091                 }
20092             }
20093         }
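
        // Worked example for the threshold above (sizes are hypothetical):
        // condemning gen1 with dd_min_size(gen0) = 256 KB and
        // dd_min_size(gen1) = 128 KB gives m = 256K*1*0.1 + 128K*2*0.1,
        // roughly 51 KB. Promotion is then forced either because the next
        // older generation is still smaller than m (too small to be worth
        // sweeping into) or because some heap promoted more than m bytes.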
20094
20095 #ifdef SNOOP_STATS
20096         if (do_mark_steal_p)
20097         {
20098             size_t objects_checked_count = 0;
20099             size_t zero_ref_count = 0;
20100             size_t objects_marked_count = 0;
20101             size_t check_level_count = 0;
20102             size_t busy_count = 0;
20103             size_t interlocked_count = 0;
20104             size_t partial_mark_parent_count = 0;
20105             size_t stolen_or_pm_count = 0; 
20106             size_t stolen_entry_count = 0; 
20107             size_t pm_not_ready_count = 0; 
20108             size_t normal_count = 0;
20109             size_t stack_bottom_clear_count = 0;
20110
20111             for (int i = 0; i < n_heaps; i++)
20112             {
20113                 gc_heap* hp = g_heaps[i];
20114                 hp->print_snoop_stat();
20115                 objects_checked_count += hp->snoop_stat.objects_checked_count;
20116                 zero_ref_count += hp->snoop_stat.zero_ref_count;
20117                 objects_marked_count += hp->snoop_stat.objects_marked_count;
20118                 check_level_count += hp->snoop_stat.check_level_count;
20119                 busy_count += hp->snoop_stat.busy_count;
20120                 interlocked_count += hp->snoop_stat.interlocked_count;
20121                 partial_mark_parent_count += hp->snoop_stat.partial_mark_parent_count;
20122                 stolen_or_pm_count += hp->snoop_stat.stolen_or_pm_count;
20123                 stolen_entry_count += hp->snoop_stat.stolen_entry_count;
20124                 pm_not_ready_count += hp->snoop_stat.pm_not_ready_count;
20125                 normal_count += hp->snoop_stat.normal_count;
20126                 stack_bottom_clear_count += hp->snoop_stat.stack_bottom_clear_count;
20127             }
20128
20129             fflush (stdout);
20130
20131             printf ("-------total stats-------\n");
20132             printf ("%8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", 
20133                 "checked", "zero", "marked", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
20134             printf ("%8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
20135                 objects_checked_count,
20136                 zero_ref_count,
20137                 objects_marked_count,
20138                 check_level_count,
20139                 busy_count,
20140                 interlocked_count,
20141                 partial_mark_parent_count,
20142                 stolen_or_pm_count,
20143                 stolen_entry_count,
20144                 pm_not_ready_count,
20145                 normal_count,
20146                 stack_bottom_clear_count);
20147         }
20148 #endif //SNOOP_STATS
20149
20150         //start all threads.
20151         dprintf(3, ("Starting all threads for end of mark phase"));
20152         gc_t_join.restart();
20153 #else //MULTIPLE_HEAPS
20154
20155         //decide on promotion
20156         if (!settings.promotion)
20157         {
20158             size_t m = 0;
20159             for (int n = 0; n <= condemned_gen_number;n++)
20160             {
20161                 m +=  (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.06);
20162             }
20163             dynamic_data* dd = dynamic_data_of (min (condemned_gen_number +1,
20164                                                      max_generation));
20165             size_t older_gen_size = (dd_current_size (dd) +
20166                                      (dd_desired_allocation (dd) -
20167                                      dd_new_allocation (dd)));
20168
20169             dprintf (2, ("promotion threshold: %Id, promoted bytes: %Id size n+1: %Id",
20170                          m, promoted_bytes (heap_number), older_gen_size));
20171
20172             if ((m > older_gen_size) ||
20173                     (promoted_bytes (heap_number) > m))
20174             {
20175                 settings.promotion = TRUE;
20176             }
20177         }
20178
20179 #endif //MULTIPLE_HEAPS
20180     }
20181
20182 #ifdef MULTIPLE_HEAPS
20183 #ifdef MARK_LIST
20184 #ifdef PARALLEL_MARK_LIST_SORT
20185 //    start = GetCycleCount32();
20186     merge_mark_lists();
20187 //    printf("merge_mark_lists took %u cycles\n", GetCycleCount32() - start);
20188 #endif //PARALLEL_MARK_LIST_SORT
20189 #endif //MARK_LIST
20190 #endif //MULTIPLE_HEAPS
20191
20192 #ifdef BACKGROUND_GC
20193     total_promoted_bytes = promoted_bytes (heap_number);
20194 #endif //BACKGROUND_GC
20195
20196     promoted_bytes (heap_number) -= promoted_bytes_live;
20197
20198 #ifdef TIME_GC
20199         finish = GetCycleCount32();
20200         mark_time = finish - start;
20201 #endif //TIME_GC
20202
20203     dprintf(2,("---- End of mark phase ----"));
20204 }
20205
20206 inline
20207 void gc_heap::pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high)
20208 {
20209     dprintf (3, ("Pinning %Ix", (size_t)o));
20210     if ((o >= low) && (o < high))
20211     {
20212         dprintf(3,("^%Ix^", (size_t)o));
20213         set_pinned (o);
20214
20215 #ifdef FEATURE_EVENT_TRACE        
20216         if(EVENT_ENABLED(PinObjectAtGCTime))
20217         {
20218             fire_etw_pin_object_event(o, ppObject);
20219         }
20220 #endif // FEATURE_EVENT_TRACE
20221
20222 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
20223         num_pinned_objects++;
20224 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
20225     }
20226 }
20227
20228 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
20229 size_t gc_heap::get_total_pinned_objects()
20230 {
20231 #ifdef MULTIPLE_HEAPS
20232     size_t total_num_pinned_objects = 0;
20233     for (int i = 0; i < gc_heap::n_heaps; i++)
20234     {
20235         gc_heap* hp = gc_heap::g_heaps[i];
20236         total_num_pinned_objects += hp->num_pinned_objects;
20237     }
20238     return total_num_pinned_objects;
20239 #else //MULTIPLE_HEAPS
20240     return num_pinned_objects;
20241 #endif //MULTIPLE_HEAPS
20242 }
20243 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
20244
20245 void gc_heap::reset_mark_stack ()
20246 {
20247     reset_pinned_queue();
20248     max_overflow_address = 0;
20249     min_overflow_address = MAX_PTR;
20250 }
20251
20252 #ifdef FEATURE_STRUCTALIGN
20253 //
20254 // The word with left child, right child, and align info is laid out as follows:
20255 //
20256 //      |   upper short word   |   lower short word   |
20257 //      |<------------> <----->|<------------> <----->|
20258 //      |  left child   info hi| right child   info lo|
20259 // x86: |    10 bits     6 bits|   10 bits      6 bits|
20260 //
20261 // where left/right child are signed values and concat(info hi, info lo) is unsigned.
20262 //
20263 // The "align info" encodes two numbers: the required alignment (a power of two)
20264 // and the misalignment (the number of machine words the destination address needs
20265 // to be adjusted by to provide alignment - so this number is always smaller than
20266 // the required alignment).  Thus, the two can be represented as the "logical or"
20267 // of the two numbers.  Note that the actual pad is computed from the misalignment
20268 // by adding the alignment iff the misalignment is non-zero and less than min_obj_size.
20269 //
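// For example (values are hypothetical): a required alignment of 16 with a
// misalignment of 8 is encoded as 16 + 8 = 24 (binary 11000). Because the
// misalignment is always strictly smaller than the alignment, its bits can
// never collide with the single alignment bit, so decoding just isolates
// the highest set bit (16) and takes the remainder (8) as the misalignment.
//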
20270
20271 // The number of bits in a brick.
20272 #if defined (_TARGET_AMD64_)
20273 #define brick_bits (12)
20274 #else
20275 #define brick_bits (11)
20276 #endif //_TARGET_AMD64_
20277 C_ASSERT(brick_size == (1 << brick_bits));
20278
20279 // The number of bits needed to represent the offset to a child node.
20280 // "brick_bits + 1" allows us to represent a signed offset within a brick.
20281 #define child_bits (brick_bits + 1 - LOG2_PTRSIZE)
20282
20283 // The number of bits in each of the pad hi, pad lo fields.
20284 #define pad_bits (sizeof(short) * 8 - child_bits)
20285
20286 #define child_from_short(w) (((signed short)(w) / (1 << (pad_bits - LOG2_PTRSIZE))) & ~((1 << LOG2_PTRSIZE) - 1))
20287 #define pad_mask ((1 << pad_bits) - 1)
20288 #define pad_from_short(w) ((size_t)(w) & pad_mask)
20289 #else // FEATURE_STRUCTALIGN
20290 #define child_from_short(w) (w)
20291 #endif // FEATURE_STRUCTALIGN
20292
20293 inline
20294 short node_left_child(uint8_t* node)
20295 {
20296     return child_from_short(((plug_and_pair*)node)[-1].m_pair.left);
20297 }
20298
20299 inline
20300 void set_node_left_child(uint8_t* node, ptrdiff_t val)
20301 {
20302     assert (val > -(ptrdiff_t)brick_size);
20303     assert (val < (ptrdiff_t)brick_size);
20304     assert (Aligned (val));
20305 #ifdef FEATURE_STRUCTALIGN
20306     size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.left);
20307     ((plug_and_pair*)node)[-1].m_pair.left = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
20308 #else // FEATURE_STRUCTALIGN
20309     ((plug_and_pair*)node)[-1].m_pair.left = (short)val;
20310 #endif // FEATURE_STRUCTALIGN
20311     assert (node_left_child (node) == val);
20312 }
20313
20314 inline
20315 short node_right_child(uint8_t* node)
20316 {
20317     return child_from_short(((plug_and_pair*)node)[-1].m_pair.right);
20318 }
20319
20320 inline
20321 void set_node_right_child(uint8_t* node, ptrdiff_t val)
20322 {
20323     assert (val > -(ptrdiff_t)brick_size);
20324     assert (val < (ptrdiff_t)brick_size);
20325     assert (Aligned (val));
20326 #ifdef FEATURE_STRUCTALIGN
20327     size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.right);
20328     ((plug_and_pair*)node)[-1].m_pair.right = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
20329 #else // FEATURE_STRUCTALIGN
20330     ((plug_and_pair*)node)[-1].m_pair.right = (short)val;
20331 #endif // FEATURE_STRUCTALIGN
20332     assert (node_right_child (node) == val);
20333 }
20334
20335 #ifdef FEATURE_STRUCTALIGN
20336 void node_aligninfo (uint8_t* node, int& requiredAlignment, ptrdiff_t& pad)
20337 {
20338     // Extract the single-number aligninfo from the fields.
20339     short left = ((plug_and_pair*)node)[-1].m_pair.left;
20340     short right = ((plug_and_pair*)node)[-1].m_pair.right;
20341     ptrdiff_t pad_shifted = (pad_from_short(left) << pad_bits) | pad_from_short(right);
20342     ptrdiff_t aligninfo = pad_shifted * DATA_ALIGNMENT;
20343
20344     // Replicate the topmost bit into all lower bits.
20345     ptrdiff_t x = aligninfo;
20346     x |= x >> 8;
20347     x |= x >> 4;
20348     x |= x >> 2;
20349     x |= x >> 1;
20350
20351     // Clear all bits but the highest.
20352     requiredAlignment = (int)(x ^ (x >> 1));
20353     pad = aligninfo - requiredAlignment;
20354     pad += AdjustmentForMinPadSize(pad, requiredAlignment);
20355 }
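
// Sketch of the bit manipulation above with aligninfo == 24 (binary 11000):
// the shift-and-or cascade smears the top bit rightwards, producing 11111;
// x ^ (x >> 1) then clears everything but the highest bit, leaving
// 10000 == 16 as requiredAlignment, and pad = 24 - 16 = 8 before the
// minimum-pad adjustment is applied.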
20356
20357 inline
20358 ptrdiff_t node_alignpad (uint8_t* node)
20359 {
20360     int requiredAlignment;
20361     ptrdiff_t alignpad;
20362     node_aligninfo (node, requiredAlignment, alignpad);
20363     return alignpad;
20364 }
20365
20366 void clear_node_aligninfo (uint8_t* node)
20367 {
20368     ((plug_and_pair*)node)[-1].m_pair.left &= ~0 << pad_bits;
20369     ((plug_and_pair*)node)[-1].m_pair.right &= ~0 << pad_bits;
20370 }
20371
20372 void set_node_aligninfo (uint8_t* node, int requiredAlignment, ptrdiff_t pad)
20373 {
20374     // Encode the alignment requirement and alignment offset as a single number
20375     // as described above.
20376     ptrdiff_t aligninfo = (size_t)requiredAlignment + (pad & (requiredAlignment-1));
20377     assert (Aligned (aligninfo));
20378     ptrdiff_t aligninfo_shifted = aligninfo / DATA_ALIGNMENT;
20379     assert (aligninfo_shifted < (1 << (pad_bits + pad_bits)));
20380
20381     ptrdiff_t hi = aligninfo_shifted >> pad_bits;
20382     assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.left) == 0);
20383     ((plug_and_pair*)node)[-1].m_pair.left |= hi;
20384
20385     ptrdiff_t lo = aligninfo_shifted & pad_mask;
20386     assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.right) == 0);
20387     ((plug_and_pair*)node)[-1].m_pair.right |= lo;
20388
20389 #ifdef _DEBUG
20390     int requiredAlignment2;
20391     ptrdiff_t pad2;
20392     node_aligninfo (node, requiredAlignment2, pad2);
20393     assert (requiredAlignment == requiredAlignment2);
20394     assert (pad == pad2);
20395 #endif // _DEBUG
20396 }
20397 #endif // FEATURE_STRUCTALIGN
20398
20399 inline
20400 void loh_set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
20401 {
20402     ptrdiff_t* place = &(((loh_obj_and_pad*)node)[-1].reloc);
20403     *place = val;
20404 }
20405
20406 inline
20407 ptrdiff_t loh_node_relocation_distance(uint8_t* node)
20408 {
20409     return (((loh_obj_and_pad*)node)[-1].reloc);
20410 }
20411
20412 inline
20413 ptrdiff_t node_relocation_distance (uint8_t* node)
20414 {
20415     return (((plug_and_reloc*)(node))[-1].reloc & ~3);
20416 }
20417
20418 inline
20419 void set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
20420 {
20421     assert (val == (val & ~3));
20422     ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
20423     //clear the left bit and the relocation field
20424     *place &= 1;
20425     // store the value
20426     *place |= val;
20427 }
20428
20429 #define node_left_p(node) (((plug_and_reloc*)(node))[-1].reloc & 2)
20430
20431 #define set_node_left(node) ((plug_and_reloc*)(node))[-1].reloc |= 2;
20432
20433 #ifndef FEATURE_STRUCTALIGN
20434 void set_node_realigned(uint8_t* node)
20435 {
20436     ((plug_and_reloc*)(node))[-1].reloc |= 1;
20437 }
20438
20439 void clear_node_realigned(uint8_t* node)
20440 {
20441 #ifdef RESPECT_LARGE_ALIGNMENT
20442     ((plug_and_reloc*)(node))[-1].reloc &= ~1;
20443 #else
20444     UNREFERENCED_PARAMETER(node);
20445 #endif //RESPECT_LARGE_ALIGNMENT
20446 }
20447 #endif // FEATURE_STRUCTALIGN
20448
20449 inline
20450 size_t  node_gap_size (uint8_t* node)
20451 {
20452     return ((plug_and_gap *)node)[-1].gap;
20453 }
20454
20455 void set_gap_size (uint8_t* node, size_t size)
20456 {
20457     assert (Aligned (size));
20458
20459     // clear the 2 uint32_t used by the node.
20460     ((plug_and_gap *)node)[-1].reloc = 0;
20461     ((plug_and_gap *)node)[-1].lr = 0;
20462     ((plug_and_gap *)node)[-1].gap = size;
20463
20464     assert ((size == 0) || (size >= sizeof(plug_and_reloc)));
20465
20466 }
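
// Taken together, the accessors above all index [-1] from a plug's start
// address: the gap size, the relocation distance, the realigned/left bits
// and the left/right child links all live in the bytes immediately
// preceding the plug, so a plug address doubles as a tree node without
// needing any side table.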
20467
20468 uint8_t* gc_heap::insert_node (uint8_t* new_node, size_t sequence_number,
20469                    uint8_t* tree, uint8_t* last_node)
20470 {
20471     dprintf (3, ("IN: %Ix(%Ix), T: %Ix(%Ix), L: %Ix(%Ix) [%Ix]",
20472                  (size_t)new_node, brick_of(new_node), 
20473                  (size_t)tree, brick_of(tree), 
20474                  (size_t)last_node, brick_of(last_node),
20475                  sequence_number));
20476     if (power_of_two_p (sequence_number))
20477     {
20478         set_node_left_child (new_node, (tree - new_node));
20479         dprintf (3, ("NT: %Ix, LC->%Ix", (size_t)new_node, (tree - new_node)));
20480         tree = new_node;
20481     }
20482     else
20483     {
20484         if (oddp (sequence_number))
20485         {
20486             set_node_right_child (last_node, (new_node - last_node));
20487             dprintf (3, ("%Ix RC->%Ix", last_node, (new_node - last_node)));
20488         }
20489         else
20490         {
20491             uint8_t*  earlier_node = tree;
20492             size_t imax = logcount(sequence_number) - 2;
20493             for (size_t i = 0; i != imax; i++)
20494             {
20495                 earlier_node = earlier_node + node_right_child (earlier_node);
20496             }
20497             int tmp_offset = node_right_child (earlier_node);
20498             assert (tmp_offset); // should never be empty
20499             set_node_left_child (new_node, ((earlier_node + tmp_offset ) - new_node));
20500             set_node_right_child (earlier_node, (new_node - earlier_node));
20501
20502             dprintf (3, ("%Ix LC->%Ix, %Ix RC->%Ix", 
20503                 new_node, ((earlier_node + tmp_offset ) - new_node),
20504                 earlier_node, (new_node - earlier_node)));
20505         }
20506     }
20507     return tree;
20508 }
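
// Hedged walk-through (plugs n1..n6 and sequence numbers 1..6 are
// hypothetical): 1, 2 and 4 are powers of two, so each of n1, n2 and n4
// becomes the new root with the previous tree as its left child; 3 and 5
// are odd, so n3 and n5 hang off the previous node's right link; 6 is even
// but not a power of two, so we walk the right spine (logcount(6) - 2 = 0
// steps from the root) and splice n6 in as the root's right child, with
// the displaced n5 becoming n6's left child. The result is the roughly
// balanced tree n4(n2(n1, n3), n6(n5)).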
20509
20510 size_t gc_heap::update_brick_table (uint8_t* tree, size_t current_brick,
20511                                     uint8_t* x, uint8_t* plug_end)
20512 {
20513     dprintf (3, ("tree: %Ix, current b: %Ix, x: %Ix, plug_end: %Ix",
20514         tree, current_brick, x, plug_end));
20515
20516     if (tree != NULL)
20517     {
20518         dprintf (3, ("b- %Ix->%Ix pointing to tree %Ix", 
20519             current_brick, (size_t)(tree - brick_address (current_brick)), tree));
20520         set_brick (current_brick, (tree - brick_address (current_brick)));
20521     }
20522     else
20523     {
20524         dprintf (3, ("b- %Ix->-1", current_brick));
20525         set_brick (current_brick, -1);
20526     }
20527     size_t  b = 1 + current_brick;
20528     ptrdiff_t  offset = 0;
20529     size_t last_br = brick_of (plug_end-1);
20530     current_brick = brick_of (x-1);
20531     dprintf (3, ("ubt: %Ix->%Ix]->%Ix]", b, last_br, current_brick));
20532     while (b <= current_brick)
20533     {
20534         if (b <= last_br)
20535         {
20536             set_brick (b, --offset);
20537         }
20538         else
20539         {
20540             set_brick (b,-1);
20541         }
20542         b++;
20543     }
20544     return brick_of (x);
20545 }
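
// Hedged example of the update above (brick numbers are hypothetical): if
// the tree's plug starts in brick 100 and the plug ends in brick 102 while
// the next object x begins in brick 105, then brick 100 keeps the positive
// offset of the tree within the brick, bricks 101 and 102 get -1 and -2
// (pointing straight back at brick 100), the empty bricks 103 and 104 each
// get -1 so a lookup chains back one brick at a time, and the function
// returns brick_of (x) == 105 as the new current brick.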
20546
20547 void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate)
20548 {
20549 #ifdef BIT64
20550     // We should never demote big plugs to gen0.
20551     if (gen == youngest_generation)
20552     {
20553         heap_segment* seg = ephemeral_heap_segment;
20554         size_t mark_stack_large_bos = mark_stack_bos;
20555         size_t large_plug_pos = 0;
20556         while (mark_stack_large_bos < mark_stack_tos)
20557         {
20558             if (mark_stack_array[mark_stack_large_bos].len > demotion_plug_len_th)
20559             {
20560                 while (mark_stack_bos <= mark_stack_large_bos)
20561                 {
20562                     size_t entry = deque_pinned_plug();
20563                     size_t len = pinned_len (pinned_plug_of (entry));
20564                     uint8_t* plug = pinned_plug (pinned_plug_of(entry));
20565                     if (len > demotion_plug_len_th)
20566                     {
20567                         dprintf (2, ("ps(%d): S %Ix (%Id)(%Ix)", gen->gen_num, plug, len, (plug+len)));
20568                     }
20569                     pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (consing_gen);
20570                     assert(mark_stack_array[entry].len == 0 ||
20571                             mark_stack_array[entry].len >= Align(min_obj_size));
20572                     generation_allocation_pointer (consing_gen) = plug + len;
20573                     generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (seg);
20574                     set_allocator_next_pin (consing_gen);
20575                 }
20576             }
20577
20578             mark_stack_large_bos++;
20579         }
20580     }
20581 #endif // BIT64
20582
20583     generation_plan_allocation_start (gen) =
20584         allocate_in_condemned_generations (consing_gen, Align (min_obj_size), -1);
20585     generation_plan_allocation_start_size (gen) = Align (min_obj_size);
20586     size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
20587     if (next_plug_to_allocate)
20588     {
20589         size_t dist_to_next_plug = (size_t)(next_plug_to_allocate - generation_allocation_pointer (consing_gen));
20590         if (allocation_left > dist_to_next_plug)
20591         {
20592             allocation_left = dist_to_next_plug;
20593         }
20594     }
20595     if (allocation_left < Align (min_obj_size))
20596     {
20597         generation_plan_allocation_start_size (gen) += allocation_left;
20598         generation_allocation_pointer (consing_gen) += allocation_left;
20599     }
20600
20601     dprintf (1, ("plan alloc gen%d(%Ix) start at %Ix (ptr: %Ix, limit: %Ix, next: %Ix)", gen->gen_num, 
20602         generation_plan_allocation_start (gen),
20603         generation_plan_allocation_start_size (gen),
20604         generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen),
20605         next_plug_to_allocate));
20606 }
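
// Hedged note on the tail logic above: if the space left up to the next
// plug (or the current allocation limit) cannot hold even a minimally
// sized object, it is folded into the generation start's planned size and
// skipped, so planning never leaves behind a sliver too small to be parsed
// as an object.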
20607
20608 void gc_heap::realloc_plan_generation_start (generation* gen, generation* consing_gen)
20609 {
20610     BOOL adjacentp = FALSE;
20611
20612     generation_plan_allocation_start (gen) =  
20613         allocate_in_expanded_heap (consing_gen, Align(min_obj_size), adjacentp, 0, 
20614 #ifdef SHORT_PLUGS
20615                                    FALSE, NULL, 
20616 #endif //SHORT_PLUGS
20617                                    FALSE, -1 REQD_ALIGN_AND_OFFSET_ARG);
20618
20619     generation_plan_allocation_start_size (gen) = Align (min_obj_size);
20620     size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
20621     if ((allocation_left < Align (min_obj_size)) && 
20622          (generation_allocation_limit (consing_gen)!=heap_segment_plan_allocated (generation_allocation_segment (consing_gen))))
20623     {
20624         generation_plan_allocation_start_size (gen) += allocation_left;
20625         generation_allocation_pointer (consing_gen) += allocation_left;
20626     }
20627
20628     dprintf (1, ("plan re-alloc gen%d start at %Ix (ptr: %Ix, limit: %Ix)", gen->gen_num,
20629         generation_plan_allocation_start (gen),
20630         generation_allocation_pointer (consing_gen),
20631         generation_allocation_limit (consing_gen)));
20632 }
20633
20634 void gc_heap::plan_generation_starts (generation*& consing_gen)
20635 {
20636     //make sure that every generation has a planned allocation start
20637     int  gen_number = settings.condemned_generation;
20638     while (gen_number >= 0)
20639     {
20640         if (gen_number < max_generation)
20641         {
20642             consing_gen = ensure_ephemeral_heap_segment (consing_gen);
20643         }
20644         generation* gen = generation_of (gen_number);
20645         if (0 == generation_plan_allocation_start (gen))
20646         {
20647             plan_generation_start (gen, consing_gen, 0);
20648             assert (generation_plan_allocation_start (gen));
20649         }
20650         gen_number--;
20651     }
20652     // now we know the planned allocation size
20653     heap_segment_plan_allocated (ephemeral_heap_segment) =
20654         generation_allocation_pointer (consing_gen);
20655 }
20656
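// Demotion heuristic for gen1 pins: if the gen1 pinned survivors still in front
// of us are both a sizable fraction (> 15%) of the space we would have to skip
// over and a sizable fraction (> 30%) of everything that survived in gen1,
// dequeue the pins below the original gen0 start and advance the allocation
// pointer past them, so those plugs end up promoted rather than demoted.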
20657 void gc_heap::advance_pins_for_demotion (generation* gen)
20658 {
20659     uint8_t* original_youngest_start = generation_allocation_start (youngest_generation);
20660     heap_segment* seg = ephemeral_heap_segment;
20661
20662     if (!pinned_plug_que_empty_p())
20663     {
20664         size_t gen1_pinned_promoted = generation_pinned_allocation_compact_size (generation_of (max_generation));
20665         size_t gen1_pins_left = dd_pinned_survived_size (dynamic_data_of (max_generation - 1)) - gen1_pinned_promoted;
20666         size_t total_space_to_skip = last_gen1_pin_end - generation_allocation_pointer (gen);
20667         float pin_frag_ratio = (float)gen1_pins_left / (float)total_space_to_skip;
20668         float pin_surv_ratio = (float)gen1_pins_left / (float)(dd_survived_size (dynamic_data_of (max_generation - 1)));
20669         if ((pin_frag_ratio > 0.15) && (pin_surv_ratio > 0.30))
20670         {
20671             while (!pinned_plug_que_empty_p() &&
20672                     (pinned_plug (oldest_pin()) < original_youngest_start))
20673             {
20674                 size_t entry = deque_pinned_plug();
20675                 size_t len = pinned_len (pinned_plug_of (entry));
20676                 uint8_t* plug = pinned_plug (pinned_plug_of(entry));
20677                 pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (gen);
20678                 assert(mark_stack_array[entry].len == 0 ||
20679                         mark_stack_array[entry].len >= Align(min_obj_size));
20680                 generation_allocation_pointer (gen) = plug + len;
20681                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
20682                 set_allocator_next_pin (gen);
20683
20684                 // Add the size of the pinned plug to the right pinned allocations:
20685                 // find out which gen this pinned plug came from.
20686                 int frgn = object_gennum (plug);
20687                 if ((frgn != (int)max_generation) && settings.promotion)
20688                 {
20689                     int togn = object_gennum_plan (plug);
20690                     generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
20691                     if (frgn < togn)
20692                     {
20693                         generation_pinned_allocation_compact_size (generation_of (togn)) += len;
20694                     }
20695                 }
20696
20697                 dprintf (2, ("skipping gap %d, pin %Ix (%Id)", 
20698                     pinned_len (pinned_plug_of (entry)), plug, len));
20699             }
20700         }
20701         dprintf (2, ("ad_p_d: PL: %Id, SL: %Id, pfr: %d, psr: %d", 
20702             gen1_pins_left, total_space_to_skip, (int)(pin_frag_ratio*100), (int)(pin_surv_ratio*100)));
20703     }
20704 }
20705
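// Called while walking the condemned generations in the plan phase. Each time
// x crosses the allocation start of the next older generation we retire that
// generation: the active old/new generation numbers are advanced and the new
// generation's start is planned at the current consing point. On reaching the
// gen1 boundary we also drain all pinned plugs that live outside the ephemeral
// segment, switching the consing generation's allocation segment as we go, and
// from then on allocation happens in the condemned generation.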
20706 void gc_heap::process_ephemeral_boundaries (uint8_t* x,
20707                                             int& active_new_gen_number,
20708                                             int& active_old_gen_number,
20709                                             generation*& consing_gen,
20710                                             BOOL& allocate_in_condemned)
20711 {
20712 retry:
20713     if ((active_old_gen_number > 0) &&
20714         (x >= generation_allocation_start (generation_of (active_old_gen_number - 1))))
20715     {
20716         dprintf (1, ("crossing gen%d, x is %Ix", active_old_gen_number - 1, x));
20717
20718         if (!pinned_plug_que_empty_p())
20719         {
20720             dprintf (1, ("oldest pin: %Ix(%Id)",
20721                 pinned_plug (oldest_pin()), 
20722                 (x - pinned_plug (oldest_pin()))));
20723         }
20724
20725         if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation))
20726         {
20727             active_new_gen_number--;
20728         }
20729
20730         active_old_gen_number--;
20731         assert ((!settings.promotion) || (active_new_gen_number>0));
20732
20733         if (active_new_gen_number == (max_generation - 1))
20734         {
20735 #ifdef FREE_USAGE_STATS
20736             if (settings.condemned_generation == max_generation)
20737             {
20738                 // We need to do this before we skip the rest of the pinned plugs.
20739                 generation* gen_2 = generation_of (max_generation);
20740                 generation* gen_1 = generation_of (max_generation - 1);
20741
20742                 size_t total_num_pinned_free_spaces_left = 0;
20743
20744                 // We are about to allocate gen1; check how efficiently we fit into gen2 pinned free spaces.
20745                 for (int j = 0; j < NUM_GEN_POWER2; j++)
20746                 {
20747                     dprintf (1, ("[h%d][#%Id]2^%d: current: %Id, S: 2: %Id, 1: %Id(%Id)", 
20748                         heap_number, 
20749                         settings.gc_index,
20750                         (j + 10), 
20751                         gen_2->gen_current_pinned_free_spaces[j],
20752                         gen_2->gen_plugs[j], gen_1->gen_plugs[j],
20753                         (gen_2->gen_plugs[j] + gen_1->gen_plugs[j])));
20754
20755                     total_num_pinned_free_spaces_left += gen_2->gen_current_pinned_free_spaces[j];
20756                 }
20757
20758                 float pinned_free_list_efficiency = 0;
20759                 size_t total_pinned_free_space = generation_allocated_in_pinned_free (gen_2) + generation_pinned_free_obj_space (gen_2);
20760                 if (total_pinned_free_space != 0)
20761                 {
20762                     pinned_free_list_efficiency = (float)(generation_allocated_in_pinned_free (gen_2)) / (float)total_pinned_free_space;
20763                 }
20764
20765                 dprintf (1, ("[h%d] gen2 allocated %Id bytes with %Id bytes pinned free spaces (effi: %d%%), %Id (%Id) left",
20766                             heap_number,
20767                             generation_allocated_in_pinned_free (gen_2),
20768                             total_pinned_free_space, 
20769                             (int)(pinned_free_list_efficiency * 100),
20770                             generation_pinned_free_obj_space (gen_2),
20771                             total_num_pinned_free_spaces_left));
20772             }
20773 #endif //FREE_USAGE_STATS
20774
20775             //Go past all of the pinned plugs for this generation.
20776             while (!pinned_plug_que_empty_p() &&
20777                    (!in_range_for_segment ((pinned_plug (oldest_pin())), ephemeral_heap_segment)))
20778             {
20779                 size_t  entry = deque_pinned_plug();
20780                 mark*  m = pinned_plug_of (entry);
20781                 uint8_t*  plug = pinned_plug (m);
20782                 size_t  len = pinned_len (m);
20783                 // Detect pinned blocks in a different (later) segment than the
20784                 // allocation segment and skip those until the oldest pin is in the
20785                 // ephemeral seg, adjusting the allocation segment along the way
20786                 // (at the end it will be the ephemeral segment).
20787                 heap_segment* nseg = heap_segment_in_range (generation_allocation_segment (consing_gen));
20788
20789                 PREFIX_ASSUME(nseg != NULL);
20790
20791                 while (!((plug >= generation_allocation_pointer (consing_gen))&&
20792                         (plug < heap_segment_allocated (nseg))))
20793                 {
20794                     //adjust the end of the segment to be the end of the plug
20795                     assert (generation_allocation_pointer (consing_gen)>=
20796                             heap_segment_mem (nseg));
20797                     assert (generation_allocation_pointer (consing_gen)<=
20798                             heap_segment_committed (nseg));
20799
20800                     heap_segment_plan_allocated (nseg) =
20801                         generation_allocation_pointer (consing_gen);
20802                     //switch allocation segment
20803                     nseg = heap_segment_next_rw (nseg);
20804                     generation_allocation_segment (consing_gen) = nseg;
20805                     //reset the allocation pointer and limits
20806                     generation_allocation_pointer (consing_gen) =
20807                         heap_segment_mem (nseg);
20808                 }
20809                 set_new_pin_info (m, generation_allocation_pointer (consing_gen));
20810                 assert(pinned_len(m) == 0 || pinned_len(m) >= Align(min_obj_size));
20811                 generation_allocation_pointer (consing_gen) = plug + len;
20812                 generation_allocation_limit (consing_gen) =
20813                     generation_allocation_pointer (consing_gen);
20814             }
20815             allocate_in_condemned = TRUE;
20816             consing_gen = ensure_ephemeral_heap_segment (consing_gen);
20817         }
20818
20819         if (active_new_gen_number != max_generation)
20820         {
20821             if (active_new_gen_number == (max_generation - 1))
20822             {
20823                 maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
20824                 if (!demote_gen1_p)
20825                     advance_pins_for_demotion (consing_gen);
20826             }
20827
20828             plan_generation_start (generation_of (active_new_gen_number), consing_gen, x);
20829                 
20830             dprintf (1, ("process eph: allocated gen%d start at %Ix", 
20831                 active_new_gen_number,
20832                 generation_plan_allocation_start (generation_of (active_new_gen_number))));
20833
20834             if ((demotion_low == MAX_PTR) && !pinned_plug_que_empty_p())
20835             {
20836                 uint8_t* pplug = pinned_plug (oldest_pin());
20837                 if (object_gennum (pplug) > 0)
20838                 {
20839                     demotion_low = pplug;
20840                     dprintf (3, ("process eph: dlow->%Ix", demotion_low));
20841                 }
20842             }
20843
20844             assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
20845         }
20846
20847         goto retry;
20848     }
20849 }
20850
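// Walks a segment object by object and clears the mark bit on each marked
// object; sweep_ro_segments below uses this when the mark state is kept in
// the objects themselves rather than in the mark array.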
20851 inline
20852 void gc_heap::seg_clear_mark_bits (heap_segment* seg)
20853 {
20854     uint8_t* o = heap_segment_mem (seg);
20855     while (o < heap_segment_allocated (seg))
20856     {
20857         if (marked (o))
20858         {
20859             clear_marked (o);
20860         }
20861         o = o  + Align (size (o));
20862     }
20863 }
20864
20865 #ifdef FEATURE_BASICFREEZE
20866 void gc_heap::sweep_ro_segments (heap_segment* start_seg)
20867 {
20868     // go through all of the segments in range and reset the mark bit
20869     // TODO: this works only on small object segments
20870
20871     heap_segment* seg = start_seg;
20872
20873     while (seg)
20874     {
20875         if (heap_segment_read_only_p (seg) &&
20876             heap_segment_in_range_p (seg))
20877         {
20878 #ifdef BACKGROUND_GC
20879             if (settings.concurrent)
20880             {
20881                 seg_clear_mark_array_bits_soh (seg);
20882             }
20883             else
20884             {
20885                 seg_clear_mark_bits (seg);
20886             }
20887 #else //BACKGROUND_GC
20888
20889 #ifdef MARK_ARRAY
20890             if(gc_can_use_concurrent)
20891             {
20892                 clear_mark_array (max (heap_segment_mem (seg), lowest_address),
20893                               min (heap_segment_allocated (seg), highest_address),
20894                               FALSE); // read_only segments need the mark clear
20895             }
20896 #else //MARK_ARRAY
20897             seg_clear_mark_bits (seg);
20898 #endif //MARK_ARRAY
20899
20900 #endif //BACKGROUND_GC
20901         }
20902         seg = heap_segment_next (seg);
20903     }
20904 }
20905 #endif // FEATURE_BASICFREEZE
20906
20907 #ifdef FEATURE_LOH_COMPACTION
20908 inline
20909 BOOL gc_heap::loh_pinned_plug_que_empty_p()
20910 {
20911     return (loh_pinned_queue_bos == loh_pinned_queue_tos);
20912 }
20913
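// If the oldest LOH pin sits inside the current allocation range, pull the
// allocation limit back to the front of that pinned plug so the LOH allocator
// stops right at it instead of walking over the pinned object.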
20914 void gc_heap::loh_set_allocator_next_pin()
20915 {
20916     if (!(loh_pinned_plug_que_empty_p()))
20917     {
20918         mark*  oldest_entry = loh_oldest_pin();
20919         uint8_t* plug = pinned_plug (oldest_entry);
20920         generation* gen = large_object_generation;
20921         if ((plug >= generation_allocation_pointer (gen)) &&
20922             (plug <  generation_allocation_limit (gen)))
20923         {
20924             generation_allocation_limit (gen) = pinned_plug (oldest_entry);
20925         }
20926         else
20927             assert (!((plug < generation_allocation_pointer (gen)) &&
20928                       (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
20929     }
20930 }
20931
20932 size_t gc_heap::loh_deque_pinned_plug ()
20933 {
20934     size_t m = loh_pinned_queue_bos;
20935     loh_pinned_queue_bos++;
20936     return m;
20937 }
20938
20939 inline
20940 mark* gc_heap::loh_pinned_plug_of (size_t bos)
20941 {
20942     return &loh_pinned_queue[bos];
20943 }
20944
20945 inline
20946 mark* gc_heap::loh_oldest_pin()
20947 {
20948     return loh_pinned_plug_of (loh_pinned_queue_bos);
20949 }
20950
20951 // If we can't grow the queue, then don't compact.
20952 BOOL gc_heap::loh_enque_pinned_plug (uint8_t* plug, size_t len)
20953 {
20954     assert(len >= Align(min_obj_size, get_alignment_constant (FALSE)));
20955
20956     if (loh_pinned_queue_length <= loh_pinned_queue_tos)
20957     {
20958         if (!grow_mark_stack (loh_pinned_queue, loh_pinned_queue_length, LOH_PIN_QUEUE_LENGTH))
20959         {
20960             return FALSE;
20961         }
20962     }
20963     dprintf (3, (" P: %Ix(%Id)", plug, len));
20964     mark& m = loh_pinned_queue[loh_pinned_queue_tos];
20965     m.first = plug;
20966     m.len = len;
20967     loh_pinned_queue_tos++;
20968     loh_set_allocator_next_pin();
20969     return TRUE;
20970 }
20971
20972 inline
20973 BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit)
20974 {
20975     dprintf (1235, ("trying to fit %Id(%Id) between %Ix and %Ix (%Id)", 
20976         size, 
20977         (2* AlignQword (loh_padding_obj_size) +  size),
20978         alloc_pointer,
20979         alloc_limit,
20980         (alloc_limit - alloc_pointer)));
20981
20982     return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) +  size) <= alloc_limit);
20983 }
20984
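// Picks the planned (post-compaction) address for a LOH object of the given
// size. The retry loop widens the allocation range step by step: consume the
// oldest pinned plug when the limit is parked right at it, then bump the limit
// to the segment's plan allocated, then to its committed end, then try growing
// the segment, and finally move on to the next segment. Each object is planned
// with a pad of AlignQword (loh_padding_obj_size) in front of it, and the
// address returned points just past that pad.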
20985 uint8_t* gc_heap::loh_allocate_in_condemned (uint8_t* old_loc, size_t size)
20986 {
20987     UNREFERENCED_PARAMETER(old_loc);
20988
20989     generation* gen = large_object_generation;
20990     dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id", 
20991         generation_allocation_pointer (gen),
20992         generation_allocation_limit (gen),
20993         size));
20994
20995 retry:
20996     {
20997         heap_segment* seg = generation_allocation_segment (gen);
20998         if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen))))
20999         {
21000             if ((!(loh_pinned_plug_que_empty_p()) &&
21001                  (generation_allocation_limit (gen) ==
21002                   pinned_plug (loh_oldest_pin()))))
21003             {
21004                 mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
21005                 size_t len = pinned_len (m);
21006                 uint8_t* plug = pinned_plug (m);
21007                 dprintf (1235, ("AIC: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
21008                 pinned_len (m) = plug - generation_allocation_pointer (gen);
21009                 generation_allocation_pointer (gen) = plug + len;
21010                 
21011                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
21012                 loh_set_allocator_next_pin();
21013                 dprintf (1235, ("s: p: %Ix, l: %Ix (%Id)", 
21014                     generation_allocation_pointer (gen), 
21015                     generation_allocation_limit (gen),
21016                     (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
21017
21018                 goto retry;
21019             }
21020
21021             if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
21022             {
21023                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
21024                 dprintf (1235, ("l->pa(%Ix)", generation_allocation_limit (gen)));
21025             }
21026             else
21027             {
21028                 if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
21029                 {
21030                     heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
21031                     generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
21032                     dprintf (1235, ("l->c(%Ix)", generation_allocation_limit (gen)));
21033                 }
21034                 else
21035                 {
21036                     if (loh_size_fit_p (size, generation_allocation_pointer (gen), heap_segment_reserved (seg)) &&
21037                         (grow_heap_segment (seg, (generation_allocation_pointer (gen) + size + 2* AlignQword (loh_padding_obj_size)))))
21038                     {
21039                         dprintf (1235, ("growing seg from %Ix to %Ix\n", heap_segment_committed (seg),
21040                                          (generation_allocation_pointer (gen) + size)));
21041
21042                         heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
21043                         generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
21044
21045                         dprintf (1235, ("g: p: %Ix, l: %Ix (%Id)", 
21046                             generation_allocation_pointer (gen), 
21047                             generation_allocation_limit (gen),
21048                             (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
21049                     }
21050                     else
21051                     {
21052                         heap_segment* next_seg = heap_segment_next (seg);
21053                         assert (generation_allocation_pointer (gen)>=
21054                                 heap_segment_mem (seg));
21055                         // Verify that all pinned plugs for this segment are consumed
21056                         if (!loh_pinned_plug_que_empty_p() &&
21057                             ((pinned_plug (loh_oldest_pin()) <
21058                               heap_segment_allocated (seg)) &&
21059                              (pinned_plug (loh_oldest_pin()) >=
21060                               generation_allocation_pointer (gen))))
21061                         {
21062                             LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
21063                                          pinned_plug (loh_oldest_pin())));
21064                             dprintf (1236, ("queue empty: %d", loh_pinned_plug_que_empty_p()));
21065                             FATAL_GC_ERROR();
21066                         }
21067                         assert (generation_allocation_pointer (gen)>=
21068                                 heap_segment_mem (seg));
21069                         assert (generation_allocation_pointer (gen)<=
21070                                 heap_segment_committed (seg));
21071                         heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);
21072
21073                         if (next_seg)
21074                         {
21075                             // for LOH, do we want to try starting from the first LOH segment every time though?
21076                             generation_allocation_segment (gen) = next_seg;
21077                             generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
21078                             generation_allocation_limit (gen) = generation_allocation_pointer (gen);
21079
21080                             dprintf (1235, ("n: p: %Ix, l: %Ix (%Id)", 
21081                                 generation_allocation_pointer (gen), 
21082                                 generation_allocation_limit (gen),
21083                                 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
21084                         }
21085                         else
21086                         {
21087                             dprintf (1, ("We ran out of space compacting, shouldn't happen"));
21088                             FATAL_GC_ERROR();
21089                         }
21090                     }
21091                 }
21092             }
21093             loh_set_allocator_next_pin();
21094
21095             dprintf (1235, ("r: p: %Ix, l: %Ix (%Id)", 
21096                 generation_allocation_pointer (gen), 
21097                 generation_allocation_limit (gen),
21098                 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
21099
21100             goto retry;
21101         }
21102     }
21103
21104     {
21105         assert (generation_allocation_pointer (gen)>=
21106                 heap_segment_mem (generation_allocation_segment (gen)));
21107         uint8_t* result = generation_allocation_pointer (gen);
21108         size_t loh_pad = AlignQword (loh_padding_obj_size);
21109
21110         generation_allocation_pointer (gen) += size + loh_pad;
21111         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
21112
21113         dprintf (1235, ("p: %Ix, l: %Ix (%Id)", 
21114             generation_allocation_pointer (gen), 
21115             generation_allocation_limit (gen),
21116             (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
21117
21118         assert (result + loh_pad);
21119         return result + loh_pad;
21120     }
21121 }
21122
21123 BOOL gc_heap::should_compact_loh()
21124 {
21125     return (loh_compaction_always_p || (loh_compaction_mode != loh_compaction_default));
21126 }
21127
21128 inline
21129 void gc_heap::check_loh_compact_mode (BOOL all_heaps_compacted_p)
21130 {
21131     if (settings.loh_compaction && (loh_compaction_mode == loh_compaction_once))
21132     {
21133         if (all_heaps_compacted_p)
21134         {
21135             // If the compaction mode says to compact once and we are going to compact LOH, 
21136             // we need to revert it to no compaction.
21137             loh_compaction_mode = loh_compaction_default;
21138         }
21139     }
21140 }
21141
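// The plan phase of LOH compaction. Allocates the LOH pin queue on demand,
// resets plan_allocated on every segment, then walks the marked objects:
// pinned objects are enqueued and keep their address, everything else gets a
// new address from loh_allocate_in_condemned, and the relocation distance is
// stored in the object's node. Returns FALSE - meaning we should not compact -
// if the pin queue cannot be allocated or grown.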
21142 BOOL gc_heap::plan_loh()
21143 {
21144     if (!loh_pinned_queue)
21145     {
21146         loh_pinned_queue = new (nothrow) (mark [LOH_PIN_QUEUE_LENGTH]);
21147         if (!loh_pinned_queue)
21148         {
21149             dprintf (1, ("Cannot allocate the LOH pinned queue (%Id bytes), no compaction", 
21150                          LOH_PIN_QUEUE_LENGTH * sizeof (mark)));
21151             return FALSE;
21152         }
21153
21154         loh_pinned_queue_length = LOH_PIN_QUEUE_LENGTH;
21155     }
21156
21157     if (heap_number == 0)
21158         loh_pinned_queue_decay = LOH_PIN_DECAY;
21159
21160     loh_pinned_queue_tos = 0;
21161     loh_pinned_queue_bos = 0;
21162     
21163     generation* gen        = large_object_generation;
21164     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
21165     PREFIX_ASSUME(start_seg != NULL);
21166     heap_segment* seg      = start_seg;
21167     uint8_t* o             = generation_allocation_start (gen);
21168
21169     dprintf (1235, ("before GC LOH size: %Id, free list: %Id, free obj: %Id\n", 
21170         generation_size (max_generation + 1), 
21171         generation_free_list_space (gen),
21172         generation_free_obj_space (gen)));
21173
21174     while (seg)
21175     {
21176         heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
21177         seg = heap_segment_next (seg);
21178     }
21179
21180     seg = start_seg;
21181
21182     //Skip the generation gap object
21183     o = o + AlignQword (size (o));
21184     // We never need to realloc the gen3 start, so don't touch it.
21185     heap_segment_plan_allocated (seg) = o;
21186     generation_allocation_pointer (gen) = o;
21187     generation_allocation_limit (gen) = generation_allocation_pointer (gen);
21188     generation_allocation_segment (gen) = start_seg;
21189
21190     uint8_t* free_space_start = o;
21191     uint8_t* free_space_end = o;
21192     uint8_t* new_address = 0;
21193
21194     while (1)
21195     {
21196         if (o >= heap_segment_allocated (seg))
21197         {
21198             seg = heap_segment_next (seg);
21199             if (seg == 0)
21200             {
21201                 break;
21202             }
21203
21204             o = heap_segment_mem (seg);
21205         }
21206
21207         if (marked (o))
21208         {
21209             free_space_end = o;
21210             size_t size = AlignQword (size (o));
21211             dprintf (1235, ("%Ix(%Id) M", o, size));
21212
21213             if (pinned (o))
21214             {
21215                 // We don't clear the pinned bit yet so that in the compact
21216                 // phase we can check how big a free object we should allocate
21217                 // in front of the pinned object. We use the reloc address
21218                 // field to store this.
21219                 if (!loh_enque_pinned_plug (o, size))
21220                 {
21221                     return FALSE;
21222                 }
21223                 new_address = o;
21224             }
21225             else
21226             {
21227                 new_address = loh_allocate_in_condemned (o, size);
21228             }
21229
21230             loh_set_node_relocation_distance (o, (new_address - o));
21231             dprintf (1235, ("lobj %Ix-%Ix -> %Ix-%Ix (%Id)", o, (o + size), new_address, (new_address + size), (new_address - o)));
21232
21233             o = o + size;
21234             free_space_start = o;
21235             if (o < heap_segment_allocated (seg))
21236             {
21237                 assert (!marked (o));
21238             }
21239         }
21240         else
21241         {
21242             while (o < heap_segment_allocated (seg) && !marked (o))
21243             {
21244                 dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_gc_pFreeObjectMethodTable) ? 1 : 0)));
21245                 o = o + AlignQword (size (o));
21246             }
21247         }
21248     }
21249
21250     while (!loh_pinned_plug_que_empty_p())
21251     {
21252         mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
21253         size_t len = pinned_len (m);
21254         uint8_t* plug = pinned_plug (m);
21255
21256         // detect a pinned block in a different (later) segment than the
21257         // allocation segment
21258         heap_segment* nseg = heap_segment_rw (generation_allocation_segment (gen));
21259
21260         while ((plug < generation_allocation_pointer (gen)) ||
21261                (plug >= heap_segment_allocated (nseg)))
21262         {
21263             assert ((plug < heap_segment_mem (nseg)) ||
21264                     (plug > heap_segment_reserved (nseg)));
21265             //adjust the end of the segment to be the end of the plug
21266             assert (generation_allocation_pointer (gen)>=
21267                     heap_segment_mem (nseg));
21268             assert (generation_allocation_pointer (gen)<=
21269                     heap_segment_committed (nseg));
21270
21271             heap_segment_plan_allocated (nseg) =
21272                 generation_allocation_pointer (gen);
21273             //switch allocation segment
21274             nseg = heap_segment_next_rw (nseg);
21275             generation_allocation_segment (gen) = nseg;
21276             //reset the allocation pointer and limits
21277             generation_allocation_pointer (gen) =
21278                 heap_segment_mem (nseg);
21279         }
21280
21281         dprintf (1235, ("SP: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
21282         pinned_len (m) = plug - generation_allocation_pointer (gen);
21283         generation_allocation_pointer (gen) = plug + len;
21284     }
21285
21286     heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
21287     generation_allocation_pointer (gen) = 0;
21288     generation_allocation_limit (gen) = 0;
21289
21290     return TRUE;
21291 }
21292
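// The compact phase of LOH compaction. Relies on seeing the pinned plugs in
// exactly the order plan_loh queued them: pinned objects stay put and their
// recorded pad becomes a gap, unpinned objects are copied to their relocated
// address with gcmemcopy, and the gap in front of each object is threaded onto
// the free list with thread_gap. Segments left completely empty are moved to
// the freeable_large_heap_segment list; the others are trimmed to their new
// allocated size and have their unused pages decommitted.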
21293 void gc_heap::compact_loh()
21294 {
21295     assert (should_compact_loh());
21296
21297     generation* gen        = large_object_generation;
21298     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
21299     PREFIX_ASSUME(start_seg != NULL);
21300     heap_segment* seg      = start_seg;
21301     heap_segment* prev_seg = 0;
21302     uint8_t* o             = generation_allocation_start (gen);
21303
21304     //Skip the generation gap object
21305     o = o + AlignQword (size (o));
21306     // We never need to realloc the gen3 start, so don't touch it.
21307     uint8_t* free_space_start = o;
21308     uint8_t* free_space_end = o;
21309     generation_allocator (gen)->clear();
21310     generation_free_list_space (gen) = 0;
21311     generation_free_obj_space (gen) = 0;
21312
21313     loh_pinned_queue_bos = 0;
21314
21315     while (1)
21316     {
21317         if (o >= heap_segment_allocated (seg))
21318         {
21319             heap_segment* next_seg = heap_segment_next (seg);
21320
21321             if ((heap_segment_plan_allocated (seg) == heap_segment_mem (seg)) &&
21322                 (seg != start_seg) && !heap_segment_read_only_p (seg))
21323             {
21324                 dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
21325                 assert (prev_seg);
21326                 heap_segment_next (prev_seg) = next_seg;
21327                 heap_segment_next (seg) = freeable_large_heap_segment;
21328                 freeable_large_heap_segment = seg;
21329             }
21330             else
21331             {
21332                 if (!heap_segment_read_only_p (seg))
21333                 {
21334                     // We grew the segment to accommodate allocations.
21335                     if (heap_segment_plan_allocated (seg) > heap_segment_allocated (seg))
21336                     {
21337                         if ((heap_segment_plan_allocated (seg) - plug_skew)  > heap_segment_used (seg))
21338                         {
21339                             heap_segment_used (seg) = heap_segment_plan_allocated (seg) - plug_skew;
21340                         }
21341                     }
21342
21343                     heap_segment_allocated (seg) = heap_segment_plan_allocated (seg);
21344                     dprintf (3, ("Trimming seg to %Ix[", heap_segment_allocated (seg)));
21345                     decommit_heap_segment_pages (seg, 0);
21346                     dprintf (1236, ("CLOH: seg: %Ix, alloc: %Ix, used: %Ix, committed: %Ix",
21347                         seg, 
21348                         heap_segment_allocated (seg),
21349                         heap_segment_used (seg),
21350                         heap_segment_committed (seg)));
21351                     //heap_segment_used (seg) = heap_segment_allocated (seg) - plug_skew;
21352                     dprintf (1236, ("CLOH: used is set to %Ix", heap_segment_used (seg)));
21353                 }
21354                 prev_seg = seg;
21355             }
21356
21357             seg = next_seg;
21358             if (seg == 0)
21359                 break;
21360             else
21361             {
21362                 o = heap_segment_mem (seg);
21363             }
21364         }
21365
21366         if (marked (o))
21367         {
21368             free_space_end = o;
21369             size_t size = AlignQword (size (o));
21370
21371             size_t loh_pad;
21372             uint8_t* reloc = o;
21373             clear_marked (o);
21374
21375             if (pinned (o))
21376             {
21377                 // We are relying on the fact that pinned objects are always visited
21378                 // in the same order in the plan phase and in the compact phase.
21379                 mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
21380                 uint8_t* plug = pinned_plug (m);
21381                 assert (plug == o);
21382
21383                 loh_pad = pinned_len (m);
21384                 clear_pinned (o);
21385             }
21386             else
21387             {
21388                 loh_pad = AlignQword (loh_padding_obj_size);
21389
21390                 reloc += loh_node_relocation_distance (o);
21391                 gcmemcopy (reloc, o, size, TRUE);
21392             }
21393
21394             thread_gap ((reloc - loh_pad), loh_pad, gen);
21395
21396             o = o + size;
21397             free_space_start = o;
21398             if (o < heap_segment_allocated (seg))
21399             {
21400                 assert (!marked (o));
21401             }
21402         }
21403         else
21404         {
21405             while (o < heap_segment_allocated (seg) && !marked (o))
21406             {
21407                 o = o + AlignQword (size (o));
21408             }
21409         }
21410     }
21411
21412     assert (loh_pinned_plug_que_empty_p());
21413
21414     dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", 
21415         generation_size (max_generation + 1), 
21416         generation_free_list_space (gen),
21417         generation_free_obj_space (gen)));
21418 }
21419
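// The relocate phase for a compacting LOH GC: walks the marked objects and
// updates every reference they contain (via reloc_survivor_helper); the
// objects themselves are moved by compact_loh.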
21420 void gc_heap::relocate_in_loh_compact()
21421 {
21422     generation* gen        = large_object_generation;
21423     heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
21424     uint8_t* o             = generation_allocation_start (gen);
21425
21426     //Skip the generation gap object
21427     o = o + AlignQword (size (o));
21428
21429     relocate_args args;
21430     args.low = gc_low;
21431     args.high = gc_high;
21432     args.last_plug = 0;
21433
21434     while (1)
21435     {
21436         if (o >= heap_segment_allocated (seg))
21437         {
21438             seg = heap_segment_next (seg);
21439             if (seg == 0)
21440             {
21441                 break;
21442             }
21443
21444             o = heap_segment_mem (seg);
21445         }
21446
21447         if (marked (o))
21448         {
21449             size_t size = AlignQword (size (o));
21450
21451             check_class_object_demotion (o);
21452             if (contain_pointers (o))
21453             {
21454                 go_through_object_nostart (method_table (o), o, size(o), pval,
21455                 {
21456                     reloc_survivor_helper (pval);
21457                 });
21458             }
21459
21460             o = o + size;
21461             if (o < heap_segment_allocated (seg))
21462             {
21463                 assert (!marked (o));
21464             }
21465         }
21466         else
21467         {
21468             while (o < heap_segment_allocated (seg) && !marked (o))
21469             {
21470                 o = o + AlignQword (size (o));
21471             }
21472         }
21473     }
21474
21475     dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", 
21476         generation_size (max_generation + 1), 
21477         generation_free_list_space (gen),
21478         generation_free_obj_space (gen)));
21479 }
21480
21481 void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn)
21482 {
21483     generation* gen        = large_object_generation;
21484     heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
21485     uint8_t* o             = generation_allocation_start (gen);
21486
21487     //Skip the generation gap object
21488     o = o + AlignQword (size (o));
21489
21490     while (1)
21491     {
21492         if (o >= heap_segment_allocated (seg))
21493         {
21494             seg = heap_segment_next (seg);
21495             if (seg == 0)
21496             {
21497                 break;
21498             }
21499
21500             o = heap_segment_mem (seg);
21501         }
21502
21503         if (marked (o))
21504         {
21505             size_t size = AlignQword (size (o));
21506
21507             ptrdiff_t reloc = loh_node_relocation_distance (o);
21508
21509             STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
21510
21511             fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false);
21512
21513             o = o + size;
21514             if (o < heap_segment_allocated (seg))
21515             {
21516                 assert (!marked (o));
21517             }
21518         }
21519         else
21520         {
21521             while (o < heap_segment_allocated (seg) && !marked (o))
21522             {
21523                 o = o + AlignQword (size (o));
21524             }
21525         }
21526     }
21527 }
21528
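// Returns whether o is a large object. Brick entries are only maintained for
// the small object heap, so a brick entry of 0 (never set) identifies a LOH
// address here.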
21529 BOOL gc_heap::loh_object_p (uint8_t* o)
21530 {
21531 #ifdef MULTIPLE_HEAPS
21532     gc_heap* hp = gc_heap::g_heaps [0];
21533     int brick_entry = hp->brick_table[hp->brick_of (o)];
21534 #else //MULTIPLE_HEAPS
21535     int brick_entry = brick_table[brick_of (o)];
21536 #endif //MULTIPLE_HEAPS
21537
21538     return (brick_entry == 0);
21539 }
21540 #endif //FEATURE_LOH_COMPACTION
21541
21542 void gc_heap::convert_to_pinned_plug (BOOL& last_npinned_plug_p, 
21543                                       BOOL& last_pinned_plug_p, 
21544                                       BOOL& pinned_plug_p,
21545                                       size_t ps,
21546                                       size_t& artificial_pinned_size)
21547 {
21548     last_npinned_plug_p = FALSE;
21549     last_pinned_plug_p = TRUE;
21550     pinned_plug_p = TRUE;
21551     artificial_pinned_size = ps;
21552 }
21553
21554 // Because we have artificial pinning, we can't guarantee that pinned and npinned
21555 // plugs are always interleaved.
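// For each new plug this records the gap in front of it and saves whatever
// the plug headers will overwrite: a pinned plug that directly follows another
// plug saves its pre-plug info, a non-pinned plug that directly follows a
// pinned plug saves that pin's post-plug info, and two pinned plugs in a row
// are merged (merge_with_last_pin_p).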
21556 void gc_heap::store_plug_gap_info (uint8_t* plug_start,
21557                                    uint8_t* plug_end,
21558                                    BOOL& last_npinned_plug_p, 
21559                                    BOOL& last_pinned_plug_p, 
21560                                    uint8_t*& last_pinned_plug,
21561                                    BOOL& pinned_plug_p,
21562                                    uint8_t* last_object_in_last_plug,
21563                                    BOOL& merge_with_last_pin_p,
21564                                    // this is only for verification purpose
21565                                    size_t last_plug_len)
21566 {
21567     UNREFERENCED_PARAMETER(last_plug_len);
21568
21569     if (!last_npinned_plug_p && !last_pinned_plug_p)
21570     {
21571         //dprintf (3, ("last full plug end: %Ix, full plug start: %Ix", plug_end, plug_start));
21572         dprintf (3, ("Free: %Ix", (plug_start - plug_end)));
21573         assert ((plug_start == plug_end) || ((size_t)(plug_start - plug_end) >= Align (min_obj_size)));
21574         set_gap_size (plug_start, plug_start - plug_end);
21575     }
21576
21577     if (pinned (plug_start))
21578     {
21579         BOOL save_pre_plug_info_p = FALSE;
21580
21581         if (last_npinned_plug_p || last_pinned_plug_p)
21582         {
21583             //if (last_plug_len == Align (min_obj_size))
21584             //{
21585             //    dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct"));
21586             //    GCToOSInterface::DebugBreak();
21587             //}
21588             save_pre_plug_info_p = TRUE;
21589         }
21590
21591         pinned_plug_p = TRUE;
21592         last_npinned_plug_p = FALSE;
21593
21594         if (last_pinned_plug_p)
21595         {
21596             dprintf (3, ("last plug %Ix was also pinned, should merge", last_pinned_plug));
21597             merge_with_last_pin_p = TRUE;
21598         }
21599         else
21600         {
21601             last_pinned_plug_p = TRUE;
21602             last_pinned_plug = plug_start;
21603                 
21604             enque_pinned_plug (last_pinned_plug, save_pre_plug_info_p, last_object_in_last_plug);
21605
21606             if (save_pre_plug_info_p)
21607             {
21608                 set_gap_size (plug_start, sizeof (gap_reloc_pair));
21609             }
21610         }
21611     }
21612     else
21613     {
21614         if (last_pinned_plug_p)
21615         {
21616             //if (Align (last_plug_len) < min_pre_pin_obj_size)
21617             //{
21618             //    dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct"));
21619             //    GCToOSInterface::DebugBreak();
21620             //}
21621
21622             save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start);
21623             set_gap_size (plug_start, sizeof (gap_reloc_pair));
21624
21625             verify_pins_with_post_plug_info("after saving post plug info");
21626         }
21627         last_npinned_plug_p = TRUE;
21628         last_pinned_plug_p = FALSE;
21629     }
21630 }
21631
21632 void gc_heap::record_interesting_data_point (interesting_data_point idp)
21633 {
21634 #ifdef GC_CONFIG_DRIVEN
21635     (interesting_data_per_gc[idp])++;
21636 #else
21637     UNREFERENCED_PARAMETER(idp);
21638 #endif //GC_CONFIG_DRIVEN
21639 }
21640
21641 #ifdef _PREFAST_
21642 #pragma warning(push)
21643 #pragma warning(disable:21000) // Suppress PREFast warning about overly large function
21644 #endif //_PREFAST_
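// The plan phase: walk the condemned generations, coalesce surviving objects
// into plugs, record each plug's new (planned) location and build the
// brick-indexed plug trees, queue and size the pinned plugs, and plan the new
// generation starts. Whether we then actually compact or sweep is decided
// afterwards from what this phase computed.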
21645 void gc_heap::plan_phase (int condemned_gen_number)
21646 {
21647     size_t old_gen2_allocated = 0;
21648     size_t old_gen2_size = 0;
21649
21650     if (condemned_gen_number == (max_generation - 1))
21651     {
21652         old_gen2_allocated = generation_free_list_allocated (generation_of (max_generation));
21653         old_gen2_size = generation_size (max_generation);
21654     }
21655
21656     assert (settings.concurrent == FALSE);
21657
21658     // %type%  category = quote (plan);
21659 #ifdef TIME_GC
21660     unsigned start;
21661     unsigned finish;
21662     start = GetCycleCount32();
21663 #endif //TIME_GC
21664
21665     dprintf (2,("---- Plan Phase ---- Condemned generation %d, promotion: %d",
21666                 condemned_gen_number, settings.promotion ? 1 : 0));
21667
21668     generation*  condemned_gen1 = generation_of (condemned_gen_number);
21669
21670 #ifdef MARK_LIST
21671     BOOL use_mark_list = FALSE;
21672     uint8_t** mark_list_next = &mark_list[0];
21673 #ifdef GC_CONFIG_DRIVEN
21674     dprintf (3, ("total number of marked objects: %Id (%Id)",
21675                  (mark_list_index - &mark_list[0]), ((mark_list_end - &mark_list[0]))));
21676     
21677     if (mark_list_index >= (mark_list_end + 1))
21678         mark_list_index = mark_list_end + 1;
21679 #else
21680     dprintf (3, ("mark_list length: %Id",
21681                  (mark_list_index - &mark_list[0])));
21682 #endif //GC_CONFIG_DRIVEN
21683
21684     if ((condemned_gen_number < max_generation) &&
21685         (mark_list_index <= mark_list_end) 
21686 #ifdef BACKGROUND_GC        
21687         && (!recursive_gc_sync::background_running_p())
21688 #endif //BACKGROUND_GC
21689         )
21690     {
21691 #ifndef MULTIPLE_HEAPS
21692         _sort (&mark_list[0], mark_list_index-1, 0);
21693         //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0)));
21694         //verify_qsort_array (&mark_list[0], mark_list_index-1);
21695 #endif //!MULTIPLE_HEAPS
21696         use_mark_list = TRUE;
21697         get_gc_data_per_heap()->set_mechanism_bit (gc_mark_list_bit);
21698     }
21699     else
21700     {
21701         dprintf (3, ("mark_list not used"));
21702     }
21703
21704 #endif //MARK_LIST
21705
21706 #ifdef FEATURE_BASICFREEZE
21707     if ((generation_start_segment (condemned_gen1) != ephemeral_heap_segment) &&
21708         ro_segments_in_range)
21709     {
21710         sweep_ro_segments (generation_start_segment (condemned_gen1));
21711     }
21712 #endif // FEATURE_BASICFREEZE
21713
21714 #ifndef MULTIPLE_HEAPS
21715     if (shigh != (uint8_t*)0)
21716     {
21717         heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));
21718
21719         PREFIX_ASSUME(seg != NULL);
21720
21721         heap_segment* fseg = seg;
21722         do
21723         {
21724             if (slow > heap_segment_mem (seg) &&
21725                 slow < heap_segment_reserved (seg))
21726             {
21727                 if (seg == fseg)
21728                 {
21729                     uint8_t* o = generation_allocation_start (condemned_gen1) +
21730                         Align (size (generation_allocation_start (condemned_gen1)));
21731                     if (slow > o)
21732                     {
21733                         assert ((slow - o) >= (int)Align (min_obj_size));
21734 #ifdef BACKGROUND_GC
21735                         if (current_c_gc_state == c_gc_state_marking)
21736                         {
21737                             bgc_clear_batch_mark_array_bits (o, slow);
21738                         }
21739 #endif //BACKGROUND_GC
21740                         make_unused_array (o, slow - o);
21741                     }
21742                 } 
21743                 else
21744                 {
21745                     assert (condemned_gen_number == max_generation);
21746                     make_unused_array (heap_segment_mem (seg),
21747                                        slow - heap_segment_mem (seg));
21748                 }
21749             }
21750             if (in_range_for_segment (shigh, seg))
21751             {
21752 #ifdef BACKGROUND_GC
21753                 if (current_c_gc_state == c_gc_state_marking)
21754                 {
21755                     bgc_clear_batch_mark_array_bits ((shigh + Align (size (shigh))), heap_segment_allocated (seg));
21756                 }
21757 #endif //BACKGROUND_GC
21758                 heap_segment_allocated (seg) = shigh + Align (size (shigh));
21759             }
21760             // test if the segment is in the range of [slow, shigh]
21761             if (!((heap_segment_reserved (seg) >= slow) &&
21762                   (heap_segment_mem (seg) <= shigh)))
21763             {
21764                 // shorten it to minimum
21765                 heap_segment_allocated (seg) =  heap_segment_mem (seg);
21766             }
21767             seg = heap_segment_next_rw (seg);
21768         } while (seg);
21769     }
21770     else
21771     {
21772         heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));
21773
21774         PREFIX_ASSUME(seg != NULL);
21775
21776         heap_segment* sseg = seg;
21777         do
21778         {
21779             // shorten it to minimum
21780             if (seg == sseg)
21781             {
21782                 // no survivors; make all generations look empty
21783                 uint8_t* o = generation_allocation_start (condemned_gen1) +
21784                     Align (size (generation_allocation_start (condemned_gen1)));
21785 #ifdef BACKGROUND_GC
21786                 if (current_c_gc_state == c_gc_state_marking)
21787                 {
21788                     bgc_clear_batch_mark_array_bits (o, heap_segment_allocated (seg));
21789                 }
21790 #endif //BACKGROUND_GC
21791                 heap_segment_allocated (seg) = o;
21792             }
21793             else
21794             {
21795                 assert (condemned_gen_number == max_generation);
21796 #ifdef BACKGROUND_GC
21797                 if (current_c_gc_state == c_gc_state_marking)
21798                 {
21799                     bgc_clear_batch_mark_array_bits (heap_segment_mem (seg), heap_segment_allocated (seg));
21800                 }
21801 #endif //BACKGROUND_GC
21802                 heap_segment_allocated (seg) =  heap_segment_mem (seg);
21803             }
21804             seg = heap_segment_next_rw (seg);
21805         } while (seg);
21806     }
21807
21808 #endif //MULTIPLE_HEAPS
21809
21810     heap_segment*  seg1 = heap_segment_rw (generation_start_segment (condemned_gen1));
21811
21812     PREFIX_ASSUME(seg1 != NULL);
21813
21814     uint8_t*  end = heap_segment_allocated (seg1);
21815     uint8_t*  first_condemned_address = generation_allocation_start (condemned_gen1);
21816     uint8_t*  x = first_condemned_address;
21817
21818     assert (!marked (x));
21819     uint8_t*  plug_end = x;
21820     uint8_t*  tree = 0;
21821     size_t  sequence_number = 0;
21822     uint8_t*  last_node = 0;
21823     size_t  current_brick = brick_of (x);
21824     BOOL  allocate_in_condemned = ((condemned_gen_number == max_generation)||
21825                                    (settings.promotion == FALSE));
21826     int  active_old_gen_number = condemned_gen_number;
21827     int  active_new_gen_number = (allocate_in_condemned ? condemned_gen_number:
21828                                   (1 + condemned_gen_number));
21829     generation*  older_gen = 0;
21830     generation* consing_gen = condemned_gen1;
21831     alloc_list  r_free_list [MAX_BUCKET_COUNT];
21832
21833     size_t r_free_list_space = 0;
21834     size_t r_free_obj_space = 0;
21835     size_t r_older_gen_free_list_allocated = 0;
21836     size_t r_older_gen_condemned_allocated = 0;
21837     size_t r_older_gen_end_seg_allocated = 0;
21838     uint8_t*  r_allocation_pointer = 0;
21839     uint8_t*  r_allocation_limit = 0;
21840     uint8_t* r_allocation_start_region = 0;
21841     heap_segment*  r_allocation_segment = 0;
21842 #ifdef FREE_USAGE_STATS
21843     size_t r_older_gen_free_space[NUM_GEN_POWER2];
21844 #endif //FREE_USAGE_STATS
21845
21846     if ((condemned_gen_number < max_generation))
21847     {
21848         older_gen = generation_of (min (max_generation, 1 + condemned_gen_number));
21849         generation_allocator (older_gen)->copy_to_alloc_list (r_free_list);
21850
21851         r_free_list_space = generation_free_list_space (older_gen);
21852         r_free_obj_space = generation_free_obj_space (older_gen);
21853 #ifdef FREE_USAGE_STATS
21854         memcpy (r_older_gen_free_space, older_gen->gen_free_spaces, sizeof (r_older_gen_free_space));
21855 #endif //FREE_USAGE_STATS
21856         generation_allocate_end_seg_p (older_gen) = FALSE;
21857         r_older_gen_free_list_allocated = generation_free_list_allocated (older_gen);
21858         r_older_gen_condemned_allocated = generation_condemned_allocated (older_gen);
21859         r_older_gen_end_seg_allocated = generation_end_seg_allocated (older_gen);
21860         r_allocation_limit = generation_allocation_limit (older_gen);
21861         r_allocation_pointer = generation_allocation_pointer (older_gen);
21862         r_allocation_start_region = generation_allocation_context_start_region (older_gen);
21863         r_allocation_segment = generation_allocation_segment (older_gen);
21864         heap_segment* start_seg = heap_segment_rw (generation_start_segment (older_gen));
21865
21866         PREFIX_ASSUME(start_seg != NULL);
21867
21868         if (start_seg != ephemeral_heap_segment)
21869         {
21870             assert (condemned_gen_number == (max_generation - 1));
21871             while (start_seg && (start_seg != ephemeral_heap_segment))
21872             {
21873                 assert (heap_segment_allocated (start_seg) >=
21874                         heap_segment_mem (start_seg));
21875                 assert (heap_segment_allocated (start_seg) <=
21876                         heap_segment_reserved (start_seg));
21877                 heap_segment_plan_allocated (start_seg) =
21878                     heap_segment_allocated (start_seg);
21879                 start_seg = heap_segment_next_rw (start_seg);
21880             }
21881         }
21882     }
21883
21884     //reset all of the segment allocated sizes
21885     {
21886         heap_segment*  seg2 = heap_segment_rw (generation_start_segment (condemned_gen1));
21887
21888         PREFIX_ASSUME(seg2 != NULL);
21889
21890         while (seg2)
21891         {
21892             heap_segment_plan_allocated (seg2) =
21893                 heap_segment_mem (seg2);
21894             seg2 = heap_segment_next_rw (seg2);
21895         }
21896     }
21897     int  condemned_gn = condemned_gen_number;
21898
21899     int bottom_gen = 0;
21900     init_free_and_plug();
21901
21902     while (condemned_gn >= bottom_gen)
21903     {
21904         generation*  condemned_gen2 = generation_of (condemned_gn);
21905         generation_allocator (condemned_gen2)->clear();
21906         generation_free_list_space (condemned_gen2) = 0;
21907         generation_free_obj_space (condemned_gen2) = 0;
21908         generation_allocation_size (condemned_gen2) = 0;
21909         generation_condemned_allocated (condemned_gen2) = 0; 
21910         generation_pinned_allocated (condemned_gen2) = 0; 
21911         generation_free_list_allocated(condemned_gen2) = 0; 
21912         generation_end_seg_allocated (condemned_gen2) = 0; 
21913         generation_pinned_allocation_sweep_size (condemned_gen2) = 0;
21914         generation_pinned_allocation_compact_size (condemned_gen2) = 0;
21915 #ifdef FREE_USAGE_STATS
21916         generation_pinned_free_obj_space (condemned_gen2) = 0;
21917         generation_allocated_in_pinned_free (condemned_gen2) = 0;
21918         generation_allocated_since_last_pin (condemned_gen2) = 0;
21919 #endif //FREE_USAGE_STATS
21920         generation_plan_allocation_start (condemned_gen2) = 0;
21921         generation_allocation_segment (condemned_gen2) =
21922             heap_segment_rw (generation_start_segment (condemned_gen2));
21923
21924         PREFIX_ASSUME(generation_allocation_segment(condemned_gen2) != NULL);
21925
21926         if (generation_start_segment (condemned_gen2) != ephemeral_heap_segment)
21927         {
21928             generation_allocation_pointer (condemned_gen2) =
21929                 heap_segment_mem (generation_allocation_segment (condemned_gen2));
21930         }
21931         else
21932         {
21933             generation_allocation_pointer (condemned_gen2) = generation_allocation_start (condemned_gen2);
21934         }
21935
21936         generation_allocation_limit (condemned_gen2) = generation_allocation_pointer (condemned_gen2);
21937         generation_allocation_context_start_region (condemned_gen2) = generation_allocation_pointer (condemned_gen2);
21938
21939         condemned_gn--;
21940     }
21941
21942     BOOL allocate_first_generation_start = FALSE;
21943     
21944     if (allocate_in_condemned)
21945     {
21946         allocate_first_generation_start = TRUE;
21947     }
21948
21949     dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));
21950
21951     demotion_low = MAX_PTR;
21952     demotion_high = heap_segment_allocated (ephemeral_heap_segment);
21953
21954     // If we are doing a gen1 only because of cards, it means we should not demote any pinned plugs
21955     // from gen1. They should get promoted to gen2.
21956     demote_gen1_p = !(settings.promotion && 
21957                       (settings.condemned_generation == (max_generation - 1)) && 
21958                       gen_to_condemn_reasons.is_only_condition (gen_low_card_p));
21959
21960     total_ephemeral_size = 0;
21961
21962     print_free_and_plug ("BP");
21963
21964     for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
21965     {
21966         generation* temp_gen = generation_of (gen_idx);
21967
21968         dprintf (2, ("gen%d start %Ix, plan start %Ix",
21969             gen_idx, 
21970             generation_allocation_start (temp_gen),
21971             generation_plan_allocation_start (temp_gen)));
21972     }
21973
21974     BOOL fire_pinned_plug_events_p = EVENT_ENABLED(PinPlugAtGCTime);
21975     size_t last_plug_len = 0;
21976
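          // main plan loop: walk each segment of the condemned generations,
          // coalescing runs of adjacent marked objects into plugs. each plug either
          // gets a forwarding address (if we end up compacting) or is recorded as
          // pinned; relocation info is threaded into a per-brick tree of plug nodes.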
21977     while (1)
21978     {
21979         if (x >= end)
21980         {
21981             assert (x == end);
21982             assert (heap_segment_allocated (seg1) == end);
21983             heap_segment_allocated (seg1) = plug_end;
21984
21985             current_brick = update_brick_table (tree, current_brick, x, plug_end);
21986             dprintf (3, ("end of seg: new tree, sequence# 0"));
21987             sequence_number = 0;
21988             tree = 0;
21989
21990             if (heap_segment_next_rw (seg1))
21991             {
21992                 seg1 = heap_segment_next_rw (seg1);
21993                 end = heap_segment_allocated (seg1);
21994                 plug_end = x = heap_segment_mem (seg1);
21995                 current_brick = brick_of (x);
21996                 dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));
21997                 continue;
21998             }
21999             else
22000             {
22001                 break;
22002             }
22003         }
22004
22005         BOOL last_npinned_plug_p = FALSE;
22006         BOOL last_pinned_plug_p = FALSE;
22007
22008         // last_pinned_plug is the beginning of the last pinned plug. If we merge a plug into a pinned
22009         // plug we do not change the value of last_pinned_plug. This happens with artificially pinned plugs -
22010         // such a plug can be merged with a previous pinned plug, and a pinned plug after it can in turn be merged with it.
22011         uint8_t* last_pinned_plug = 0;
22012         size_t num_pinned_plugs_in_plug = 0;
22013
22014         uint8_t* last_object_in_plug = 0;
22015
22016         while ((x < end) && marked (x))
22017         {
22018             uint8_t*  plug_start = x;
22019             uint8_t*  saved_plug_end = plug_end;
22020             BOOL   pinned_plug_p = FALSE;
22021             BOOL   npin_before_pin_p = FALSE;
22022             BOOL   saved_last_npinned_plug_p = last_npinned_plug_p;
22023             uint8_t*  saved_last_object_in_plug = last_object_in_plug;
22024             BOOL   merge_with_last_pin_p = FALSE;
22025
22026             size_t added_pinning_size = 0;
22027             size_t artificial_pinned_size = 0;
22028
22029             store_plug_gap_info (plug_start, plug_end, last_npinned_plug_p, last_pinned_plug_p, 
22030                                  last_pinned_plug, pinned_plug_p, last_object_in_plug, 
22031                                  merge_with_last_pin_p, last_plug_len);
22032
22033 #ifdef FEATURE_STRUCTALIGN
22034             int requiredAlignment = ((CObjectHeader*)plug_start)->GetRequiredAlignment();
22035             size_t alignmentOffset = OBJECT_ALIGNMENT_OFFSET;
22036 #endif // FEATURE_STRUCTALIGN
22037
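                  // coalesce a run of adjacent marked objects with the same pinned
                  // state into one plug, clearing the mark and pin bits as the
                  // objects are absorbed into it.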
22038             {
22039                 uint8_t* xl = x;
22040                 while ((xl < end) && marked (xl) && (pinned (xl) == pinned_plug_p))
22041                 {
22042                     assert (xl < end);
22043                     if (pinned(xl))
22044                     {
22045                         clear_pinned (xl);
22046                     }
22047 #ifdef FEATURE_STRUCTALIGN
22048                     else
22049                     {
22050                         int obj_requiredAlignment = ((CObjectHeader*)xl)->GetRequiredAlignment();
22051                         if (obj_requiredAlignment > requiredAlignment)
22052                         {
22053                             requiredAlignment = obj_requiredAlignment;
22054                             alignmentOffset = xl - plug_start + OBJECT_ALIGNMENT_OFFSET;
22055                         }
22056                     }
22057 #endif // FEATURE_STRUCTALIGN
22058
22059                     clear_marked (xl);
22060
22061                     dprintf(4, ("+%Ix+", (size_t)xl));
22062                     assert ((size (xl) > 0));
22063                     assert ((size (xl) <= loh_size_threshold));
22064
22065                     last_object_in_plug = xl;
22066
22067                     xl = xl + Align (size (xl));
22068                     Prefetch (xl);
22069                 }
22070
22071                 BOOL next_object_marked_p = ((xl < end) && marked (xl));
22072
22073                 if (pinned_plug_p)
22074                 {
22075                     // If it is pinned we need to extend to the next marked object, as we can't use part of
22076                     // a pinned object to make the artificial gap (unless the last 3 ptr-sized words are all
22077                     // references, but for now we just use the next non-pinned object for the gap instead).
22078                     if (next_object_marked_p) 
22079                     {
22080                         clear_marked (xl);
22081                         last_object_in_plug = xl;
22082                         size_t extra_size = Align (size (xl));
22083                         xl = xl + extra_size;
22084                         added_pinning_size = extra_size;
22085                     }
22086                 }
22087                 else
22088                 {
22089                     if (next_object_marked_p)
22090                         npin_before_pin_p = TRUE;
22091                 }
22092
22093                 assert (xl <= end);
22094                 x = xl;
22095             }
22096             dprintf (3, ( "%Ix[", (size_t)x));
22097             plug_end = x;
22098             size_t ps = plug_end - plug_start;
22099             last_plug_len = ps;
22100             dprintf (3, ( "%Ix[(%Ix)", (size_t)x, ps));
22101             uint8_t*  new_address = 0;
22102
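                  // artificial pinning: a large plug (> 8 OS pages) that would move
                  // by less than 1/16th of its size is presumably not worth copying,
                  // so we pin it in place instead, eg a 64k plug stays put if its
                  // relocation distance would be under 4k.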
22103             if (!pinned_plug_p)
22104             {
22105                 if (allocate_in_condemned &&
22106                     (settings.condemned_generation == max_generation) &&
22107                     (ps > OS_PAGE_SIZE))
22108                 {
22109                     ptrdiff_t reloc = plug_start - generation_allocation_pointer (consing_gen);
22110                     //reloc should be >= 0 except when we relocate
22111                     //across segments and the dest seg is higher than the src
22112
22113                     if ((ps > (8*OS_PAGE_SIZE)) &&
22114                         (reloc > 0) &&
22115                         ((size_t)reloc < (ps/16)))
22116                     {
22117                         dprintf (3, ("Pinning %Ix; reloc would have been: %Ix",
22118                                      (size_t)plug_start, reloc));
22119                         // The last plug couldn't have been an npinned plug or it would have
22120                         // included this plug.
22121                         assert (!saved_last_npinned_plug_p);
22122
22123                         if (last_pinned_plug)
22124                         {
22125                             dprintf (3, ("artificially pinned plug merged with last pinned plug"));
22126                             merge_with_last_pin_p = TRUE;
22127                         }
22128                         else
22129                         {
22130                             enque_pinned_plug (plug_start, FALSE, 0);
22131                             last_pinned_plug = plug_start;
22132                         }
22133
22134                         convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
22135                                                 ps, artificial_pinned_size);
22136                     }
22137                 }
22138             }
22139
22140             if (allocate_first_generation_start)
22141             {
22142                 allocate_first_generation_start = FALSE;
22143                 plan_generation_start (condemned_gen1, consing_gen, plug_start);
22144                 assert (generation_plan_allocation_start (condemned_gen1));
22145             }
22146
22147             if (seg1 == ephemeral_heap_segment)
22148             {
22149                 process_ephemeral_boundaries (plug_start, active_new_gen_number,
22150                                               active_old_gen_number,
22151                                               consing_gen,
22152                                               allocate_in_condemned);
22153             }
22154
22155             dprintf (3, ("adding %Id to gen%d surv", ps, active_old_gen_number));
22156
22157             dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number);
22158             dd_survived_size (dd_active_old) += ps;
22159
22160             BOOL convert_to_pinned_p = FALSE;
22161
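                  // find a new home for a non pinned plug: either fit it into the
                  // older generation's free list, or bump allocate it in the
                  // condemned generation via the consing gen.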
22162             if (!pinned_plug_p)
22163             {
22164 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
22165                 dd_num_npinned_plugs (dd_active_old)++;
22166 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
22167
22168                 add_gen_plug (active_old_gen_number, ps);
22169
22170                 if (allocate_in_condemned)
22171                 {
22172                     verify_pins_with_post_plug_info("before aic");
22173
22174                     new_address =
22175                         allocate_in_condemned_generations (consing_gen,
22176                                                            ps,
22177                                                            active_old_gen_number,
22178 #ifdef SHORT_PLUGS
22179                                                            &convert_to_pinned_p,
22180                                                            (npin_before_pin_p ? plug_end : 0),
22181                                                            seg1,
22182 #endif //SHORT_PLUGS
22183                                                            plug_start REQD_ALIGN_AND_OFFSET_ARG);
22184                     verify_pins_with_post_plug_info("after aic");
22185                 }
22186                 else
22187                 {
22188                     new_address = allocate_in_older_generation (older_gen, ps, active_old_gen_number, plug_start REQD_ALIGN_AND_OFFSET_ARG);
22189
22190                     if (new_address != 0)
22191                     {
22192                         if (settings.condemned_generation == (max_generation - 1))
22193                         {
22194                             dprintf (3, (" NA: %Ix-%Ix -> %Ix, %Ix (%Ix)",
22195                                 plug_start, plug_end,
22196                                 (size_t)new_address, (size_t)new_address + (plug_end - plug_start),
22197                                 (size_t)(plug_end - plug_start)));
22198                         }
22199                     }
22200                     else
22201                     {
22202                         if (generation_allocator(older_gen)->discard_if_no_fit_p())
22203                         {
22204                             allocate_in_condemned = TRUE;
22205                         }
22206
22207                         new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number, 
22208 #ifdef SHORT_PLUGS
22209                                                                          &convert_to_pinned_p,
22210                                                                          (npin_before_pin_p ? plug_end : 0),
22211                                                                          seg1,
22212 #endif //SHORT_PLUGS
22213                                                                          plug_start REQD_ALIGN_AND_OFFSET_ARG);
22214                     }
22215                 }
22216
22217                 if (convert_to_pinned_p)
22218                 {
22219                     assert (last_npinned_plug_p != FALSE);
22220                     assert (last_pinned_plug_p == FALSE);
22221                     convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
22222                                             ps, artificial_pinned_size);
22223                     enque_pinned_plug (plug_start, FALSE, 0);
22224                     last_pinned_plug = plug_start;
22225                 }
22226                 else
22227                 {
22228                     if (!new_address)
22229                     {
22230                         //verify that we are at the end of the ephemeral segment
22231                         assert (generation_allocation_segment (consing_gen) ==
22232                                 ephemeral_heap_segment);
22233                         //verify that we are near the end
22234                         assert ((generation_allocation_pointer (consing_gen) + Align (ps)) <
22235                                 heap_segment_allocated (ephemeral_heap_segment));
22236                         assert ((generation_allocation_pointer (consing_gen) + Align (ps)) >
22237                                 (heap_segment_allocated (ephemeral_heap_segment) - Align (min_obj_size)));
22238                     }
22239                     else
22240                     {
22241 #ifdef SIMPLE_DPRINTF
22242                         dprintf (3, ("(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d)",
22243                             (size_t)(node_gap_size (plug_start)), 
22244                             plug_start, plug_end, (size_t)new_address, (size_t)(plug_start - new_address),
22245                                 (size_t)new_address + ps, ps, 
22246                                 (is_plug_padded (plug_start) ? 1 : 0)));
22247 #endif //SIMPLE_DPRINTF
22248
22249 #ifdef SHORT_PLUGS
22250                         if (is_plug_padded (plug_start))
22251                         {
22252                             dprintf (3, ("%Ix was padded", plug_start));
22253                             dd_padding_size (dd_active_old) += Align (min_obj_size);
22254                         }
22255 #endif //SHORT_PLUGS
22256                     }
22257                 }
22258             }
22259
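                  // a pinned plug "relocates" to itself: record (or merge) its pin
                  // info and charge its size to the pinned survival counts in the
                  // dynamic data.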
22260             if (pinned_plug_p)
22261             {
22262                 if (fire_pinned_plug_events_p)
22263                 {
22264                     FIRE_EVENT(PinPlugAtGCTime, plug_start, plug_end, 
22265                                (merge_with_last_pin_p ? 0 : (uint8_t*)node_gap_size (plug_start)));
22266                 }
22267
22268                 if (merge_with_last_pin_p)
22269                 {
22270                     merge_with_last_pinned_plug (last_pinned_plug, ps);
22271                 }
22272                 else
22273                 {
22274                     assert (last_pinned_plug == plug_start);
22275                     set_pinned_info (plug_start, ps, consing_gen);
22276                 }
22277
22278                 new_address = plug_start;
22279
22280                 dprintf (3, ( "(%Ix)PP: [%Ix, %Ix[%Ix](m:%d)",
22281                             (size_t)(node_gap_size (plug_start)), (size_t)plug_start,
22282                             (size_t)plug_end, ps,
22283                             (merge_with_last_pin_p ? 1 : 0)));
22284
22285                 dprintf (3, ("adding %Id to gen%d pinned surv", plug_end - plug_start, active_old_gen_number));
22286                 dd_pinned_survived_size (dd_active_old) += plug_end - plug_start;
22287                 dd_added_pinned_size (dd_active_old) += added_pinning_size;
22288                 dd_artificial_pinned_survived_size (dd_active_old) += artificial_pinned_size;
22289
22290                 if (!demote_gen1_p && (active_old_gen_number == (max_generation - 1)))
22291                 {
22292                     last_gen1_pin_end = plug_end;
22293                 }
22294             }
22295
22296 #ifdef _DEBUG
22297             // detect forward allocation in the same segment
22298             assert (!((new_address > plug_start) &&
22299                 (new_address < heap_segment_reserved (seg1))));
22300 #endif //_DEBUG
22301
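                  // record this plug's relocation info: bricks point at a tree of
                  // plug nodes and each node carries its relocation distance, which
                  // is what the relocate and compact phases consult later.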
22302             if (!merge_with_last_pin_p)
22303             {
22304                 if (current_brick != brick_of (plug_start))
22305                 {
22306                     current_brick = update_brick_table (tree, current_brick, plug_start, saved_plug_end);
22307                     sequence_number = 0;
22308                     tree = 0;
22309                 }
22310
22311                 set_node_relocation_distance (plug_start, (new_address - plug_start));
22312                 if (last_node && (node_relocation_distance (last_node) ==
22313                                   (node_relocation_distance (plug_start) +
22314                                    (ptrdiff_t)node_gap_size (plug_start))))
22315                 {
22316                     //dprintf(3,( " Lb"));
22317                     dprintf (3, ("%Ix Lb", plug_start));
22318                     set_node_left (plug_start);
22319                 }
22320                 if (0 == sequence_number)
22321                 {
22322                     dprintf (2, ("sn: 0, tree is set to %Ix", plug_start));
22323                     tree = plug_start;
22324                 }
22325
22326                 verify_pins_with_post_plug_info("before insert node");
22327
22328                 tree = insert_node (plug_start, ++sequence_number, tree, last_node);
22329                 dprintf (3, ("tree is %Ix (b: %Ix) after insert_node", tree, brick_of (tree)));
22330                 last_node = plug_start;
22331
22332 #ifdef _DEBUG
22333                 // If the plug right before us is a pinned plug, we should save its gap info
22334                 if (!pinned_plug_p)
22335                 {
22336                     if (mark_stack_tos > 0)
22337                     {
22338                         mark& m = mark_stack_array[mark_stack_tos - 1];
22339                         if (m.has_post_plug_info())
22340                         {
22341                             uint8_t* post_plug_info_start = m.saved_post_plug_info_start;
22342                             size_t* current_plug_gap_start = (size_t*)(plug_start - sizeof (plug_and_gap));
22343                             if ((uint8_t*)current_plug_gap_start == post_plug_info_start)
22344                             {
22345                                 dprintf (3, ("Ginfo: %Ix, %Ix, %Ix",
22346                                     *current_plug_gap_start, *(current_plug_gap_start + 1),
22347                                     *(current_plug_gap_start + 2)));
22348                                 memcpy (&(m.saved_post_plug_debug), current_plug_gap_start, sizeof (gap_reloc_pair));
22349                             }
22350                         }
22351                     }
22352                 }
22353 #endif //_DEBUG
22354
22355                 verify_pins_with_post_plug_info("after insert node");
22356             }
22357         }
22358         
22359         if (num_pinned_plugs_in_plug > 1)
22360         {
22361             dprintf (3, ("more than %Id pinned plugs in this plug", num_pinned_plugs_in_plug));
22362         }
22363
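              // skip the dead space up to the next marked object, either via the
              // sorted mark list or by walking object sizes (also marking the dead
              // objects for the background GC if one is in its marking phase).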
22364         {
22365 #ifdef MARK_LIST
22366             if (use_mark_list)
22367             {
22368                while ((mark_list_next < mark_list_index) &&
22369                       (*mark_list_next <= x))
22370                {
22371                    mark_list_next++;
22372                }
22373                if ((mark_list_next < mark_list_index)
22374 #ifdef MULTIPLE_HEAPS
22375                    && (*mark_list_next < end) //for multiple segments
22376 #endif //MULTIPLE_HEAPS
22377                    )
22378                    x = *mark_list_next;
22379                else
22380                    x = end;
22381             }
22382             else
22383 #endif //MARK_LIST
22384             {
22385                 uint8_t* xl = x;
22386 #ifdef BACKGROUND_GC
22387                 if (current_c_gc_state == c_gc_state_marking)
22388                 {
22389                     assert (recursive_gc_sync::background_running_p());
22390                     while ((xl < end) && !marked (xl))
22391                     {
22392                         dprintf (4, ("-%Ix-", (size_t)xl));
22393                         assert ((size (xl) > 0));
22394                         background_object_marked (xl, TRUE);
22395                         xl = xl + Align (size (xl));
22396                         Prefetch (xl);
22397                     }
22398                 }
22399                 else
22400 #endif //BACKGROUND_GC
22401                 {
22402                     while ((xl < end) && !marked (xl))
22403                     {
22404                         dprintf (4, ("-%Ix-", (size_t)xl));
22405                         assert ((size (xl) > 0));
22406                         xl = xl + Align (size (xl));
22407                         Prefetch (xl);
22408                     }
22409                 }
22410                 assert (xl <= end);
22411                 x = xl;
22412             }
22413         }
22414     }
22415
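          // drain the remaining pinned plugs: walk them in address order, plan the
          // rest of the generation starts in front of the oldest pin if it's in the
          // ephemeral segment, and advance the consing allocation pointer past each
          // pin so nothing gets planned on top of it.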
22416     while (!pinned_plug_que_empty_p())
22417     {
22418         if (settings.promotion)
22419         {
22420             uint8_t* pplug = pinned_plug (oldest_pin());
22421             if (in_range_for_segment (pplug, ephemeral_heap_segment))
22422             {
22423                 consing_gen = ensure_ephemeral_heap_segment (consing_gen);
22424                 //allocate all of the generation gaps
22425                 while (active_new_gen_number > 0)
22426                 {
22427                     active_new_gen_number--;
22428
22429                     if (active_new_gen_number == (max_generation - 1))
22430                     {
22431                         maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
22432                         if (!demote_gen1_p)
22433                             advance_pins_for_demotion (consing_gen);
22434                     }
22435
22436                     generation* gen = generation_of (active_new_gen_number);
22437                     plan_generation_start (gen, consing_gen, 0);
22438
22439                     if (demotion_low == MAX_PTR)
22440                     {
22441                         demotion_low = pplug;
22442                         dprintf (3, ("end plan: dlow->%Ix", demotion_low));
22443                     }
22444
22445                     dprintf (2, ("(%d)gen%d plan start: %Ix", 
22446                                   heap_number, active_new_gen_number, (size_t)generation_plan_allocation_start (gen)));
22447                     assert (generation_plan_allocation_start (gen));
22448                 }
22449             }
22450         }
22451
22452         if (pinned_plug_que_empty_p())
22453             break;
22454
22455         size_t  entry = deque_pinned_plug();
22456         mark*  m = pinned_plug_of (entry);
22457         uint8_t*  plug = pinned_plug (m);
22458         size_t  len = pinned_len (m);
22459
22460         // detect a pinned block in a different (later) segment than the
22461         // allocation segment
22462         heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen));
22463
22464         while ((plug < generation_allocation_pointer (consing_gen)) ||
22465                (plug >= heap_segment_allocated (nseg)))
22466         {
22467             assert ((plug < heap_segment_mem (nseg)) ||
22468                     (plug > heap_segment_reserved (nseg)));
22469             //adjust the end of the segment to be the end of the plug
22470             assert (generation_allocation_pointer (consing_gen)>=
22471                     heap_segment_mem (nseg));
22472             assert (generation_allocation_pointer (consing_gen)<=
22473                     heap_segment_committed (nseg));
22474
22475             heap_segment_plan_allocated (nseg) =
22476                 generation_allocation_pointer (consing_gen);
22477             //switch allocation segment
22478             nseg = heap_segment_next_rw (nseg);
22479             generation_allocation_segment (consing_gen) = nseg;
22480             //reset the allocation pointer and limits
22481             generation_allocation_pointer (consing_gen) =
22482                 heap_segment_mem (nseg);
22483         }
22484
22485         set_new_pin_info (m, generation_allocation_pointer (consing_gen));
22486         dprintf (2, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug),
22487             (size_t)(brick_table[brick_of (plug)])));
22488
22489         generation_allocation_pointer (consing_gen) = plug + len;
22490         generation_allocation_limit (consing_gen) =
22491             generation_allocation_pointer (consing_gen);
22492         //Add the size of the pinned plug to the right pinned allocations
22493         //find out which gen this pinned plug came from 
22494         int frgn = object_gennum (plug);
22495         if ((frgn != (int)max_generation) && settings.promotion)
22496         {
22497             generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
22498         }
22499
22500     }
22501
22502     plan_generation_starts (consing_gen);
22503     print_free_and_plug ("AP");
22504
22505     {
22506 #ifdef SIMPLE_DPRINTF
22507         for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
22508         {
22509             generation* temp_gen = generation_of (gen_idx);
22510             dynamic_data* temp_dd = dynamic_data_of (gen_idx);
22511
22512             int added_pinning_ratio = 0;
22513             int artificial_pinned_ratio = 0;
22514
22515             if (dd_pinned_survived_size (temp_dd) != 0)
22516             {
22517                 added_pinning_ratio = (int)((float)dd_added_pinned_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
22518                 artificial_pinned_ratio = (int)((float)dd_artificial_pinned_survived_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
22519             }
22520
22521             size_t padding_size = 
22522 #ifdef SHORT_PLUGS
22523                 dd_padding_size (temp_dd);
22524 #else
22525                 0;
22526 #endif //SHORT_PLUGS
22527             dprintf (1, ("gen%d: %Ix, %Ix(%Id), NON PIN alloc: %Id, pin com: %Id, sweep: %Id, surv: %Id, pinsurv: %Id(%d%% added, %d%% art), np surv: %Id, pad: %Id",
22528                 gen_idx, 
22529                 generation_allocation_start (temp_gen),
22530                 generation_plan_allocation_start (temp_gen),
22531                 (size_t)(generation_plan_allocation_start (temp_gen) - generation_allocation_start (temp_gen)),
22532                 generation_allocation_size (temp_gen),
22533                 generation_pinned_allocation_compact_size (temp_gen),
22534                 generation_pinned_allocation_sweep_size (temp_gen),
22535                 dd_survived_size (temp_dd),
22536                 dd_pinned_survived_size (temp_dd),
22537                 added_pinning_ratio,
22538                 artificial_pinned_ratio,
22539                 (dd_survived_size (temp_dd) - dd_pinned_survived_size (temp_dd)),
22540                 padding_size));
22541         }
22542 #endif //SIMPLE_DPRINTF
22543     }
22544
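          // for a gen1 GC, work out how much gen2 is planned to grow and break the
          // growth down into free list / end of seg / condemned allocations; this
          // feeds maxgen_size_info and the provisional mode decisions.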
22545     if (settings.condemned_generation == (max_generation - 1 ))
22546     {
22547         size_t plan_gen2_size = generation_plan_size (max_generation);
22548         ptrdiff_t growth = (ptrdiff_t)(plan_gen2_size - old_gen2_size);
22549
22550         if (growth > 0)
22551         {
22552             dprintf (1, ("gen2 grew %Id (end seg alloc: %Id, condemned alloc: %Id", 
22553                          growth, generation_end_seg_allocated (generation_of (max_generation)), 
                              generation_condemned_allocated (generation_of (max_generation - 1))));
22554
22555             maxgen_size_inc_p = true;
22556         }
22557         else
22558         {
22559             dprintf (2, ("gen2 shrank %Id (end seg alloc: %Id, gen1 c alloc: %Id", 
22560                          (old_gen2_size - plan_gen2_size), generation_end_seg_allocated (generation_of (max_generation)), 
22561                          generation_condemned_allocated (generation_of (max_generation - 1))));
22562         }
22563
22564         generation* older_gen = generation_of (settings.condemned_generation + 1);
22565         size_t rejected_free_space = generation_free_obj_space (older_gen) - r_free_obj_space;
22566         size_t free_list_allocated = generation_free_list_allocated (older_gen) - r_older_gen_free_list_allocated;
22567         size_t end_seg_allocated = generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated;
22568         size_t condemned_allocated = generation_condemned_allocated (older_gen) - r_older_gen_condemned_allocated;
22569
22570         dprintf (1, ("older gen's free alloc: %Id->%Id, seg alloc: %Id->%Id, condemned alloc: %Id->%Id",
22571                     r_older_gen_free_list_allocated, generation_free_list_allocated (older_gen),
22572                     r_older_gen_end_seg_allocated, generation_end_seg_allocated (older_gen), 
22573                     r_older_gen_condemned_allocated, generation_condemned_allocated (older_gen)));
22574
22575         dprintf (1, ("this GC did %Id free list alloc(%Id bytes free space rejected), %Id seg alloc and %Id condemned alloc, gen1 condemned alloc is %Id", 
22576             free_list_allocated, rejected_free_space, end_seg_allocated,
22577             condemned_allocated, generation_condemned_allocated (generation_of (settings.condemned_generation))));
22578
22579         maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info);
22580         maxgen_size_info->free_list_allocated = free_list_allocated;
22581         maxgen_size_info->free_list_rejected = rejected_free_space;
22582         maxgen_size_info->end_seg_allocated = end_seg_allocated;
22583         maxgen_size_info->condemned_allocated = condemned_allocated;
22584         maxgen_size_info->pinned_allocated = maxgen_pinned_compact_before_advance;
22585         maxgen_size_info->pinned_allocated_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)) - maxgen_pinned_compact_before_advance;
22586
22587 #ifdef FREE_USAGE_STATS
22588         int free_list_efficiency = 0;
22589         if ((free_list_allocated + rejected_free_space) != 0)
22590             free_list_efficiency = (int)(((float) (free_list_allocated) / (float)(free_list_allocated + rejected_free_space)) * (float)100);
22591
22592         int running_free_list_efficiency = (int)(generation_allocator_efficiency(older_gen)*100);
22593
22594         dprintf (1, ("gen%d free list alloc effi: %d%%, current effi: %d%%",
22595                     older_gen->gen_num,
22596                     free_list_efficiency, running_free_list_efficiency));
22597
22598         dprintf (1, ("gen2 free list change"));
22599         for (int j = 0; j < NUM_GEN_POWER2; j++)
22600         {
22601             dprintf (1, ("[h%d][#%Id]: 2^%d: F: %Id->%Id(%Id), P: %Id", 
22602                 heap_number, 
22603                 settings.gc_index,
22604                 (j + 10), r_older_gen_free_space[j], older_gen->gen_free_spaces[j], 
22605                 (ptrdiff_t)(r_older_gen_free_space[j] - older_gen->gen_free_spaces[j]),
22606                 (generation_of(max_generation - 1))->gen_plugs[j]));
22607         }
22608 #endif //FREE_USAGE_STATS
22609     }
22610
22611     size_t fragmentation =
22612         generation_fragmentation (generation_of (condemned_gen_number),
22613                                   consing_gen,
22614                                   heap_segment_allocated (ephemeral_heap_segment));
22615
22616     dprintf (2,("Fragmentation: %Id", fragmentation));
22617     dprintf (2,("---- End of Plan phase ----"));
22618
22619 #ifdef TIME_GC
22620     finish = GetCycleCount32();
22621     plan_time = finish - start;
22622 #endif //TIME_GC
22623
22624     // We may update the write barrier code. We assume here that the EE has been suspended if we are on a GC thread.
22625     assert(IsGCInProgress());
22626
22627     BOOL should_expand = FALSE;
22628     BOOL should_compact= FALSE;
22629     ephemeral_promotion = FALSE;
22630
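          // compaction decision: on 64-bit an ephemeral GC is forced to compact when
          // gen0 was flagged for reduction or the memory load on entry was 95% or
          // higher; otherwise decide_on_compacting weighs the fragmentation.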
22631 #ifdef BIT64
22632     if ((!settings.concurrent) &&
22633         !provisional_mode_triggered &&
22634         ((condemned_gen_number < max_generation) && 
22635          ((settings.gen0_reduction_count > 0) || (settings.entry_memory_load >= 95))))
22636     {
22637         dprintf (GTC_LOG, ("gen0 reduction count is %d, condemning %d, mem load %d",
22638                      settings.gen0_reduction_count,
22639                      condemned_gen_number,
22640                      settings.entry_memory_load));
22641         should_compact = TRUE;
22642
22643         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, 
22644             ((settings.gen0_reduction_count > 0) ? compact_fragmented_gen0 : compact_high_mem_load));
22645
22646         if ((condemned_gen_number >= (max_generation - 1)) && 
22647             dt_low_ephemeral_space_p (tuning_deciding_expansion))
22648         {
22649             dprintf (GTC_LOG, ("Not enough space for all ephemeral generations with compaction"));
22650             should_expand = TRUE;
22651         }
22652     }
22653     else
22654     {
22655 #endif // BIT64
22656         should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand);
22657 #ifdef BIT64
22658     }
22659 #endif // BIT64
22660
22661 #ifdef FEATURE_LOH_COMPACTION
22662     loh_compacted_p = FALSE;
22663 #endif //FEATURE_LOH_COMPACTION
22664
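          // for a full GC the LOH is either compacted (when plan_loh succeeds) or
          // swept; the LOH pin queue decays over full GCs that don't compact LOH
          // and is deleted once the decay count runs out.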
22665     if (condemned_gen_number == max_generation)
22666     {
22667 #ifdef FEATURE_LOH_COMPACTION
22668         if (settings.loh_compaction)
22669         {
22670             if (plan_loh())
22671             {
22672                 should_compact = TRUE;
22673                 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced);
22674                 loh_compacted_p = TRUE;
22675             }
22676         }
22677         else
22678         {
22679             if ((heap_number == 0) && (loh_pinned_queue))
22680             {
22681                 loh_pinned_queue_decay--;
22682
22683                 if (!loh_pinned_queue_decay)
22684                 {
22685                     delete loh_pinned_queue;
22686                     loh_pinned_queue = 0;
22687                 }
22688             }
22689         }
22690
22691         if (!loh_compacted_p)
22692 #endif //FEATURE_LOH_COMPACTION
22693         {
22694             GCToEEInterface::DiagWalkLOHSurvivors(__this);
22695             sweep_large_objects();
22696         }
22697     }
22698     else
22699     {
22700         settings.loh_compaction = FALSE;
22701     }
22702
22703 #ifdef MULTIPLE_HEAPS
22704
22705     new_heap_segment = NULL;
22706
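          // each heap votes for a policy (sweep < compact < expand); after the join
          // below, heaps that voted to sweep get upgraded if any heap needs to
          // compact, so the GC behaves consistently across heaps.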
22707     if (should_compact && should_expand)
22708         gc_policy = policy_expand;
22709     else if (should_compact)
22710         gc_policy = policy_compact;
22711     else
22712         gc_policy = policy_sweep;
22713
22714     //vote for result of should_compact
22715     dprintf (3, ("Joining for compaction decision"));
22716     gc_t_join.join(this, gc_join_decide_on_compaction);
22717     if (gc_t_join.joined())
22718     {
22719         //safe place to delete large heap segments
22720         if (condemned_gen_number == max_generation)
22721         {
22722             for (int i = 0; i < n_heaps; i++)
22723             {
22724                 g_heaps [i]->rearrange_large_heap_segments ();
22725             }
22726         }
22727
22728         if (maxgen_size_inc_p && provisional_mode_triggered)
22729         {
22730             pm_trigger_full_gc = true;
22731             dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2"));
22732         }
22733         else
22734         {
22735             settings.demotion = FALSE;
22736             int pol_max = policy_sweep;
22737 #ifdef GC_CONFIG_DRIVEN
22738             BOOL is_compaction_mandatory = FALSE;
22739 #endif //GC_CONFIG_DRIVEN
22740
22741             int i;
22742             for (i = 0; i < n_heaps; i++)
22743             {
22744                 if (pol_max < g_heaps[i]->gc_policy)
22745                     pol_max = policy_compact;
22746                 // set the demotion flag if any of the heaps has demotion
22747                 if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low)
22748                 {
22749                     (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit);
22750                     settings.demotion = TRUE;
22751                 }
22752
22753 #ifdef GC_CONFIG_DRIVEN
22754                 if (!is_compaction_mandatory)
22755                 {
22756                     int compact_reason = (g_heaps[i]->get_gc_data_per_heap())->get_mechanism (gc_heap_compact);
22757                     if (compact_reason >= 0)
22758                     {
22759                         if (gc_heap_compact_reason_mandatory_p[compact_reason])
22760                             is_compaction_mandatory = TRUE;
22761                     }
22762                 }
22763 #endif //GC_CONFIG_DRIVEN
22764             }
22765
22766 #ifdef GC_CONFIG_DRIVEN
22767             if (!is_compaction_mandatory)
22768             {
22769                 // If compaction is not mandatory we can feel free to change it to a sweeping GC.
22770                 // Note that we may want to change this to only checking every so often instead of every single GC.
22771                 if (should_do_sweeping_gc (pol_max >= policy_compact))
22772                 {
22773                     pol_max = policy_sweep;
22774                 }
22775                 else
22776                 {
22777                     if (pol_max == policy_sweep)
22778                         pol_max = policy_compact;
22779                 }
22780             }
22781 #endif //GC_CONFIG_DRIVEN
22782
22783             for (i = 0; i < n_heaps; i++)
22784             {
22785                 if (pol_max > g_heaps[i]->gc_policy)
22786                     g_heaps[i]->gc_policy = pol_max;
22787                 //get the segment while we are serialized
22788                 if (g_heaps[i]->gc_policy == policy_expand)
22789                 {
22790                     g_heaps[i]->new_heap_segment =
22791                         g_heaps[i]->soh_get_segment_to_expand();
22792                     if (!g_heaps[i]->new_heap_segment)
22793                     {
22794                         set_expand_in_full_gc (condemned_gen_number);
22795                         //we are out of memory, cancel the expansion
22796                         g_heaps[i]->gc_policy = policy_compact;
22797                     }
22798                 }
22799             }
22800
22801             BOOL is_full_compacting_gc = FALSE;
22802
22803             if ((gc_policy >= policy_compact) && (condemned_gen_number == max_generation))
22804             {
22805                 full_gc_counts[gc_type_compacting]++;
22806                 is_full_compacting_gc = TRUE;
22807             }
22808
22809             for (i = 0; i < n_heaps; i++)
22810             {
22811                 //copy the card and brick tables
22812                 if (g_gc_card_table!= g_heaps[i]->card_table)
22813                 {
22814                     g_heaps[i]->copy_brick_card_table();
22815                 }
22816
22817                 if (is_full_compacting_gc)
22818                 {
22819                     g_heaps[i]->loh_alloc_since_cg = 0;
22820                 }
22821             }
22822         }
22823
22824         //start all threads on the roots.
22825         dprintf(3, ("Starting all gc threads after compaction decision"));
22826         gc_t_join.restart();
22827     }
22828
22829     //reset the local variable accordingly
22830     should_compact = (gc_policy >= policy_compact);
22831     should_expand  = (gc_policy >= policy_expand);
22832
22833 #else //MULTIPLE_HEAPS
22834
22835     //safe place to delete large heap segments
22836     if (condemned_gen_number == max_generation)
22837     {
22838         rearrange_large_heap_segments ();
22839     }
22840
22841     if (maxgen_size_inc_p && provisional_mode_triggered)
22842     {
22843         pm_trigger_full_gc = true;
22844         dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2"));
22845     }
22846     else
22847     {
22848         settings.demotion = ((demotion_high >= demotion_low) ? TRUE : FALSE);
22849         if (settings.demotion)
22850             get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
22851
22852 #ifdef GC_CONFIG_DRIVEN
22853         BOOL is_compaction_mandatory = FALSE;
22854         int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
22855         if (compact_reason >= 0)
22856             is_compaction_mandatory = gc_heap_compact_reason_mandatory_p[compact_reason];
22857
22858         if (!is_compaction_mandatory)
22859         {
22860             if (should_do_sweeping_gc (should_compact))
22861                 should_compact = FALSE;
22862             else
22863                 should_compact = TRUE;
22864         }
22865 #endif //GC_CONFIG_DRIVEN
22866
22867         if (should_compact && (condemned_gen_number == max_generation))
22868         {
22869             full_gc_counts[gc_type_compacting]++;
22870             loh_alloc_since_cg = 0;
22871         }
22872     }
22873 #endif //MULTIPLE_HEAPS
22874
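          // provisional mode stress: every 5th gen1 GC, force a sweeping gen1 and
          // trigger a full blocking GC afterwards.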
22875     if (!pm_trigger_full_gc && pm_stress_on && provisional_mode_triggered)
22876     {
22877         if ((settings.condemned_generation == (max_generation - 1)) &&
22878             ((settings.gc_index % 5) == 0))
22879         {
22880             pm_trigger_full_gc = true;
22881         }
22882     }
22883
22884     if (settings.condemned_generation == (max_generation - 1))
22885     {
22886         if (provisional_mode_triggered)
22887         {
22888             if (should_expand)
22889             {
22890                 should_expand = FALSE;
22891                 dprintf (GTC_LOG, ("h%d in PM cannot expand", heap_number));
22892             }
22893         }
22894
22895         if (pm_trigger_full_gc)
22896         {
22897             should_compact = FALSE;
22898             dprintf (GTC_LOG, ("h%d PM doing sweeping", heap_number));
22899         }
22900     }
22901
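          // compacting path: relocate all references, slide the plugs to their new
          // addresses, then fix the generation bounds from the planned starts.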
22902     if (should_compact)
22903     {
22904         dprintf (2,( "**** Doing Compacting GC ****"));
22905
22906         if (should_expand)
22907         {
22908 #ifndef MULTIPLE_HEAPS
22909             heap_segment* new_heap_segment = soh_get_segment_to_expand();
22910 #endif //!MULTIPLE_HEAPS
22911             if (new_heap_segment)
22912             {
22913                 consing_gen = expand_heap(condemned_gen_number,
22914                                           consing_gen,
22915                                           new_heap_segment);
22916             }
22917
22918             // If we couldn't get a new segment, or we were able to 
22919             // reserve one but had no space to commit, we couldn't
22920             // expand the heap.
22921             if (ephemeral_heap_segment != new_heap_segment)
22922             {
22923                 set_expand_in_full_gc (condemned_gen_number);
22924                 should_expand = FALSE;
22925             }
22926         }
22927         generation_allocation_limit (condemned_gen1) =
22928             generation_allocation_pointer (condemned_gen1);
22929         if ((condemned_gen_number < max_generation))
22930         {
22931             generation_allocator (older_gen)->commit_alloc_list_changes();
22932
22933             // Fix the allocation area of the older generation
22934             fix_older_allocation_area (older_gen);
22935         }
22936         assert (generation_allocation_segment (consing_gen) ==
22937                 ephemeral_heap_segment);
22938
22939         GCToEEInterface::DiagWalkSurvivors(__this);
22940
22941         relocate_phase (condemned_gen_number, first_condemned_address);
22942         compact_phase (condemned_gen_number, first_condemned_address,
22943                        (!settings.demotion && settings.promotion));
22944         fix_generation_bounds (condemned_gen_number, consing_gen);
22945         assert (generation_allocation_limit (youngest_generation) ==
22946                 generation_allocation_pointer (youngest_generation));
22947         if (condemned_gen_number >= (max_generation -1))
22948         {
22949 #ifdef MULTIPLE_HEAPS
22950             // this needs to be serialized just because we have one
22951             // segment_standby_list/seg_table for all heaps. We should at least make it
22952             // so that when hoarding is not on we don't need this join, because
22953             // decommitting memory can take a long time.
22954             //must serialize on deleting segments
22955             gc_t_join.join(this, gc_join_rearrange_segs_compaction);
22956             if (gc_t_join.joined())
22957             {
22958                 for (int i = 0; i < n_heaps; i++)
22959                 {
22960                     g_heaps[i]->rearrange_heap_segments(TRUE);
22961                 }
22962                 gc_t_join.restart();
22963             }
22964 #else
22965             rearrange_heap_segments(TRUE);
22966 #endif //MULTIPLE_HEAPS
22967
22968             if (should_expand)
22969             {
22970                 //fix the start_segment for the ephemeral generations
22971                 for (int i = 0; i < max_generation; i++)
22972                 {
22973                     generation* gen = generation_of (i);
22974                     generation_start_segment (gen) = ephemeral_heap_segment;
22975                     generation_allocation_segment (gen) = ephemeral_heap_segment;
22976                 }
22977             }
22978         }
22979
22980         {
22981 #ifdef FEATURE_PREMORTEM_FINALIZATION
22982             finalize_queue->UpdatePromotedGenerations (condemned_gen_number,
22983                                                        (!settings.demotion && settings.promotion));
22984 #endif // FEATURE_PREMORTEM_FINALIZATION
22985
22986 #ifdef MULTIPLE_HEAPS
22987             dprintf(3, ("Joining after end of compaction"));
22988             gc_t_join.join(this, gc_join_adjust_handle_age_compact);
22989             if (gc_t_join.joined())
22990 #endif //MULTIPLE_HEAPS
22991             {
22992 #ifdef MULTIPLE_HEAPS
22993                 //join all threads to make sure they are synchronized
22994                 dprintf(3, ("Restarting after Promotion granted"));
22995                 gc_t_join.restart();
22996 #endif //MULTIPLE_HEAPS
22997             }
22998
22999             ScanContext sc;
23000             sc.thread_number = heap_number;
23001             sc.promotion = FALSE;
23002             sc.concurrent = FALSE;
23003             // the new generation bounds are set, so we can call this now
23004             if (settings.promotion && !settings.demotion)
23005             {
23006                 dprintf (2, ("Promoting EE roots for gen %d",
23007                              condemned_gen_number));
23008                 GCScan::GcPromotionsGranted(condemned_gen_number,
23009                                                 max_generation, &sc);
23010             }
23011             else if (settings.demotion)
23012             {
23013                 dprintf (2, ("Demoting EE roots for gen %d",
23014                              condemned_gen_number));
23015                 GCScan::GcDemote (condemned_gen_number, max_generation, &sc);
23016             }
23017         }
23018
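              // the gaps in front of the pinned plugs are dead space now: turn each
              // one into a free object and thread it onto the free list of whichever
              // generation owns that address range after demotion.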
23019         {
23020             gen0_big_free_spaces = 0;
23021
23022             reset_pinned_queue_bos();
23023             unsigned int  gen_number = min (max_generation, 1 + condemned_gen_number);
23024             generation*  gen = generation_of (gen_number);
23025             uint8_t*  low = generation_allocation_start (generation_of (gen_number-1));
23026             uint8_t*  high =  heap_segment_allocated (ephemeral_heap_segment);
23027             
23028             while (!pinned_plug_que_empty_p())
23029             {
23030                 mark*  m = pinned_plug_of (deque_pinned_plug());
23031                 size_t len = pinned_len (m);
23032                 uint8_t*  arr = (pinned_plug (m) - len);
23033                 dprintf(3,("free [%Ix %Ix[ pin",
23034                             (size_t)arr, (size_t)arr + len));
23035                 if (len != 0)
23036                 {
23037                     assert (len >= Align (min_obj_size));
23038                     make_unused_array (arr, len);
23039                     // fix fully contained bricks + first one
23040                     // if the array goes beyond the first brick
23041                     size_t start_brick = brick_of (arr);
23042                     size_t end_brick = brick_of (arr + len);
23043                     if (end_brick != start_brick)
23044                     {
23045                         dprintf (3,
23046                                     ("Fixing bricks [%Ix, %Ix[ to point to unused array %Ix",
23047                                     start_brick, end_brick, (size_t)arr));
23048                         set_brick (start_brick,
23049                                     arr - brick_address (start_brick));
23050                         size_t brick = start_brick+1;
23051                         while (brick < end_brick)
23052                         {
23053                             set_brick (brick, start_brick - brick);
23054                             brick++;
23055                         }
23056                     }
23057
23058                     //when we take an old segment to make the new
23059                     //ephemeral segment, we can have a bunch of
23060                     //pinned plugs out of order going to the new ephemeral seg
23061                     //and then the next plugs go back to max_generation
23062                     if ((heap_segment_mem (ephemeral_heap_segment) <= arr) &&
23063                         (heap_segment_reserved (ephemeral_heap_segment) > arr))
23064                     {
23065
23066                         while ((low <= arr) && (high > arr))
23067                         {
23068                             gen_number--;
23069                             assert ((gen_number >= 1) || (demotion_low != MAX_PTR) ||
23070                                     settings.demotion || !settings.promotion);
23071                             dprintf (3, ("new free list generation %d", gen_number));
23072
23073                             gen = generation_of (gen_number);
23074                             if (gen_number >= 1)
23075                                 low = generation_allocation_start (generation_of (gen_number-1));
23076                             else
23077                                 low = high;
23078                         }
23079                     }
23080                     else
23081                     {
23082                         dprintf (3, ("new free list generation %d", max_generation));
23083                         gen_number = max_generation;
23084                         gen = generation_of (gen_number);
23085                     }
23086
23087                     dprintf(3,("threading it into generation %d", gen_number));
23088                     thread_gap (arr, len, gen);
23089                     add_gen_free (gen_number, len);
23090                 }
23091             }
23092         }
23093
23094 #ifdef _DEBUG
23095         for (int x = 0; x <= max_generation; x++)
23096         {
23097             assert (generation_allocation_start (generation_of (x)));
23098         }
23099 #endif //_DEBUG
23100
23101         if (!settings.demotion && settings.promotion)
23102         {
23103             //clear card for generation 1. generation 0 is empty
23104             clear_card_for_addresses (
23105                 generation_allocation_start (generation_of (1)),
23106                 generation_allocation_start (generation_of (0)));
23107         }
23108         if (settings.promotion && !settings.demotion)
23109         {
23110             uint8_t* start = generation_allocation_start (youngest_generation);
23111             MAYBE_UNUSED_VAR(start);
23112             assert (heap_segment_allocated (ephemeral_heap_segment) ==
23113                     (start + Align (size (start))));
23114         }
23115     }
23116     else
23117     {
23118         //force promotion for sweep
23119         settings.promotion = TRUE;
23120         settings.compaction = FALSE;
23121
23122         ScanContext sc;
23123         sc.thread_number = heap_number;
23124         sc.promotion = FALSE;
23125         sc.concurrent = FALSE;
23126
23127         dprintf (2, ("**** Doing Mark and Sweep GC****"));
23128
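              // sweeping path: undo the speculative planning by restoring the older
              // generation's allocation state that was saved at the start of the
              // plan phase (the r_* values), then build free lists over the dead
              // space with make_free_lists.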
23129         if ((condemned_gen_number < max_generation))
23130         {
23131             generation_allocator (older_gen)->copy_from_alloc_list (r_free_list);
23132             generation_free_list_space (older_gen) = r_free_list_space;
23133             generation_free_obj_space (older_gen) = r_free_obj_space;
23134             generation_free_list_allocated (older_gen) = r_older_gen_free_list_allocated;
23135             generation_end_seg_allocated (older_gen) = r_older_gen_end_seg_allocated;
23136             generation_condemned_allocated (older_gen) = r_older_gen_condemned_allocated;
23137             generation_allocation_limit (older_gen) = r_allocation_limit;
23138             generation_allocation_pointer (older_gen) = r_allocation_pointer;
23139             generation_allocation_context_start_region (older_gen) = r_allocation_start_region;
23140             generation_allocation_segment (older_gen) = r_allocation_segment;
23141         }
23142
23143         if ((condemned_gen_number < max_generation))
23144         {
23145             // Fix the allocation area of the older generation
23146             fix_older_allocation_area (older_gen);
23147         }
23148
23149         GCToEEInterface::DiagWalkSurvivors(__this);
23150
23151         gen0_big_free_spaces = 0;
23152         make_free_lists (condemned_gen_number);
23153         recover_saved_pinned_info();
23154
23155 #ifdef FEATURE_PREMORTEM_FINALIZATION
23156         finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE);
23157 #endif // FEATURE_PREMORTEM_FINALIZATION
23158 // MTHTS: leave single thread for HT processing on plan_phase
23159 #ifdef MULTIPLE_HEAPS
23160         dprintf(3, ("Joining after end of sweep"));
23161         gc_t_join.join(this, gc_join_adjust_handle_age_sweep);
23162         if (gc_t_join.joined())
23163 #endif //MULTIPLE_HEAPS
23164         {
23165             GCScan::GcPromotionsGranted(condemned_gen_number,
23166                                             max_generation, &sc);
23167             if (condemned_gen_number >= (max_generation -1))
23168             {
23169 #ifdef MULTIPLE_HEAPS
23170                 for (int i = 0; i < n_heaps; i++)
23171                 {
23172                     g_heaps[i]->rearrange_heap_segments(FALSE);
23173                 }
23174 #else
23175                 rearrange_heap_segments(FALSE);
23176 #endif //MULTIPLE_HEAPS
23177             }
23178
23179 #ifdef MULTIPLE_HEAPS
23180             //join all threads to make sure they are synchronized
23181             dprintf(3, ("Restarting after Promotion granted"));
23182             gc_t_join.restart();
23183 #endif //MULTIPLE_HEAPS
23184         }
23185
23186 #ifdef _DEBUG
23187         for (int x = 0; x <= max_generation; x++)
23188         {
23189             assert (generation_allocation_start (generation_of (x)));
23190         }
23191 #endif //_DEBUG
23192
23193         //clear card for generation 1. generation 0 is empty
23194         clear_card_for_addresses (
23195             generation_allocation_start (generation_of (1)),
23196             generation_allocation_start (generation_of (0)));
23197         assert ((heap_segment_allocated (ephemeral_heap_segment) ==
23198                  (generation_allocation_start (youngest_generation) +
23199                   Align (min_obj_size))));
23200     }
23201
23202     //verify_partial();
23203 }
23204 #ifdef _PREFAST_
23205 #pragma warning(pop)
23206 #endif //_PREFAST_
23207
23208
23209 /*****************************
23210 Called after compact phase to fix all generation gaps
23211 ********************************/
23212 void gc_heap::fix_generation_bounds (int condemned_gen_number,
23213                                      generation* consing_gen)
23214 {
23215     UNREFERENCED_PARAMETER(consing_gen);
23216
23217     assert (generation_allocation_segment (consing_gen) ==
23218             ephemeral_heap_segment);
23219
23220     //assign the planned allocation start to the generation
23221     int gen_number = condemned_gen_number;
23222     int bottom_gen = 0;
23223
23224     while (gen_number >= bottom_gen)
23225     {
23226         generation*  gen = generation_of (gen_number);
23227         dprintf(3,("Fixing generation pointers for %d", gen_number));
23228         if ((gen_number < max_generation) && ephemeral_promotion)
23229         {
23230             make_unused_array (saved_ephemeral_plan_start[gen_number], 
23231                                saved_ephemeral_plan_start_size[gen_number]);
23232         }
23233         reset_allocation_pointers (gen, generation_plan_allocation_start (gen));
23234         make_unused_array (generation_allocation_start (gen), generation_plan_allocation_start_size (gen));
23235         dprintf(3,(" start %Ix", (size_t)generation_allocation_start (gen)));
23236         gen_number--;
23237     }
23238 #ifdef MULTIPLE_HEAPS
23239     if (ephemeral_promotion)
23240     {
23241         // We are creating a generation fault - set the cards.
23242         // We only do this for multiple heaps because in the single heap scenario the
23243         // new ephemeral generations will be empty and there will be no need to set cards for the
23244         // old ephemeral generations that got promoted into max_generation.
23245         ptrdiff_t delta = 0;
23246 #ifdef SEG_MAPPING_TABLE
23247         heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]);
23248 #else //SEG_MAPPING_TABLE
23249         heap_segment* old_ephemeral_seg = segment_of (saved_ephemeral_plan_start[max_generation-1], delta);
23250 #endif //SEG_MAPPING_TABLE
23251
23252         assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg));
23253         size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg)));
23254         size_t card = card_of (saved_ephemeral_plan_start[max_generation-1]);
23255         while (card != end_card)
23256         {
23257             set_card (card);
23258             card++;
23259         }
23260     }
23261 #endif //MULTIPLE_HEAPS
23262     {
23263         alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment);
23264         //reset the allocated size
23265         uint8_t* start = generation_allocation_start (youngest_generation);
23266         MAYBE_UNUSED_VAR(start);
23267         if (settings.promotion && !settings.demotion)
23268         {
23269             assert ((start + Align (size (start))) ==
23270                     heap_segment_plan_allocated(ephemeral_heap_segment));
23271         }
23272
23273         heap_segment_allocated(ephemeral_heap_segment)=
23274             heap_segment_plan_allocated(ephemeral_heap_segment);
23275     }
23276 }
23277
23278 uint8_t* gc_heap::generation_limit (int gen_number)
23279 {
23280     if (settings.promotion)
23281     {
23282         if (gen_number <= 1)
23283             return heap_segment_reserved (ephemeral_heap_segment);
23284         else
23285             return generation_allocation_start (generation_of ((gen_number - 2)));
23286     }
23287     else
23288     {
23289         if (gen_number <= 0)
23290             return heap_segment_reserved (ephemeral_heap_segment);
23291         else
23292             return generation_allocation_start (generation_of ((gen_number - 1)));
23293     }
23294 }
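// Note: a simplified sketch of the address layout generation_limit relies on.
// On the ephemeral segment, generations are laid out in descending gen number:
//
//     | gen2 ... | gen1 start ... | gen0 start ... allocated ... reserved |
//
// generation_limit (n) returns the first address past the objects that will
// belong to generation n after this GC. With promotion, survivors move up one
// generation, so the new gen n ends where the old gen (n-2) began; without
// promotion objects stay put and the bound is the old gen (n-1) start. The
// youngest generation(s) are bounded only by the segment's reserved end.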
23295
23296 BOOL gc_heap::ensure_gap_allocation (int condemned_gen_number)
23297 {
23298     uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
23299     size_t size = Align (min_obj_size)*(condemned_gen_number+1);
23300     assert ((start + size) <=
23301             heap_segment_reserved (ephemeral_heap_segment));
23302     if ((start + size) >
23303         heap_segment_committed (ephemeral_heap_segment))
23304     {
23305         if (!grow_heap_segment (ephemeral_heap_segment, start + size))
23306         {
23307             return FALSE;
23308         }
23309     }
23310     return TRUE;
23311 }
23312
23313 uint8_t* gc_heap::allocate_at_end (size_t size)
23314 {
23315     uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
23316     size = Align (size);
23317     uint8_t* result = start;
23318     // only called to allocate a min obj so can't overflow here.
23319     assert ((start + size) <=
23320             heap_segment_reserved (ephemeral_heap_segment));
23321     //ensure_gap_allocation took care of it
23322     assert ((start + size) <=
23323             heap_segment_committed (ephemeral_heap_segment));
23324     heap_segment_allocated (ephemeral_heap_segment) += size;
23325     return result;
23326 }
23327
23328
23329 void gc_heap::make_free_lists (int condemned_gen_number)
23330 {
23331 #ifdef TIME_GC
23332     unsigned start;
23333     unsigned finish;
23334     start = GetCycleCount32();
23335 #endif //TIME_GC
23336
23337     //Promotion has to happen in sweep case.
23338     assert (settings.promotion);
23339
23340     generation* condemned_gen = generation_of (condemned_gen_number);
23341     uint8_t* start_address = generation_allocation_start (condemned_gen);
23342
23343     size_t  current_brick = brick_of (start_address);
23344     heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
23345
23346     PREFIX_ASSUME(current_heap_segment != NULL);
23347
23348     uint8_t*  end_address = heap_segment_allocated (current_heap_segment);
23349     size_t  end_brick = brick_of (end_address-1);
23350     make_free_args args;
23351     args.free_list_gen_number = min (max_generation, 1 + condemned_gen_number);
23352     args.current_gen_limit = (((condemned_gen_number == max_generation)) ?
23353                               MAX_PTR :
23354                               (generation_limit (args.free_list_gen_number)));
23355     args.free_list_gen = generation_of (args.free_list_gen_number);
23356     args.highest_plug = 0;
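    // Note: current_gen_limit == MAX_PTR is a sentinel meaning "the new
    // max_generation start has not been established yet"; the first generation
    // boundary crossing (or an empty start segment) replaces it with a real
    // limit from generation_limit().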
23357
23358     if ((start_address < end_address) ||
23359         (condemned_gen_number == max_generation))
23360     {
23361         while (1)
23362         {
23363             if ((current_brick > end_brick))
23364             {
23365                 if (args.current_gen_limit == MAX_PTR)
23366                 {
23367                     //We had an empty segment
23368                     //need to allocate the generation start
23369
23370                     generation* gen = generation_of (max_generation);
23371
23372                     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
23373
23374                     PREFIX_ASSUME(start_seg != NULL);
23375
23376                     uint8_t* gap = heap_segment_mem (start_seg);
23377
23378                     generation_allocation_start (gen) = gap;
23379                     heap_segment_allocated (start_seg) = gap + Align (min_obj_size);
23380                     make_unused_array (gap, Align (min_obj_size));
23381                     reset_allocation_pointers (gen, gap);
23382                     dprintf (3, ("Start segment empty, fixing generation start of %d to: %Ix",
23383                                  max_generation, (size_t)gap));
23384                     args.current_gen_limit = generation_limit (args.free_list_gen_number);
23385                 }
23386                 if (heap_segment_next_rw (current_heap_segment))
23387                 {
23388                     current_heap_segment = heap_segment_next_rw (current_heap_segment);
23389                     current_brick = brick_of (heap_segment_mem (current_heap_segment));
23390                     end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
23391
23392                     continue;
23393                 }
23394                 else
23395                 {
23396                     break;
23397                 }
23398             }
23399             {
23400                 int brick_entry =  brick_table [ current_brick ];
23401                 if ((brick_entry >= 0))
23402                 {
23403                     make_free_list_in_brick (brick_address (current_brick) + brick_entry-1, &args);
23404                     dprintf(3,("Fixing brick entry %Ix to %Ix",
23405                                current_brick, (size_t)args.highest_plug));
23406                     set_brick (current_brick,
23407                                (args.highest_plug - brick_address (current_brick)));
23408                 }
23409                 else
23410                 {
23411                     if ((brick_entry > -32768))
23412                     {
23413
23414 #ifdef _DEBUG
23415                         ptrdiff_t offset = brick_of (args.highest_plug) - current_brick;
23416                         if ((brick_entry != -32767) && (! ((offset == brick_entry))))
23417                         {
23418                             assert ((brick_entry == -1));
23419                         }
23420 #endif //_DEBUG
23421                         //init to -1 for faster find_first_object
23422                         set_brick (current_brick, -1);
23423                     }
23424                 }
23425             }
23426             current_brick++;
23427         }
23428     }
23429     {
23430         int bottom_gen = 0;
23431         args.free_list_gen_number--;
23432         while (args.free_list_gen_number >= bottom_gen)
23433         {
23434             uint8_t*  gap = 0;
23435             generation* gen2 = generation_of (args.free_list_gen_number);
23436             gap = allocate_at_end (Align(min_obj_size));
23437             generation_allocation_start (gen2) = gap;
23438             reset_allocation_pointers (gen2, gap);
23439             dprintf(3,("Fixing generation start of %d to: %Ix",
23440                        args.free_list_gen_number, (size_t)gap));
23441             PREFIX_ASSUME(gap != NULL);
23442             make_unused_array (gap, Align (min_obj_size));
23443
23444             args.free_list_gen_number--;
23445         }
23446
23447         //reset the allocated size
23448         uint8_t* start2 = generation_allocation_start (youngest_generation);
23449         alloc_allocated = start2 + Align (size (start2));
23450     }
23451
23452 #ifdef TIME_GC
23453     finish = GetCycleCount32();
23454     sweep_time = finish - start;
23455 #endif //TIME_GC
23456 }
23457
23458 void gc_heap::make_free_list_in_brick (uint8_t* tree, make_free_args* args)
23459 {
23460     assert ((tree != NULL));
23461     {
23462         int  right_node = node_right_child (tree);
23463         int left_node = node_left_child (tree);
23464         args->highest_plug = 0;
23465         if (! (0 == tree))
23466         {
23467             if (! (0 == left_node))
23468             {
23469                 make_free_list_in_brick (tree + left_node, args);
23470
23471             }
23472             {
23473                 uint8_t*  plug = tree;
23474                 size_t  gap_size = node_gap_size (tree);
23475                 uint8_t*  gap = (plug - gap_size);
23476                 dprintf (3,("Making free list %Ix len %d in %d",
23477                 //dprintf (3,("F: %Ix len %Ix in %d",
23478                         (size_t)gap, gap_size, args->free_list_gen_number));
23479                 args->highest_plug = tree;
23480 #ifdef SHORT_PLUGS
23481                 if (is_plug_padded (plug))
23482                 {
23483                     dprintf (3, ("%Ix padded", plug));
23484                     clear_plug_padded (plug);
23485                 }
23486 #endif //SHORT_PLUGS
23487             gen_crossing:
23488                 {
23489                     if ((args->current_gen_limit == MAX_PTR) ||
23490                         ((plug >= args->current_gen_limit) &&
23491                          ephemeral_pointer_p (plug)))
23492                     {
23493                         dprintf(3,(" Crossing Generation boundary at %Ix",
23494                                (size_t)args->current_gen_limit));
23495                         if (!(args->current_gen_limit == MAX_PTR))
23496                         {
23497                             args->free_list_gen_number--;
23498                             args->free_list_gen = generation_of (args->free_list_gen_number);
23499                         }
23500                         dprintf(3,( " Fixing generation start of %d to: %Ix",
23501                                 args->free_list_gen_number, (size_t)gap));
23502
23503                         reset_allocation_pointers (args->free_list_gen, gap);
23504                         args->current_gen_limit = generation_limit (args->free_list_gen_number);
23505
23506                         if ((gap_size >= (2*Align (min_obj_size))))
23507                         {
23508                             dprintf(3,(" Splitting the gap in two %Id left",
23509                                    gap_size));
23510                             make_unused_array (gap, Align(min_obj_size));
23511                             gap_size = (gap_size - Align(min_obj_size));
23512                             gap = (gap + Align(min_obj_size));
23513                         }
23514                         else
23515                         {
23516                             make_unused_array (gap, gap_size);
23517                             gap_size = 0;
23518                         }
23519                         goto gen_crossing;
23520                     }
23521                 }
23522
23523                 thread_gap (gap, gap_size, args->free_list_gen);
23524                 add_gen_free (args->free_list_gen->gen_num, gap_size);
23525             }
23526             if (! (0 == right_node))
23527             {
23528                 make_free_list_in_brick (tree + right_node, args);
23529             }
23530         }
23531     }
23532 }
23533
23534 void gc_heap::thread_gap (uint8_t* gap_start, size_t size, generation*  gen)
23535 {
23536     assert (generation_allocation_start (gen));
23537     if ((size > 0))
23538     {
23539         if ((gen->gen_num == 0) && (size > CLR_SIZE))
23540         {
23541             gen0_big_free_spaces += size;
23542         }
23543
23544         assert ((heap_segment_rw (generation_start_segment (gen))!=
23545                  ephemeral_heap_segment) ||
23546                 (gap_start > generation_allocation_start (gen)));
23547         // The beginning of a segment gap is not aligned
23548         assert (size >= Align (min_obj_size));
23549         make_unused_array (gap_start, size, 
23550                           (!settings.concurrent && (gen != youngest_generation)),
23551                           (gen->gen_num == max_generation));
23552         dprintf (3, ("fr: [%Ix, %Ix[", (size_t)gap_start, (size_t)gap_start+size));
23553
23554         if ((size >= min_free_list))
23555         {
23556             generation_free_list_space (gen) += size;
23557             generation_allocator (gen)->thread_item (gap_start, size);
23558         }
23559         else
23560         {
23561             generation_free_obj_space (gen) += size;
23562         }
23563     }
23564 }
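// Note: the free list threshold above means a gap is only usable for future
// allocation if it is at least min_free_list bytes; anything smaller still
// becomes a free object (so the heap stays walkable) but is accounted as
// generation_free_obj_space rather than threaded onto the allocator.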
23565
23566 void gc_heap::loh_thread_gap_front (uint8_t* gap_start, size_t size, generation*  gen)
23567 {
23568     assert (generation_allocation_start (gen));
23569     if (size >= min_free_list)
23570     {
23571         generation_free_list_space (gen) += size;
23572         generation_allocator (gen)->thread_item_front (gap_start, size);
23573     }
23574 }
23575
23576 void gc_heap::make_unused_array (uint8_t* x, size_t size, BOOL clearp, BOOL resetp)
23577 {
23578     dprintf (3, ("Making unused array [%Ix, %Ix[",
23579         (size_t)x, (size_t)(x+size)));
23580     assert (size >= Align (min_obj_size));
23581
23582 //#if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
23583 //    check_batch_mark_array_bits (x, x+size);
23584 //#endif //VERIFY_HEAP && BACKGROUND_GC
23585
23586     if (resetp)
23587         reset_memory (x, size);
23588
23589     ((CObjectHeader*)x)->SetFree(size);
23590
23591 #ifdef BIT64
23592
23593 #if BIGENDIAN
23594 #error "This won't work on big endian platforms"
23595 #endif
23596
23597     size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;
23598
23599     if (size_as_object < size)
23600     {
23601         //
23602         // If the size is more than 4GB, we need to create multiple objects because
23603         // Array::m_NumComponents is a uint32_t and the high 32 bits of the unused array
23604         // size are ignored in regular object size computation.
23605         //
23606         uint8_t * tmp = x + size_as_object;
23607         size_t remaining_size = size - size_as_object;
23608
23609         while (remaining_size > UINT32_MAX)
23610         {
23611             // Make sure that there will be at least Align(min_obj_size) left
23612             size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) 
23613                 - Align (min_obj_size, get_alignment_constant (FALSE));
23614
23615             ((CObjectHeader*)tmp)->SetFree(current_size);
23616
23617             remaining_size -= current_size;
23618             tmp += current_size;
23619         }
23620
23621         ((CObjectHeader*)tmp)->SetFree(remaining_size);
23622     }
23623 #endif
23624
23625     if (clearp)
23626         clear_card_for_addresses (x, x + Align(size));
23627 }
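// Note: a worked example of the 64-bit splitting above, assuming a hypothetical
// free_object_base_size of 0x18 and a 6GB gap (size = 0x180000000):
//
//     size_as_object = (uint32_t)(size - 0x18) + 0x18
//                    = 0x7FFFFFE8 + 0x18 = 0x80000000   (low 32 bits only)
//
// Since size_as_object < size, the remaining 4GB is carved into additional
// free objects of at most UINT32_MAX minus alignment slack each, always
// leaving room for a final object of at least Align (min_obj_size).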
23628
23629 // Clear memory set by make_unused_array.
23630 void gc_heap::clear_unused_array (uint8_t* x, size_t size)
23631 {
23632     // Also clear the sync block
23633     *(((PTR_PTR)x)-1) = 0;
23634
23635     ((CObjectHeader*)x)->UnsetFree();
23636
23637 #ifdef BIT64
23638
23639 #if BIGENDIAN
23640 #error "This won't work on big endian platforms"
23641 #endif
23642
23643     // The memory could have been cleared in the meantime. We have to mirror the algorithm
23644     // from make_unused_array since we cannot depend on the object sizes in memory.
23645     size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;
23646
23647     if (size_as_object < size)
23648     {
23649         uint8_t * tmp = x + size_as_object;
23650         size_t remaining_size = size - size_as_object;
23651
23652         while (remaining_size > UINT32_MAX)
23653         {
23654             size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) 
23655                 - Align (min_obj_size, get_alignment_constant (FALSE));
23656
23657             ((CObjectHeader*)tmp)->UnsetFree();
23658
23659             remaining_size -= current_size;
23660             tmp += current_size;
23661         }
23662
23663         ((CObjectHeader*)tmp)->UnsetFree();
23664     }
23665 #else
23666     UNREFERENCED_PARAMETER(size);
23667 #endif
23668 }
23669
23670 inline
23671 uint8_t* tree_search (uint8_t* tree, uint8_t* old_address)
23672 {
23673     uint8_t* candidate = 0;
23674     int cn;
23675     while (1)
23676     {
23677         if (tree < old_address)
23678         {
23679             if ((cn = node_right_child (tree)) != 0)
23680             {
23681                 assert (candidate < tree);
23682                 candidate = tree;
23683                 tree = tree + cn;
23684                 Prefetch (tree - 8);
23685                 continue;
23686             }
23687             else
23688                 break;
23689         }
23690         else if (tree > old_address)
23691         {
23692             if ((cn = node_left_child (tree)) != 0)
23693             {
23694                 tree = tree + cn;
23695                 Prefetch (tree - 8);
23696                 continue;
23697             }
23698             else
23699                 break;
23700         } else
23701             break;
23702     }
23703     if (tree <= old_address)
23704         return tree;
23705     else if (candidate)
23706         return candidate;
23707     else
23708         return tree;
23709 }
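// Note: a simplified sketch of how the brick table and plug trees cooperate in
// relocate_address below. A brick entry > 0 is a 1-based offset of the root
// plug of that brick's binary tree; an entry < 0 chains back to the brick that
// owns the tree; 0 means no tree. Child links are stored as offsets relative
// to the parent plug:
//
//     size_t brick = brick_of (old_address);
//     int entry = brick_table [brick];
//     while (entry < 0)                   // walk back to the owning brick
//     {
//         brick += entry;
//         entry = brick_table [brick];
//     }
//     uint8_t* tree = brick_address (brick) + entry - 1;
//     uint8_t* node = tree_search (tree, old_address);
//     // new_address = old_address + node_relocation_distance (node), modulo
//     // the left-child adjustment handled in relocate_address.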
23710
23711 #ifdef FEATURE_BASICFREEZE
23712 bool gc_heap::frozen_object_p (Object* obj)
23713 {
23714 #ifdef MULTIPLE_HEAPS
23715     ptrdiff_t delta = 0;
23716     heap_segment* pSegment = segment_of ((uint8_t*)obj, delta);
23717 #else //MULTIPLE_HEAPS
23718     heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)obj, FALSE);
23719     _ASSERTE(pSegment);
23720 #endif //MULTIPLE_HEAPS
23721
23722     return heap_segment_read_only_p(pSegment);
23723 }
23724 #endif // FEATURE_BASICFREEZE
23725
23726 #ifdef FEATURE_REDHAWK
23727 // TODO: this was added on RH; we have not done perf runs to see if this is the right
23728 // thing to do for other versions of the CLR.
23729 inline
23730 #endif // FEATURE_REDHAWK
23731 void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
23732 {
23733     uint8_t* old_address = *pold_address;
23734     if (!((old_address >= gc_low) && (old_address < gc_high)))
23735 #ifdef MULTIPLE_HEAPS
23736     {
23737         UNREFERENCED_PARAMETER(thread);
23738         if (old_address == 0)
23739             return;
23740         gc_heap* hp = heap_of (old_address);
23741         if ((hp == this) ||
23742             !((old_address >= hp->gc_low) && (old_address < hp->gc_high)))
23743             return;
23744     }
23745 #else //MULTIPLE_HEAPS
23746         return;
23747 #endif //MULTIPLE_HEAPS
23748     // delta translates old_address into address_gc (old_address);
23749     size_t  brick = brick_of (old_address);
23750     int    brick_entry =  brick_table [ brick ];
23751     uint8_t*  new_address = old_address;
23752     if (! ((brick_entry == 0)))
23753     {
23754     retry:
23755         {
23756             while (brick_entry < 0)
23757             {
23758                 brick = (brick + brick_entry);
23759                 brick_entry =  brick_table [ brick ];
23760             }
23761             uint8_t* old_loc = old_address;
23762
23763             uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
23764                                       old_loc);
23765             if ((node <= old_loc))
23766                 new_address = (old_address + node_relocation_distance (node));
23767             else
23768             {
23769                 if (node_left_p (node))
23770                 {
23771                     dprintf(3,(" L: %Ix", (size_t)node));
23772                     new_address = (old_address +
23773                                    (node_relocation_distance (node) +
23774                                     node_gap_size (node)));
23775                 }
23776                 else
23777                 {
23778                     brick = brick - 1;
23779                     brick_entry =  brick_table [ brick ];
23780                     goto retry;
23781                 }
23782             }
23783         }
23784
23785         *pold_address = new_address;
23786         return;
23787     }
23788
23789 #ifdef FEATURE_LOH_COMPACTION
23790     if (loh_compacted_p
23791 #ifdef FEATURE_BASICFREEZE
23792         && !frozen_object_p((Object*)old_address)
23793 #endif // FEATURE_BASICFREEZE
23794         )
23795     {
23796         *pold_address = old_address + loh_node_relocation_distance (old_address);
23797     }
23798     else
23799 #endif //FEATURE_LOH_COMPACTION
23800     {
23801         *pold_address = new_address;
23802     }
23803 }
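// Note: a brick entry of 0 above means no plug tree covers the address - for
// LOH addresses the relocation distance is taken from
// loh_node_relocation_distance when LOH compaction ran; otherwise the address
// is left unchanged.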
23804
23805 inline void 
23806 gc_heap::check_class_object_demotion (uint8_t* obj)
23807 {
23808 #ifdef COLLECTIBLE_CLASS
23809     if (is_collectible(obj))
23810     {
23811         check_class_object_demotion_internal (obj);
23812     }
23813 #else
23814     UNREFERENCED_PARAMETER(obj);
23815 #endif //COLLECTIBLE_CLASS
23816 }
23817
23818 #ifdef COLLECTIBLE_CLASS
23819 NOINLINE void 
23820 gc_heap::check_class_object_demotion_internal (uint8_t* obj)
23821 {
23822     if (settings.demotion)
23823     {
23824 #ifdef MULTIPLE_HEAPS
23825         // We set the card without checking the demotion range because at this point
23826         // the handle that points to the loader allocator object may or may not have
23827         // been relocated by other GC threads.
23828         set_card (card_of (obj));
23829 #else
23830         THREAD_FROM_HEAP;
23831         uint8_t* class_obj = get_class_object (obj);
23832         dprintf (3, ("%Ix: got classobj %Ix", obj, class_obj));
23833         uint8_t* temp_class_obj = class_obj;
23834         uint8_t** temp = &temp_class_obj;
23835         relocate_address (temp THREAD_NUMBER_ARG);
23836
23837         check_demotion_helper (temp, obj);
23838 #endif //MULTIPLE_HEAPS
23839     }
23840 }
23841
23842 #endif //COLLECTIBLE_CLASS
23843
23844 inline void
23845 gc_heap::check_demotion_helper (uint8_t** pval, uint8_t* parent_obj)
23846 {
23847     // detect if we are demoting an object
23848     if ((*pval < demotion_high) &&
23849         (*pval >= demotion_low))
23850     {
23851         dprintf(3, ("setting card %Ix:%Ix",
23852                     card_of((uint8_t*)pval),
23853                     (size_t)pval));
23854
23855         set_card (card_of (parent_obj));
23856     }
23857 #ifdef MULTIPLE_HEAPS
23858     else if (settings.demotion)
23859     {
23860         dprintf (4, ("Demotion active, computing heap_of object"));
23861         gc_heap* hp = heap_of (*pval);
23862         if ((*pval < hp->demotion_high) &&
23863             (*pval >= hp->demotion_low))
23864         {
23865             dprintf(3, ("setting card %Ix:%Ix",
23866                         card_of((uint8_t*)pval),
23867                         (size_t)pval));
23868
23869             set_card (card_of (parent_obj));
23870         }
23871     }
23872 #endif //MULTIPLE_HEAPS
23873 }
23874
23875 inline void
23876 gc_heap::reloc_survivor_helper (uint8_t** pval)
23877 {
23878     THREAD_FROM_HEAP;
23879     relocate_address (pval THREAD_NUMBER_ARG);
23880
23881     check_demotion_helper (pval, (uint8_t*)pval);
23882 }
23883
23884 inline void
23885 gc_heap::relocate_obj_helper (uint8_t* x, size_t s)
23886 {
23887     THREAD_FROM_HEAP;
23888     if (contain_pointers (x))
23889     {
23890         dprintf (3, ("$%Ix$", (size_t)x));
23891
23892         go_through_object_nostart (method_table(x), x, s, pval,
23893                             {
23894                                 uint8_t* child = *pval;
23895                                 reloc_survivor_helper (pval);
23896                                 if (child)
23897                                 {
23898                                     dprintf (3, ("%Ix->%Ix->%Ix", (uint8_t*)pval, child, *pval));
23899                                 }
23900                             });
23901
23902     }
23903     check_class_object_demotion (x);
23904 }
23905
23906 inline 
23907 void gc_heap::reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc)
23908 {
23909     THREAD_FROM_HEAP;
23910
23911     uint8_t* old_val = (address_to_reloc ? *address_to_reloc : 0);
23912     relocate_address (address_to_reloc THREAD_NUMBER_ARG);
23913     if (address_to_reloc)
23914     {
23915         dprintf (3, ("SR %Ix: %Ix->%Ix", (uint8_t*)address_to_reloc, old_val, *address_to_reloc));
23916     }
23917
23918     //check_demotion_helper (current_saved_info_to_relocate, (uint8_t*)pval);
23919     uint8_t* relocated_addr = *address_to_reloc;
23920     if ((relocated_addr < demotion_high) &&
23921         (relocated_addr >= demotion_low))
23922     {
23923         dprintf (3, ("set card for location %Ix(%Ix)",
23924                     (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));
23925
23926         set_card (card_of ((uint8_t*)address_to_set_card));
23927     }
23928 #ifdef MULTIPLE_HEAPS
23929     else if (settings.demotion)
23930     {
23931         gc_heap* hp = heap_of (relocated_addr);
23932         if ((relocated_addr < hp->demotion_high) &&
23933             (relocated_addr >= hp->demotion_low))
23934         {
23935             dprintf (3, ("%Ix on h%d, set card for location %Ix(%Ix)",
23936                         relocated_addr, hp->heap_number, (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));
23937
23938             set_card (card_of ((uint8_t*)address_to_set_card));
23939         }
23940     }
23941 #endif //MULTIPLE_HEAPS
23942 }
23943
23944 void gc_heap::relocate_pre_plug_info (mark* pinned_plug_entry)
23945 {
23946     THREAD_FROM_HEAP;
23947     uint8_t* plug = pinned_plug (pinned_plug_entry);
23948     uint8_t* pre_plug_start = plug - sizeof (plug_and_gap);
23949     // Note that we need to add one ptr size here otherwise we may not be able to find the relocated
23950     // address. Consider this scenario: 
23951     // gen1 start | 3-ptr sized NP | PP
23952     // 0          | 0x18           | 0x30
23953     // If we are asking for the reloc address of 0x10 we will AV in relocate_address because
23954     // the first plug we saw in the brick is 0x18 which means 0x10 will cause us to go back a brick
23955     // which is 0, and then we'll AV in tree_search when we try to do node_right_child (tree). 
23956     pre_plug_start += sizeof (uint8_t*);
23957     uint8_t** old_address = &pre_plug_start;
23958
23959     uint8_t* old_val = (old_address ? *old_address : 0);
23960     relocate_address (old_address THREAD_NUMBER_ARG);
23961     if (old_address)
23962     {
23963         dprintf (3, ("PreR %Ix: %Ix->%Ix, set reloc: %Ix", 
23964             (uint8_t*)old_address, old_val, *old_address, (pre_plug_start - sizeof (uint8_t*))));
23965     }
23966
23967     pinned_plug_entry->set_pre_plug_info_reloc_start (pre_plug_start - sizeof (uint8_t*));
23968 }
23969
23970 inline
23971 void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned)
23972 {
23973     THREAD_FROM_HEAP;
23974     uint8_t* plug = pinned_plug (pinned_plug_entry);
23975
23976     if (!is_pinned)
23977     {
23978         //// Temporary - we just want to make sure we are doing things right when padding is needed.
23979         //if ((x + s) < plug)
23980         //{
23981         //    dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix", 
23982         //        x, (x + s), (plug- (x + s)), plug));
23983         //    GCToOSInterface::DebugBreak();
23984         //}
23985
23986         relocate_pre_plug_info (pinned_plug_entry);
23987     }
23988
23989     verify_pins_with_post_plug_info("after relocate_pre_plug_info");
23990
23991     uint8_t* saved_plug_info_start = 0;
23992     uint8_t** saved_info_to_relocate = 0;
23993
23994     if (is_pinned)
23995     {
23996         saved_plug_info_start = (uint8_t*)(pinned_plug_entry->get_post_plug_info_start());
23997         saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
23998     }
23999     else
24000     {
24001         saved_plug_info_start = (plug - sizeof (plug_and_gap));
24002         saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
24003     }
24004     
24005     uint8_t** current_saved_info_to_relocate = 0;
24006     uint8_t* child = 0;
24007
24008     dprintf (3, ("x: %Ix, pp: %Ix, end: %Ix", x, plug, end));
24009
24010     if (contain_pointers (x))
24011     {
24012         dprintf (3,("$%Ix$", (size_t)x));
24013
24014         go_through_object_nostart (method_table(x), x, s, pval,
24015         {
24016             dprintf (3, ("obj %Ix, member: %Ix->%Ix", x, (uint8_t*)pval, *pval));
24017
24018             if ((uint8_t*)pval >= end)
24019             {
24020                 current_saved_info_to_relocate = saved_info_to_relocate + ((uint8_t*)pval - saved_plug_info_start) / sizeof (uint8_t**);
24021                 child = *current_saved_info_to_relocate;
24022                 reloc_ref_in_shortened_obj (pval, current_saved_info_to_relocate);
24023                 dprintf (3, ("last part: R-%Ix(saved: %Ix)->%Ix ->%Ix",
24024                     (uint8_t*)pval, current_saved_info_to_relocate, child, *current_saved_info_to_relocate));
24025             }
24026             else
24027             {
24028                 reloc_survivor_helper (pval);
24029             }
24030         });
24031     }
24032
24033     check_class_object_demotion (x);
24034 }
24035
24036 void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end)
24037 {
24038     uint8_t*  x = plug;
24039     while (x < plug_end)
24040     {
24041         size_t s = size (x);
24042         uint8_t* next_obj = x + Align (s);
24043         Prefetch (next_obj);
24044         relocate_obj_helper (x, s);
24045         assert (s > 0);
24046         x = next_obj;
24047     }
24048 }
24049
24050 // If we expanded, we are not handling it right now, as we are not saving the new reloc info.
24051 void gc_heap::verify_pins_with_post_plug_info (const char* msg)
24052 {
24053 #if defined  (_DEBUG) && defined (VERIFY_HEAP)
24054     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
24055     {
24056         if (!verify_pinned_queue_p)
24057             return;
24058
24059         if (settings.heap_expansion)
24060             return;
24061
24062         for (size_t i = 0; i < mark_stack_tos; i++)
24063         {
24064             mark& m = mark_stack_array[i];
24065
24066             mark* pinned_plug_entry = pinned_plug_of(i);
24067
24068             if (pinned_plug_entry->has_post_plug_info() && 
24069                 pinned_plug_entry->post_short_p() && 
24070                 (pinned_plug_entry->saved_post_plug_debug.gap != 1))
24071             {
24072                 uint8_t* next_obj = pinned_plug_entry->get_post_plug_info_start() + sizeof (plug_and_gap);
24073                 // object after pin
24074                 dprintf (3, ("OFP: %Ix, G: %Ix, R: %Ix, LC: %d, RC: %d", 
24075                     next_obj, node_gap_size (next_obj), node_relocation_distance (next_obj),
24076                     (int)node_left_child (next_obj), (int)node_right_child (next_obj)));
24077
24078                 size_t* post_plug_debug = (size_t*)(&m.saved_post_plug_debug);
24079
24080                 if (node_gap_size (next_obj) != *post_plug_debug)
24081                 {
24082                     dprintf (3, ("obj: %Ix gap should be %Ix but it is %Ix", 
24083                         next_obj, *post_plug_debug, (size_t)(node_gap_size (next_obj))));
24084                     FATAL_GC_ERROR();
24085                 }
24086                 post_plug_debug++;
24087                 // can't do node_relocation_distance here as it clears the left bit.
24088                 //if (node_relocation_distance (next_obj) != *post_plug_debug)
24089                 if (*((size_t*)(next_obj - 3 * sizeof (size_t))) != *post_plug_debug)
24090                 {
24091                     dprintf (3, ("obj: %Ix reloc should be %Ix but it is %Ix", 
24092                         next_obj, *post_plug_debug, (size_t)(node_relocation_distance (next_obj))));
24093                     FATAL_GC_ERROR();
24094                 }
24095                 if (node_left_child (next_obj) > 0)
24096                 {
24097                     dprintf (3, ("obj: %Ix, vLC: %d\n", next_obj, (int)(node_left_child (next_obj))));
24098                     FATAL_GC_ERROR();
24099                 }
24100             }
24101         }
24102
24103         dprintf (3, ("%s verified", msg));
24104     }
24105 #else // _DEBUG && VERIFY_HEAP
24106     UNREFERENCED_PARAMETER(msg);
24107 #endif // _DEBUG && VERIFY_HEAP
24108 }
24109
24110 #ifdef COLLECTIBLE_CLASS
24111 // We don't want to burn another pointer-sized field in pinned plugs to record this, so just
24112 // set the card unconditionally for collectible objects if we are demoting.
24113 inline void
24114 gc_heap::unconditional_set_card_collectible (uint8_t* obj)
24115 {
24116     if (settings.demotion)
24117     {
24118         set_card (card_of (obj));
24119     }
24120 }
24121 #endif //COLLECTIBLE_CLASS
24122
24123 void gc_heap::relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry)
24124 {
24125     uint8_t*  x = plug;
24126     uint8_t* p_plug = pinned_plug (pinned_plug_entry);
24127     BOOL is_pinned = (plug == p_plug);
24128     BOOL check_short_obj_p = (is_pinned ? pinned_plug_entry->post_short_p() : pinned_plug_entry->pre_short_p());
24129
24130     plug_end += sizeof (gap_reloc_pair);
24131
24132     //dprintf (3, ("%s %Ix is shortened, and last object %s overwritten", (is_pinned ? "PP" : "NP"), plug, (check_short_obj_p ? "is" : "is not")));
24133     dprintf (3, ("%s %Ix-%Ix short, LO: %s OW", (is_pinned ? "PP" : "NP"), plug, plug_end, (check_short_obj_p ? "is" : "is not")));
24134
24135     verify_pins_with_post_plug_info("begin reloc short surv");
24136
24137     while (x < plug_end)
24138     {
24139         if (check_short_obj_p && ((plug_end - x) < min_pre_pin_obj_size))
24140         {
24141             dprintf (3, ("last obj %Ix is short", x));
24142
24143             if (is_pinned)
24144             {
24145 #ifdef COLLECTIBLE_CLASS
24146                 if (pinned_plug_entry->post_short_collectible_p())
24147                     unconditional_set_card_collectible (x);
24148 #endif //COLLECTIBLE_CLASS
24149
24150                 // Relocate the saved references based on bits set.
24151                 uint8_t** saved_plug_info_start = (uint8_t**)(pinned_plug_entry->get_post_plug_info_start());
24152                 uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
24153                 for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
24154                 {
24155                     if (pinned_plug_entry->post_short_bit_p (i))
24156                     {
24157                         reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
24158                     }
24159                 }
24160             }
24161             else
24162             {
24163 #ifdef COLLECTIBLE_CLASS
24164                 if (pinned_plug_entry->pre_short_collectible_p())
24165                     unconditional_set_card_collectible (x);
24166 #endif //COLLECTIBLE_CLASS
24167
24168                 relocate_pre_plug_info (pinned_plug_entry);
24169
24170                 // Relocate the saved references based on bits set.
24171                 uint8_t** saved_plug_info_start = (uint8_t**)(p_plug - sizeof (plug_and_gap));
24172                 uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
24173                 for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
24174                 {
24175                     if (pinned_plug_entry->pre_short_bit_p (i))
24176                     {
24177                         reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
24178                     }
24179                 }
24180             }
24181
24182             break;
24183         }
24184
24185         size_t s = size (x);
24186         uint8_t* next_obj = x + Align (s);
24187         Prefetch (next_obj);
24188
24189         if (next_obj >= plug_end) 
24190         {
24191             dprintf (3, ("object %Ix is at the end of the plug %Ix->%Ix", 
24192                 next_obj, plug, plug_end));
24193
24194             verify_pins_with_post_plug_info("before reloc short obj");
24195
24196             relocate_shortened_obj_helper (x, s, (x + Align (s) - sizeof (plug_and_gap)), pinned_plug_entry, is_pinned);
24197         }
24198         else
24199         {
24200             relocate_obj_helper (x, s);
24201         }
24202
24203         assert (s > 0);
24204         x = next_obj;
24205     }
24206
24207     verify_pins_with_post_plug_info("end reloc short surv");
24208 }
24209
24210 void gc_heap::relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
24211                                           BOOL check_last_object_p, 
24212                                           mark* pinned_plug_entry)
24213 {
24214     //dprintf(3,("Relocating pointers in Plug [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));
24215     dprintf (3,("RP: [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));
24216
24217     if (check_last_object_p)
24218     {
24219         relocate_shortened_survivor_helper (plug, plug_end, pinned_plug_entry);
24220     }
24221     else
24222     {
24223         relocate_survivor_helper (plug, plug_end);
24224     }
24225 }
24226
24227 void gc_heap::relocate_survivors_in_brick (uint8_t* tree, relocate_args* args)
24228 {
24229     assert ((tree != NULL));
24230
24231     dprintf (3, ("tree: %Ix, args->last_plug: %Ix, left: %Ix, right: %Ix, gap(t): %Ix",
24232         tree, args->last_plug, 
24233         (tree + node_left_child (tree)),
24234         (tree + node_right_child (tree)),
24235         node_gap_size (tree)));
24236
24237     if (node_left_child (tree))
24238     {
24239         relocate_survivors_in_brick (tree + node_left_child (tree), args);
24240     }
24241     {
24242         uint8_t*  plug = tree;
24243         BOOL   has_post_plug_info_p = FALSE;
24244         BOOL   has_pre_plug_info_p = FALSE;
24245
24246         if (tree == oldest_pinned_plug)
24247         {
24248             args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
24249                                                                &has_post_plug_info_p);
24250             assert (tree == pinned_plug (args->pinned_plug_entry));
24251
24252             dprintf (3, ("tree is the oldest pin: %Ix", tree));
24253         }
24254         if (args->last_plug)
24255         {
24256             size_t  gap_size = node_gap_size (tree);
24257             uint8_t*  gap = (plug - gap_size);
24258             dprintf (3, ("tree: %Ix, gap: %Ix (%Ix)", tree, gap, gap_size));
24259             assert (gap_size >= Align (min_obj_size));
24260             uint8_t*  last_plug_end = gap;
24261
24262             BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
24263
24264             {
24265                 relocate_survivors_in_plug (args->last_plug, last_plug_end, check_last_object_p, args->pinned_plug_entry);
24266             }
24267         }
24268         else
24269         {
24270             assert (!has_pre_plug_info_p);
24271         }
24272
24273         args->last_plug = plug;
24274         args->is_shortened = has_post_plug_info_p;
24275         if (has_post_plug_info_p)
24276         {
24277             dprintf (3, ("setting %Ix as shortened", plug));
24278         }
24279         dprintf (3, ("last_plug: %Ix(shortened: %d)", plug, (args->is_shortened ? 1 : 0)));
24280     }
24281     if (node_right_child (tree))
24282     {
24283         relocate_survivors_in_brick (tree + node_right_child (tree), args);
24284     }
24285 }
24286
24287 inline
24288 void gc_heap::update_oldest_pinned_plug()
24289 {
24290     oldest_pinned_plug = (pinned_plug_que_empty_p() ? 0 : pinned_plug (oldest_pin()));
24291 }
24292
24293 void gc_heap::relocate_survivors (int condemned_gen_number,
24294                                   uint8_t* first_condemned_address)
24295 {
24296     generation* condemned_gen = generation_of (condemned_gen_number);
24297     uint8_t*  start_address = first_condemned_address;
24298     size_t  current_brick = brick_of (start_address);
24299     heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
24300
24301     PREFIX_ASSUME(current_heap_segment != NULL);
24302
24303     uint8_t*  end_address = 0;
24304
24305     reset_pinned_queue_bos();
24306     update_oldest_pinned_plug();
24307     
24308     end_address = heap_segment_allocated (current_heap_segment);
24309
24310     size_t  end_brick = brick_of (end_address - 1);
24311     relocate_args args;
24312     args.low = gc_low;
24313     args.high = gc_high;
24314     args.is_shortened = FALSE;
24315     args.pinned_plug_entry = 0;
24316     args.last_plug = 0;
24317     while (1)
24318     {
24319         if (current_brick > end_brick)
24320         {
24321             if (args.last_plug)
24322             {
24323                 {
24324                     assert (!(args.is_shortened));
24325                     relocate_survivors_in_plug (args.last_plug,
24326                                                 heap_segment_allocated (current_heap_segment),
24327                                                 args.is_shortened, 
24328                                                 args.pinned_plug_entry);
24329                 }
24330
24331                 args.last_plug = 0;
24332             }
24333
24334             if (heap_segment_next_rw (current_heap_segment))
24335             {
24336                 current_heap_segment = heap_segment_next_rw (current_heap_segment);
24337                 current_brick = brick_of (heap_segment_mem (current_heap_segment));
24338                 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24339                 continue;
24340             }
24341             else
24342             {
24343                 break;
24344             }
24345         }
24346         {
24347             int brick_entry =  brick_table [ current_brick ];
24348
24349             if (brick_entry >= 0)
24350             {
24351                 relocate_survivors_in_brick (brick_address (current_brick) +
24352                                              brick_entry -1,
24353                                              &args);
24354             }
24355         }
24356         current_brick++;
24357     }
24358 }
24359
24360 void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args)
24361 {
24362     if (check_last_object_p)
24363     {
24364         size += sizeof (gap_reloc_pair);
24365         mark* entry = args->pinned_plug_entry;
24366
24367         if (args->is_shortened)
24368         {
24369             assert (entry->has_post_plug_info());
24370             entry->swap_post_plug_and_saved_for_profiler();
24371         }
24372         else
24373         {
24374             assert (entry->has_pre_plug_info());
24375             entry->swap_pre_plug_and_saved_for_profiler();
24376         }
24377     }
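    // Note: the swap above temporarily restores the object contents that were
    // overwritten by the saved plug-and-gap info, so the profiler callback
    // below walks real objects; the matching swap after the callback puts the
    // reloc info back.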
24378
24379     ptrdiff_t last_plug_relocation = node_relocation_distance (plug);
24380     STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
24381     ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
24382
24383     (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false);
24384
24385     if (check_last_object_p)
24386     {
24387         mark* entry = args->pinned_plug_entry;
24388
24389         if (args->is_shortened)
24390         {
24391             entry->swap_post_plug_and_saved_for_profiler();
24392         }
24393         else
24394         {
24395             entry->swap_pre_plug_and_saved_for_profiler();
24396         }
24397     }
24398 }
24399
24400 void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
24401 {
24402     assert ((tree != NULL));
24403     if (node_left_child (tree))
24404     {
24405         walk_relocation_in_brick (tree + node_left_child (tree), args);
24406     }
24407
24408     uint8_t*  plug = tree;
24409     BOOL   has_pre_plug_info_p = FALSE;
24410     BOOL   has_post_plug_info_p = FALSE;
24411
24412     if (tree == oldest_pinned_plug)
24413     {
24414         args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
24415                                                            &has_post_plug_info_p);
24416         assert (tree == pinned_plug (args->pinned_plug_entry));
24417     }
24418
24419     if (args->last_plug != 0)
24420     {
24421         size_t gap_size = node_gap_size (tree);
24422         uint8_t*  gap = (plug - gap_size);
24423         uint8_t*  last_plug_end = gap;
24424         size_t last_plug_size = (last_plug_end - args->last_plug);
24425         dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", 
24426             tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));
24427         
24428         BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
24429         if (!check_last_object_p)
24430         {
24431             assert (last_plug_size >= Align (min_obj_size));
24432         }
24433
24434         walk_plug (args->last_plug, last_plug_size, check_last_object_p, args);
24435     }
24436     else
24437     {
24438         assert (!has_pre_plug_info_p);
24439     }
24440
24441     dprintf (3, ("set args last plug to plug: %Ix", plug));
24442     args->last_plug = plug;
24443     args->is_shortened = has_post_plug_info_p;
24444
24445     if (node_right_child (tree))
24446     {
24447         walk_relocation_in_brick (tree + node_right_child (tree), args);
24448     }
24449 }
24450
24451 void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn)
24452 {
24453     generation* condemned_gen = generation_of (settings.condemned_generation);
24454     uint8_t*  start_address = generation_allocation_start (condemned_gen);
24455     size_t  current_brick = brick_of (start_address);
24456     heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
24457
24458     PREFIX_ASSUME(current_heap_segment != NULL);
24459
24460     reset_pinned_queue_bos();
24461     update_oldest_pinned_plug();
24462     size_t end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24463     walk_relocate_args args;
24464     args.is_shortened = FALSE;
24465     args.pinned_plug_entry = 0;
24466     args.last_plug = 0;
24467     args.profiling_context = profiling_context;
24468     args.fn = fn;
24469
24470     while (1)
24471     {
24472         if (current_brick > end_brick)
24473         {
24474             if (args.last_plug)
24475             {
24476                 walk_plug (args.last_plug, 
24477                            (heap_segment_allocated (current_heap_segment) - args.last_plug), 
24478                            args.is_shortened,
24479                            &args);
24480                 args.last_plug = 0;
24481             }
24482             if (heap_segment_next_rw (current_heap_segment))
24483             {
24484                 current_heap_segment = heap_segment_next_rw (current_heap_segment);
24485                 current_brick = brick_of (heap_segment_mem (current_heap_segment));
24486                 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24487                 continue;
24488             }
24489             else
24490             {
24491                 break;
24492             }
24493         }
24494         {
24495             int brick_entry =  brick_table [ current_brick ];
24496             if (brick_entry >= 0)
24497             {
24498                 walk_relocation_in_brick (brick_address (current_brick) +
24499                                           brick_entry - 1,
24500                                           &args);
24501             }
24502         }
24503         current_brick++;
24504     }
24505 }
24506
24507 void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type)
24508 {
24509     if (type == walk_for_gc)
24510         walk_survivors_relocation (context, fn);
24511 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24512     else if (type == walk_for_bgc)
24513         walk_survivors_for_bgc (context, fn);
24514 #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
24515     else if (type == walk_for_loh)
24516         walk_survivors_for_loh (context, fn);
24517     else
24518         assert (!"unknown type!");
24519 }
24520
24521 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24522 void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
24523 {
24524     // This should only be called for BGCs
24525     assert(settings.concurrent);
24526
24527     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
24528
24529     BOOL small_object_segments = TRUE;
24530     int align_const = get_alignment_constant (small_object_segments);
24531
24532     while (1)
24533     {
24534         if (seg == 0)
24535         {
24536             if (small_object_segments)
24537             {
24538                 //switch to large segment
24539                 small_object_segments = FALSE;
24540
24541                 align_const = get_alignment_constant (small_object_segments);
24542                 seg = heap_segment_rw (generation_start_segment (large_object_generation));
24543
24544                 PREFIX_ASSUME(seg != NULL);
24545
24546                 continue;
24547             }
24548             else 
24549                 break;
24550         }
24551
24552         uint8_t* o = heap_segment_mem (seg);
24553         uint8_t* end = heap_segment_allocated (seg);
24554
24555         while (o < end)
24556         {
24557             if (method_table(o) == g_gc_pFreeObjectMethodTable)
24558             {
24559                 o += Align (size (o), align_const);
24560                 continue;
24561             }
24562
24563             // It's survived. Make a fake plug, starting at o,
24564             // and send the event
24565
24566             uint8_t* plug_start = o;
24567
24568             while (method_table(o) != g_gc_pFreeObjectMethodTable)
24569             {
24570                 o += Align (size (o), align_const);
24571                 if (o >= end)
24572                 {
24573                     break;
24574                 }
24575             }
24576                 
24577             uint8_t* plug_end = o;
24578
24579             fn (plug_start, 
24580                 plug_end,
24581                 0,              // Reloc distance == 0 as this is non-compacting
24582                 profiling_context,
24583                 false,          // Non-compacting
24584                 true);          // BGC
24585         }
24586
24587         seg = heap_segment_next (seg);
24588     }
24589 }
24590 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24591
24592 void gc_heap::relocate_phase (int condemned_gen_number,
24593                               uint8_t* first_condemned_address)
24594 {
24595     ScanContext sc;
24596     sc.thread_number = heap_number;
24597     sc.promotion = FALSE;
24598     sc.concurrent = FALSE;
24599
24600
24601 #ifdef TIME_GC
24602         unsigned start;
24603         unsigned finish;
24604         start = GetCycleCount32();
24605 #endif //TIME_GC
24606
24608     dprintf (2,("---- Relocate phase -----"));
24609
24610 #ifdef MULTIPLE_HEAPS
24611     //join all threads to make sure they are synchronized
24612     dprintf(3, ("Joining after end of plan"));
24613     gc_t_join.join(this, gc_join_begin_relocate_phase);
24614     if (gc_t_join.joined())
24615 #endif //MULTIPLE_HEAPS
24616
24617     {
24618 #ifdef MULTIPLE_HEAPS
24619
24620         //restart all threads now that they are synchronized
24621         dprintf(3, ("Restarting for relocation"));
24622         gc_t_join.restart();
24623 #endif //MULTIPLE_HEAPS
24624     }
24625
24626     dprintf(3,("Relocating roots"));
24627     GCScan::GcScanRoots(GCHeap::Relocate,
24628                             condemned_gen_number, max_generation, &sc);
24629
24630     verify_pins_with_post_plug_info("after reloc stack");
24631
24632 #ifdef BACKGROUND_GC
24633     if (recursive_gc_sync::background_running_p())
24634     {
24635         scan_background_roots (GCHeap::Relocate, heap_number, &sc);
24636     }
24637 #endif //BACKGROUND_GC
24638
24639     if (condemned_gen_number != max_generation)
24640     {
24641         dprintf(3,("Relocating cross generation pointers"));
24642         mark_through_cards_for_segments (&gc_heap::relocate_address, TRUE);
24643         verify_pins_with_post_plug_info("after reloc cards");
24644     }
24645     if (condemned_gen_number != max_generation)
24646     {
24647         dprintf(3,("Relocating cross generation pointers for large objects"));
24648         mark_through_cards_for_large_objects (&gc_heap::relocate_address, TRUE);
24649     }
24650     else
24651     {
24652 #ifdef FEATURE_LOH_COMPACTION
24653         if (loh_compacted_p)
24654         {
24655             assert (settings.condemned_generation == max_generation);
24656             relocate_in_loh_compact();
24657         }
24658         else
24659 #endif //FEATURE_LOH_COMPACTION
24660         {
24661             relocate_in_large_objects ();
24662         }
24663     }
24664     {
24665         dprintf(3,("Relocating survivors"));
24666         relocate_survivors (condemned_gen_number,
24667                             first_condemned_address);
24668     }
24669
24670 #ifdef FEATURE_PREMORTEM_FINALIZATION
24671         dprintf(3,("Relocating finalization data"));
24672         finalize_queue->RelocateFinalizationData (condemned_gen_number,
24673                                                        __this);
24674 #endif // FEATURE_PREMORTEM_FINALIZATION
24675
24676
24677 // MTHTS
24678     {
24679         dprintf(3,("Relocating handle table"));
24680         GCScan::GcScanHandles(GCHeap::Relocate,
24681                                   condemned_gen_number, max_generation, &sc);
24682     }
24683
24684 #ifdef MULTIPLE_HEAPS
24685     //join all threads to make sure they are synchronized
24686     dprintf(3, ("Joining after end of relocation"));
24687     gc_t_join.join(this, gc_join_relocate_phase_done);
24688
24689 #endif //MULTIPLE_HEAPS
24690
24691 #ifdef TIME_GC
24692         finish = GetCycleCount32();
24693         reloc_time = finish - start;
24694 #endif //TIME_GC
24695
24696     dprintf(2,( "---- End of Relocate phase ----"));
24697 }
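
// The join()/joined()/restart() rendezvous used above is the pattern every
// multi-heap phase boundary in this file follows: all heap threads park in
// join(), exactly one of them observes joined() == TRUE, performs the
// single-threaded work, and then releases the rest via restart(). A minimal
// illustrative sketch of that shape (not compiled; gc_join_some_phase and
// merge_per_heap_results are hypothetical names - see the t_join class for
// the real implementation):
//
//   gc_t_join.join (this, gc_join_some_phase); // every heap thread parks here
//   if (gc_t_join.joined())                    // TRUE on exactly one thread
//   {
//       merge_per_heap_results();              // hypothetical serial work
//       gc_t_join.restart();                   // wake the other heap threads
//   }
//   // all heap threads proceed in lock-step from here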
24698
24699 // This checks whether tree is the current pinned plug and, if so, returns its
24700 // plug info and advances the pinned queue.
24701 //
24702 // We don't change the values of the plug info if tree is not the same as 
24703 // the current pinned plug - the caller is responsible for setting the right
24704 // values to begin with.
24705 //
24706 // POPO TODO: We are keeping this temporarily, as it is also used by realloc
24707 // (which passes FALSE for deque_p); change realloc to use the same optimization
24708 // as relocate. Not as essential, since realloc is already a slow path.
24709 mark* gc_heap::get_next_pinned_entry (uint8_t* tree,
24710                                       BOOL* has_pre_plug_info_p, 
24711                                       BOOL* has_post_plug_info_p,
24712                                       BOOL deque_p)
24713 {
24714     if (!pinned_plug_que_empty_p())
24715     {
24716         mark* oldest_entry = oldest_pin();
24717         uint8_t* oldest_plug = pinned_plug (oldest_entry);
24718         if (tree == oldest_plug)
24719         {
24720             *has_pre_plug_info_p =  oldest_entry->has_pre_plug_info();
24721             *has_post_plug_info_p = oldest_entry->has_post_plug_info();
24722
24723             if (deque_p)
24724             {
24725                 deque_pinned_plug();
24726             }
24727
24728             dprintf (3, ("found a pinned plug %Ix, pre: %d, post: %d", 
24729                 tree, 
24730                 (*has_pre_plug_info_p ? 1 : 0),
24731                 (*has_post_plug_info_p ? 1 : 0)));
24732
24733             return oldest_entry;
24734         }
24735     }
24736
24737     return NULL;
24738 }
24739
24740 // This also dequeues the oldest entry and updates the oldest plug
24741 mark* gc_heap::get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, 
24742                                         BOOL* has_post_plug_info_p)
24743 {
24744     mark* oldest_entry = oldest_pin();
24745     *has_pre_plug_info_p =  oldest_entry->has_pre_plug_info();
24746     *has_post_plug_info_p = oldest_entry->has_post_plug_info();
24747
24748     deque_pinned_plug();
24749     update_oldest_pinned_plug();
24750     return oldest_entry;
24751 }
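
// Illustrative sketch (not compiled) of how the two accessors above are used:
// the relocate/compact path already knows when it is looking at the oldest
// pin, so it dequeues unconditionally via get_oldest_pinned_entry, while
// realloc probes with get_next_pinned_entry and controls dequeuing through
// deque_p (see the POPO TODO above):
//
//   BOOL pre_p, post_p;
//   mark* entry;
//   if (tree == oldest_pinned_plug)            // relocate/compact fast path
//       entry = get_oldest_pinned_entry (&pre_p, &post_p);
//
//   entry = get_next_pinned_entry (tree, &pre_p, &post_p, FALSE); // realloc probe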
24752
24753 inline
24754 void gc_heap::copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
24755 {
24756     if (copy_cards_p)
24757         copy_cards_for_addresses (dest, src, len);
24758     else
24759         clear_card_for_addresses (dest, dest + len);
24760 }
24761
24762 // POPO TODO: We should actually just recover the artificially made gaps here, because when we copy
24763 // we always copy the earlier plugs first, which means we won't need the gap sizes anymore. This way
24764 // we won't need to individually recover each overwritten part of the plugs.
24765 inline
24766 void  gc_heap::gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
24767 {
24768     if (dest != src)
24769     {
24770 #ifdef BACKGROUND_GC
24771         if (current_c_gc_state == c_gc_state_marking) 
24772         {
24773             //TODO: should look to see whether we should consider changing this
24774             // to copy a consecutive region of the mark array instead.
24775             copy_mark_bits_for_addresses (dest, src, len);
24776         }
24777 #endif //BACKGROUND_GC
24778         //dprintf(3,(" Memcopy [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
24779         dprintf(3,(" mc: [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
24780         memcopy (dest - plug_skew, src - plug_skew, len);
24781 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
24782         if (SoftwareWriteWatch::IsEnabledForGCHeap())
24783         {
24784             // The ranges [src - plug_skew .. src[ and [src + len - plug_skew .. src + len[ are ObjHeaders, which don't have GC
24785             // references, and are not relevant for write watch. The latter range actually corresponds to the ObjHeader for the
24786             // object at (src + len), so it can be ignored anyway.
24787             SoftwareWriteWatch::SetDirtyRegion(dest, len - plug_skew);
24788         }
24789 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
24790         copy_cards_range (dest, src, len, copy_cards_p);
24791     }
24792 }
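
// gcmemcopy copies from (dest - plug_skew, src - plug_skew) because a plug's
// first object is preceded by its ObjHeader, which must travel with it. A
// hedged sketch of the byte ranges involved, assuming the usual definition
// plug_skew == sizeof(ObjHeader):
//
//   copied:  [src - plug_skew, src + len - plug_skew)
//            = the header of the plug's first object plus the objects
//              themselves, minus the trailing plug_skew bytes
//   skipped: [src + len - plug_skew, src + len) - the ObjHeader of the object
//            *after* the plug, which moves with that plug instead
//
// This is also why the software write watch update above only dirties
// (len - plug_skew) bytes starting at dest.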
24793
24794 void gc_heap::compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args)
24795 {
24796     args->print();
24797     uint8_t* reloc_plug = plug + args->last_plug_relocation;
24798
24799     if (check_last_object_p)
24800     {
24801         size += sizeof (gap_reloc_pair);
24802         mark* entry = args->pinned_plug_entry;
24803
24804         if (args->is_shortened)
24805         {
24806             assert (entry->has_post_plug_info());
24807             entry->swap_post_plug_and_saved();
24808         }
24809         else
24810         {
24811             assert (entry->has_pre_plug_info());
24812             entry->swap_pre_plug_and_saved();
24813         }
24814     }
24815
24816     int  old_brick_entry =  brick_table [brick_of (plug)];
24817
24818     assert (node_relocation_distance (plug) == args->last_plug_relocation);
24819
24820 #ifdef FEATURE_STRUCTALIGN
24821     ptrdiff_t alignpad = node_alignpad(plug);
24822     if (alignpad)
24823     {
24824         make_unused_array (reloc_plug - alignpad, alignpad);
24825         if (brick_of (reloc_plug - alignpad) != brick_of (reloc_plug))
24826         {
24827             // The alignment padding is straddling one or more bricks;
24828             // it has to be the last "object" of its first brick.
24829             fix_brick_to_highest (reloc_plug - alignpad, reloc_plug);
24830         }
24831     }
24832 #else // FEATURE_STRUCTALIGN
24833     size_t unused_arr_size = 0; 
24834     BOOL  already_padded_p = FALSE;
24835 #ifdef SHORT_PLUGS
24836     if (is_plug_padded (plug))
24837     {
24838         already_padded_p = TRUE;
24839         clear_plug_padded (plug);
24840         unused_arr_size = Align (min_obj_size);
24841     }
24842 #endif //SHORT_PLUGS
24843     if (node_realigned (plug))
24844     {
24845         unused_arr_size += switch_alignment_size (already_padded_p);
24846     }
24847
24848     if (unused_arr_size != 0) 
24849     {
24850         make_unused_array (reloc_plug - unused_arr_size, unused_arr_size);
24851
24852         if (brick_of (reloc_plug - unused_arr_size) != brick_of (reloc_plug))
24853         {
24854             dprintf (3, ("fix B for padding: %Id: %Ix->%Ix", 
24855                 unused_arr_size, (reloc_plug - unused_arr_size), reloc_plug));
24856             // The alignment padding is straddling one or more bricks;
24857             // it has to be the last "object" of its first brick.
24858             fix_brick_to_highest (reloc_plug - unused_arr_size, reloc_plug);
24859         }
24860     }
24861 #endif // FEATURE_STRUCTALIGN
24862
24863 #ifdef SHORT_PLUGS
24864     if (is_plug_padded (plug))
24865     {
24866         make_unused_array (reloc_plug - Align (min_obj_size), Align (min_obj_size));
24867
24868         if (brick_of (reloc_plug - Align (min_obj_size)) != brick_of (reloc_plug))
24869         {
24870             // The alignment padding is straddling one or more bricks;
24871             // it has to be the last "object" of its first brick.
24872             fix_brick_to_highest (reloc_plug - Align (min_obj_size), reloc_plug);
24873         }
24874     }
24875 #endif //SHORT_PLUGS
24876
24877     gcmemcopy (reloc_plug, plug, size, args->copy_cards_p);
24878
24879     if (args->check_gennum_p)
24880     {
24881         int src_gennum = args->src_gennum;
24882         if (src_gennum == -1)
24883         {
24884             src_gennum = object_gennum (plug);
24885         }
24886
24887         int dest_gennum = object_gennum_plan (reloc_plug);
24888
24889         if (src_gennum < dest_gennum)
24890         {
24891             generation_allocation_size (generation_of (dest_gennum)) += size;
24892         }
24893     }
24894
24895     size_t current_reloc_brick = args->current_compacted_brick;
24896
24897     if (brick_of (reloc_plug) != current_reloc_brick)
24898     {
24899         dprintf (3, ("last reloc B: %Ix, current reloc B: %Ix", 
24900             current_reloc_brick, brick_of (reloc_plug)));
24901
24902         if (args->before_last_plug)
24903         {
24904             dprintf (3,(" fixing last brick %Ix to point to last plug %Ix(%Ix)",
24905                      current_reloc_brick,
24906                      args->before_last_plug, 
24907                      (args->before_last_plug - brick_address (current_reloc_brick))));
24908
24909             {
24910                 set_brick (current_reloc_brick,
24911                         args->before_last_plug - brick_address (current_reloc_brick));
24912             }
24913         }
24914         current_reloc_brick = brick_of (reloc_plug);
24915     }
24916     size_t end_brick = brick_of (reloc_plug + size-1);
24917     if (end_brick != current_reloc_brick)
24918     {
24919         // The plug is straddling one or more bricks
24920         // It has to be the last plug of its first brick
24921         dprintf (3,("plug spanning multiple bricks, fixing first brick %Ix to %Ix(%Ix)",
24922                  current_reloc_brick, (size_t)reloc_plug,
24923                  (reloc_plug - brick_address (current_reloc_brick))));
24924
24925         {
24926             set_brick (current_reloc_brick,
24927                     reloc_plug - brick_address (current_reloc_brick));
24928         }
24929         // update all intervening bricks
24930         size_t brick = current_reloc_brick + 1;
24931         dprintf (3,("setting intervening bricks %Ix->%Ix to -1",
24932             brick, (end_brick - 1)));
24933         while (brick < end_brick)
24934         {
24935             set_brick (brick, -1);
24936             brick++;
24937         }
24938         // encode the last brick offset as a plug address
24939         args->before_last_plug = brick_address (end_brick) -1;
24940         current_reloc_brick = end_brick;
24941         dprintf (3, ("setting before last to %Ix, last brick to %Ix",
24942             args->before_last_plug, current_reloc_brick));
24943     } 
24944     else
24945     {
24946         dprintf (3, ("still in the same brick: %Ix", end_brick));
24947         args->before_last_plug = reloc_plug;
24948     }
24949     args->current_compacted_brick = current_reloc_brick;
24950
24951     if (check_last_object_p)
24952     {
24953         mark* entry = args->pinned_plug_entry;
24954
24955         if (args->is_shortened)
24956         {
24957             entry->swap_post_plug_and_saved();
24958         }
24959         else
24960         {
24961             entry->swap_pre_plug_and_saved();
24962         }
24963     }
24964 }
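
// The brick bookkeeping above relies on the brick table encoding (see
// set_brick): a positive entry stores (offset of the last plug in the brick)
// + 1, while a negative entry means "look back that many bricks". An
// illustrative sketch of the invariant compact_plug maintains for a plug that
// straddles bricks:
//
//   set_brick (first_brick, reloc_plug - brick_address (first_brick));
//   for (size_t b = first_brick + 1; b < end_brick; b++)
//       set_brick (b, -1);       // straddled bricks defer to an earlier one
//   // end_brick is fixed up later, once its own last plug is known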
24965
24966 void gc_heap::compact_in_brick (uint8_t* tree, compact_args* args)
24967 {
24968     assert (tree != NULL);
24969     int   left_node = node_left_child (tree);
24970     int   right_node = node_right_child (tree);
24971     ptrdiff_t relocation = node_relocation_distance (tree);
24972
24973     args->print();
24974
24975     if (left_node)
24976     {
24977         dprintf (3, ("B: L: %d->%Ix", left_node, (tree + left_node)));
24978         compact_in_brick ((tree + left_node), args);
24979     }
24980
24981     uint8_t*  plug = tree;
24982     BOOL   has_pre_plug_info_p = FALSE;
24983     BOOL   has_post_plug_info_p = FALSE;
24984
24985     if (tree == oldest_pinned_plug)
24986     {
24987         args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
24988                                                            &has_post_plug_info_p);
24989         assert (tree == pinned_plug (args->pinned_plug_entry));
24990     }
24991
24992     if (args->last_plug != 0)
24993     {
24994         size_t gap_size = node_gap_size (tree);
24995         uint8_t*  gap = (plug - gap_size);
24996         uint8_t*  last_plug_end = gap;
24997         size_t last_plug_size = (last_plug_end - args->last_plug);
24998         dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", 
24999             tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));
25000         
25001         BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
25002         if (!check_last_object_p)
25003         {
25004             assert (last_plug_size >= Align (min_obj_size));
25005         }
25006
25007         compact_plug (args->last_plug, last_plug_size, check_last_object_p, args);
25008     }
25009     else
25010     {
25011         assert (!has_pre_plug_info_p);
25012     }
25013
25014     dprintf (3, ("set args last plug to plug: %Ix, reloc: %Ix", plug, relocation));
25015     args->last_plug = plug;
25016     args->last_plug_relocation = relocation;
25017     args->is_shortened = has_post_plug_info_p;
25018
25019     if (right_node)
25020     {
25021         dprintf (3, ("B: R: %d->%Ix", right_node, (tree + right_node)));
25022         compact_in_brick ((tree + right_node), args);
25023     }
25024 }
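
// Note on ordering: compact_in_brick walks the plug tree in-order (left
// subtree, node, right subtree), so plugs are visited in ascending address
// order, and the one-plug lag through args->last_plug means each plug is
// copied only once its end is known from the next node's gap. Copying in
// ascending order is what makes recovering the artificial gaps afterwards
// safe (see the POPO TODO above gcmemcopy).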
25025
25026 void gc_heap::recover_saved_pinned_info()
25027 {
25028     reset_pinned_queue_bos();
25029
25030     while (!(pinned_plug_que_empty_p()))
25031     {
25032         mark* oldest_entry = oldest_pin();
25033         oldest_entry->recover_plug_info();
25034 #ifdef GC_CONFIG_DRIVEN
25035         if (oldest_entry->has_pre_plug_info() && oldest_entry->has_post_plug_info())
25036             record_interesting_data_point (idp_pre_and_post_pin);
25037         else if (oldest_entry->has_pre_plug_info())
25038             record_interesting_data_point (idp_pre_pin);
25039         else if (oldest_entry->has_post_plug_info())
25040             record_interesting_data_point (idp_post_pin);
25041 #endif //GC_CONFIG_DRIVEN
25042
25043         deque_pinned_plug();
25044     }
25045 }
25046
25047 void gc_heap::compact_phase (int condemned_gen_number,
25048                              uint8_t*  first_condemned_address,
25049                              BOOL clear_cards)
25050 {
25051 //  %type%  category = quote (compact);
25052 #ifdef TIME_GC
25053         unsigned start;
25054         unsigned finish;
25055         start = GetCycleCount32();
25056 #endif //TIME_GC
25057     generation*   condemned_gen = generation_of (condemned_gen_number);
25058     uint8_t*  start_address = first_condemned_address;
25059     size_t   current_brick = brick_of (start_address);
25060     heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
25061
25062     PREFIX_ASSUME(current_heap_segment != NULL);
25063
25064     reset_pinned_queue_bos();
25065     update_oldest_pinned_plug();
25066
25067     BOOL reused_seg = expand_reused_seg_p();
25068     if (reused_seg)
25069     {
25070         for (int i = 1; i <= max_generation; i++)
25071         {
25072             generation_allocation_size (generation_of (i)) = 0;
25073         }
25074     }
25075
25076     uint8_t*  end_address = heap_segment_allocated (current_heap_segment);
25077
25078     size_t  end_brick = brick_of (end_address-1);
25079     compact_args args;
25080     args.last_plug = 0;
25081     args.before_last_plug = 0;
25082     args.current_compacted_brick = ~((size_t)1);
25083     args.is_shortened = FALSE;
25084     args.pinned_plug_entry = 0;
25085     args.copy_cards_p =  (condemned_gen_number >= 1) || !clear_cards;
25086     args.check_gennum_p = reused_seg;
25087     if (args.check_gennum_p)
25088     {
25089         args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
25090     }
25091
25092     dprintf (2,("---- Compact Phase: %Ix(%Ix)----", 
25093         first_condemned_address, brick_of (first_condemned_address)));
25094
25095 #ifdef MULTIPLE_HEAPS
25096     //restart
25097     if (gc_t_join.joined())
25098     {
25099 #endif //MULTIPLE_HEAPS
25100
25101 #ifdef MULTIPLE_HEAPS
25102         dprintf(3, ("Restarting for compaction"));
25103         gc_t_join.restart();
25104     }
25105 #endif //MULTIPLE_HEAPS
25106
25107     reset_pinned_queue_bos();
25108
25109 #ifdef FEATURE_LOH_COMPACTION
25110     if (loh_compacted_p)
25111     {
25112         compact_loh();
25113     }
25114 #endif //FEATURE_LOH_COMPACTION
25115
25116     if ((start_address < end_address) ||
25117         (condemned_gen_number == max_generation))
25118     {
25119         while (1)
25120         {
25121             if (current_brick > end_brick)
25122             {
25123                 if (args.last_plug != 0)
25124                 {
25125                     dprintf (3, ("compacting last plug: %Ix", args.last_plug))
25126                     compact_plug (args.last_plug,
25127                                   (heap_segment_allocated (current_heap_segment) - args.last_plug),
25128                                   args.is_shortened,
25129                                   &args);
25130                 }
25131
25132                 if (heap_segment_next_rw (current_heap_segment))
25133                 {
25134                     current_heap_segment = heap_segment_next_rw (current_heap_segment);
25135                     current_brick = brick_of (heap_segment_mem (current_heap_segment));
25136                     end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
25137                     args.last_plug = 0;
25138                     if (args.check_gennum_p)
25139                     {
25140                         args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
25141                     }
25142                     continue;
25143                 }
25144                 else
25145                 {
25146                     if (args.before_last_plug !=0)
25147                     {
25148                         dprintf (3, ("Fixing last brick %Ix to point to plug %Ix",
25149                                     args.current_compacted_brick, (size_t)args.before_last_plug));
25150                         assert (args.current_compacted_brick != ~((size_t)1));
25151                         set_brick (args.current_compacted_brick,
25152                                    args.before_last_plug - brick_address (args.current_compacted_brick));
25153                     }
25154                     break;
25155                 }
25156             }
25157             {
25158                 int  brick_entry =  brick_table [ current_brick ];
25159                 dprintf (3, ("B: %Ix(%Ix)->%Ix", 
25160                     current_brick, (size_t)brick_entry, (brick_address (current_brick) + brick_entry - 1)));
25161
25162                 if (brick_entry >= 0)
25163                 {
25164                     compact_in_brick ((brick_address (current_brick) + brick_entry -1),
25165                                       &args);
25166
25167                 }
25168             }
25169             current_brick++;
25170         }
25171     }
25172
25173     recover_saved_pinned_info();
25174
25175 #ifdef TIME_GC
25176     finish = GetCycleCount32();
25177     compact_time = finish - start;
25178 #endif //TIME_GC
25179
25180     concurrent_print_time_delta ("compact end");
25181
25182     dprintf(2,("---- End of Compact phase ----"));
25183 }
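
// Hedged summary of the walk above: for every brick in the condemned range
// whose entry is non-negative, compact_in_brick() compacts the plug tree
// rooted at (brick_address (brick) + entry - 1); args carries the one-plug
// lag across brick and segment boundaries, which is why the last plug of a
// segment needs the explicit compact_plug() call when we run off end_brick.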
25184
25185 #ifdef MULTIPLE_HEAPS
25186
25187 #ifdef _MSC_VER
25188 #pragma warning(push)
25189 #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
25190 #endif //_MSC_VER
25191 void gc_heap::gc_thread_stub (void* arg)
25192 {
25193     gc_heap* heap = (gc_heap*)arg;
25194     if (!gc_thread_no_affinitize_p)
25195     {
25196         GCThreadAffinity affinity;
25197         affinity.Group = GCThreadAffinity::None;
25198         affinity.Processor = GCThreadAffinity::None;
25199
25200         // We are about to set affinity for GC threads. It is a good place to set up NUMA and
25201         // CPU groups because the process mask, processor number, and group number are all
25202         // readily available.
25203         if (GCToOSInterface::CanEnableGCCPUGroups())
25204             set_thread_group_affinity_for_heap(heap->heap_number, &affinity);
25205         else
25206             set_thread_affinity_mask_for_heap(heap->heap_number, &affinity);
25207
25208         if (!GCToOSInterface::SetThreadAffinity(&affinity))
25209         {
25210             dprintf(1, ("Failed to set thread affinity for server GC thread"));
25211         }
25212     }
25213
25214     // server GC threads run at a higher priority than normal.
25215     GCToOSInterface::BoostThreadPriority();
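    // The _alloca below offsets each heap thread's stack by a heap-number-
    // dependent amount; the apparent intent is to keep the hot stack frames
    // of different server GC threads from mapping to the same cache sets.
    // (Hedged - this is inferred; the allocation itself is never used.)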
25216     _alloca (256*heap->heap_number);
25217     heap->gc_thread_function();
25218 }
25219 #ifdef _MSC_VER
25220 #pragma warning(pop)
25221 #endif //_MSC_VER
25222
25223 #endif //MULTIPLE_HEAPS
25224
25225 #ifdef BACKGROUND_GC
25226
25227 #ifdef _MSC_VER
25228 #pragma warning(push)
25229 #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
25230 #endif //_MSC_VER
25231 void gc_heap::bgc_thread_stub (void* arg)
25232 {
25233     gc_heap* heap = (gc_heap*)arg;
25234     heap->bgc_thread = GCToEEInterface::GetThread();
25235     assert(heap->bgc_thread != nullptr);
25236     heap->bgc_thread_function();
25237 }
25238 #ifdef _MSC_VER
25239 #pragma warning(pop)
25240 #endif //_MSC_VER
25241
25242 #endif //BACKGROUND_GC
25243
25244 /*------------------ Background GC ----------------------------*/
25245
25246 #ifdef BACKGROUND_GC
25247
25248 void gc_heap::background_drain_mark_list (int thread)
25249 {
25250     UNREFERENCED_PARAMETER(thread);
25251
25252     size_t saved_c_mark_list_index = c_mark_list_index;
25253
25254     if (saved_c_mark_list_index)
25255     {
25256         concurrent_print_time_delta ("SML");
25257     }
25258     while (c_mark_list_index != 0)
25259     {
25260         size_t current_index = c_mark_list_index - 1;
25261         uint8_t* o = c_mark_list [current_index];
25262         background_mark_object (o THREAD_NUMBER_ARG);
25263         c_mark_list_index--;
25264     }
25265     if (saved_c_mark_list_index)
25266     {
25267
25268         concurrent_print_time_delta ("EML");
25269     }
25270
25271     fire_drain_mark_list_event (saved_c_mark_list_index);
25272 }
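
// Hedged equivalent of the drain loop above - the c_mark_list is consumed
// LIFO, popping from the tail:
//
//   while (c_mark_list_index != 0)
//   {
//       background_mark_object (c_mark_list [--c_mark_list_index] THREAD_NUMBER_ARG);
//   }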
25273
25274
25275 // The background GC version of scan_dependent_handles (see that method for a more in-depth comment).
25276 #ifdef MULTIPLE_HEAPS
25277 // Since we only scan dependent handles while we are stopped we'll never interfere with FGCs scanning
25278 // them. So we can use the same static variables.
25279 void gc_heap::background_scan_dependent_handles (ScanContext *sc)
25280 {
25281     // Whenever we call this method there may have been preceding object promotions. So set
25282     // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
25283     // based on how the scanning proceeded).
25284     s_fUnscannedPromotions = TRUE;
25285
25286     // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
25287     // the state of this thread's portion of the dependent handle table. That's because promotions on other
25288     // threads could cause handle promotions to become necessary here. Even if there are definitely no more
25289     // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
25290     // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
25291     // as all the others or they'll get out of step).
25292     while (true)
25293     {
25294         // The various worker threads are all currently racing in this code. We need to work out if at least
25295         // one of them think they have work to do this cycle. Each thread needs to rescan its portion of the
25296         // dependent handle table when both of the following conditions apply:
25297         //  1) At least one (arbitrary) object might have been promoted since the last scan (because if this
25298         //     object happens to correspond to a primary in one of our handles we might potentially have to
25299         //     promote the associated secondary).
25300         //  2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
25301         //
25302         // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
25303         // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
25304         // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
25305         // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
25306         // follows below. Note that we can't read this outside of the join since on any iteration apart from
25307         // the first threads will be racing between reading this value and completing their previous
25308         // iteration's table scan.
25309         //
25310         // The second condition is tracked by the dependent handle code itself on a per worker thread basis
25311         // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
25312         // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
25313         // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
25314         // we're safely joined.
25315         if (GCScan::GcDhUnpromotedHandlesExist(sc))
25316             s_fUnpromotedHandles = TRUE;
25317
25318         // Synchronize all the threads so we can read our state variables safely. The following shared
25319         // variable (indicating whether we should scan the tables or terminate the loop) will be set by a
25320         // single thread inside the join.
25321         bgc_t_join.join(this, gc_join_scan_dependent_handles);
25322         if (bgc_t_join.joined())
25323         {
25324             // We're synchronized so it's safe to read our shared state variables. We update another shared
25325             // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
25326             // the loop. We scan if there has been at least one object promotion since last time and at least
25327             // one thread has a dependent handle table with a potential handle promotion possible.
25328             s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;
25329
25330             // Reset our shared state variables (ready to be set again on this scan or with a good initial
25331             // value for the next call if we're terminating the loop).
25332             s_fUnscannedPromotions = FALSE;
25333             s_fUnpromotedHandles = FALSE;
25334
25335             if (!s_fScanRequired)
25336             {
25337                 uint8_t* all_heaps_max = 0;
25338                 uint8_t* all_heaps_min = MAX_PTR;
25339                 int i;
25340                 for (i = 0; i < n_heaps; i++)
25341                 {
25342                     if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
25343                         all_heaps_max = g_heaps[i]->background_max_overflow_address;
25344                     if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
25345                         all_heaps_min = g_heaps[i]->background_min_overflow_address;
25346                 }
25347                 for (i = 0; i < n_heaps; i++)
25348                 {
25349                     g_heaps[i]->background_max_overflow_address = all_heaps_max;
25350                     g_heaps[i]->background_min_overflow_address = all_heaps_min;
25351                 }
25352             }
25353
25354             // Restart all the workers.
25355             dprintf(2, ("Starting all gc thread mark stack overflow processing"));
25356             bgc_t_join.restart();
25357         }
25358
25359         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
25360         // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
25361         // global flag indicating that at least one object promotion may have occurred (the usual comment
25362         // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
25363         // exit the method since we unconditionally set this variable on method entry anyway).
25364         if (background_process_mark_overflow (sc->concurrent))
25365             s_fUnscannedPromotions = TRUE;
25366
25367         // If we decided that no scan was required we can terminate the loop now.
25368         if (!s_fScanRequired)
25369             break;
25370
25371         // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
25372         // processed before we start scanning dependent handle tables (if overflows remain while we scan we
25373         // could miss noting the promotion of some primary objects).
25374         bgc_t_join.join(this, gc_join_rescan_dependent_handles);
25375         if (bgc_t_join.joined())
25376         {
25377             // Restart all the workers.
25378             dprintf(3, ("Starting all gc thread for dependent handle promotion"));
25379             bgc_t_join.restart();
25380         }
25381
25382         // If the portion of the dependent handle table managed by this worker has handles that could still be
25383         // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
25384         // could require a rescan of handles on this or other workers.
25385         if (GCScan::GcDhUnpromotedHandlesExist(sc))
25386             if (GCScan::GcDhReScan(sc))
25387                 s_fUnscannedPromotions = TRUE;
25388     }
25389 }
25390 #else
25391 void gc_heap::background_scan_dependent_handles (ScanContext *sc)
25392 {
25393     // Whenever we call this method there may have been preceding object promotions. So set
25394     // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
25395     // based on how the scanning proceeded).
25396     bool fUnscannedPromotions = true;
25397
25398     // Scan dependent handles repeatedly until no handles that could still be promoted remain, or until a
25399     // full scan completes without performing any new promotions.
25400     while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
25401     {
25402         // On each iteration of the loop start with the assumption that no further objects have been promoted.
25403         fUnscannedPromotions = false;
25404
25405         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
25406         // being visible. If there was an overflow (background_process_mark_overflow returned true) then
25407         // additional objects now appear to be promoted and we should set the flag.
25408         if (background_process_mark_overflow (sc->concurrent))
25409             fUnscannedPromotions = true;
25410
25411         // Perform the scan and set the flag if any promotions resulted.
25412         if (GCScan::GcDhReScan (sc))
25413             fUnscannedPromotions = true;
25414     }
25415
25416     // Perform a last processing of any overflowed mark stack.
25417     background_process_mark_overflow (sc->concurrent);
25418 }
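
// Informal termination argument for the loop above: each iteration either
// promotes at least one new secondary (GcDhReScan or the overflow processing
// returns true) or leaves fUnscannedPromotions false, which ends the loop.
// Since the object graph is finite, new promotions can only happen finitely
// many times, so the scan always terminates.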
25419 #endif //MULTIPLE_HEAPS
25420
25421 void gc_heap::recover_bgc_settings()
25422 {
25423     if ((settings.condemned_generation < max_generation) && recursive_gc_sync::background_running_p())
25424     {
25425         dprintf (2, ("restoring bgc settings"));
25426         settings = saved_bgc_settings;
25427         GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
25428     }
25429 }
25430
25431 void gc_heap::allow_fgc()
25432 {
25433     assert (bgc_thread == GCToEEInterface::GetThread());
25434     bool bToggleGC = false;
25435
25436     if (g_fSuspensionPending > 0)
25437     {
25438         bToggleGC = GCToEEInterface::EnablePreemptiveGC();
25439         if (bToggleGC)
25440         {
25441             GCToEEInterface::DisablePreemptiveGC();
25442         }
25443     }
25444 }
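
// Why the toggle above is sufficient: switching to preemptive mode lets a
// pending suspension (g_fSuspensionPending) stop this BGC thread at a safe
// point, and DisablePreemptiveGC then blocks until the foreground GC has
// finished before the BGC thread resumes cooperative work.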
25445
25446 BOOL gc_heap::should_commit_mark_array()
25447 {
25448     return (recursive_gc_sync::background_running_p() || (current_bgc_state == bgc_initialized));
25449 }
25450
25451 void gc_heap::clear_commit_flag()
25452 {
25453     generation* gen = generation_of (max_generation);
25454     heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25455     while (1)
25456     {
25457         if (seg == 0)
25458         {
25459             if (gen != large_object_generation)
25460             {
25461                 gen = large_object_generation;
25462                 seg = heap_segment_in_range (generation_start_segment (gen));
25463             }
25464             else
25465             {
25466                 break;
25467             }
25468         }
25469
25470         if (seg->flags & heap_segment_flags_ma_committed)
25471         {
25472             seg->flags &= ~heap_segment_flags_ma_committed;
25473         }
25474
25475         if (seg->flags & heap_segment_flags_ma_pcommitted)
25476         {
25477             seg->flags &= ~heap_segment_flags_ma_pcommitted;
25478         }
25479
25480         seg = heap_segment_next (seg);
25481     }
25482 }
25483
25484 void gc_heap::clear_commit_flag_global()
25485 {
25486 #ifdef MULTIPLE_HEAPS
25487     for (int i = 0; i < n_heaps; i++)
25488     {
25489         g_heaps[i]->clear_commit_flag();
25490     }
25491 #else
25492     clear_commit_flag();
25493 #endif //MULTIPLE_HEAPS
25494 }
25495
25496 void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
25497 {
25498 #ifdef _DEBUG
25499     size_t  markw = mark_word_of (begin);
25500     size_t  markw_end = mark_word_of (end);
25501
25502     while (markw < markw_end)
25503     {
25504         if (mark_array_addr[markw])
25505         {
25506             dprintf  (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
25507                             markw, mark_array_addr[markw], mark_word_address (markw)));
25508             FATAL_GC_ERROR();
25509         }
25510         markw++;
25511     }
25512 #else // _DEBUG
25513     UNREFERENCED_PARAMETER(begin);
25514     UNREFERENCED_PARAMETER(end);
25515     UNREFERENCED_PARAMETER(mark_array_addr);
25516 #endif //_DEBUG
25517 }
25518
25519 void gc_heap::verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr)
25520 {
25521     verify_mark_array_cleared (heap_segment_mem (seg), heap_segment_reserved (seg), mark_array_addr);
25522 }
25523
25524 BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp, 
25525                                          heap_segment* seg,
25526                                          uint32_t* new_card_table,
25527                                          uint8_t* new_lowest_address)
25528 {
25529     UNREFERENCED_PARAMETER(hp); // compiler bug? -- this *is*, indeed, referenced
25530
25531     uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25532     uint8_t* end = heap_segment_reserved (seg);
25533
25534     uint8_t* lowest = hp->background_saved_lowest_address;
25535     uint8_t* highest = hp->background_saved_highest_address;
25536
25537     uint8_t* commit_start = NULL;
25538     uint8_t* commit_end = NULL;
25539     size_t commit_flag = 0;
25540
25541     if ((highest >= start) &&
25542         (lowest <= end))
25543     {
25544         if ((start >= lowest) && (end <= highest))
25545         {
25546             dprintf (GC_TABLE_LOG, ("completely in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
25547                                     start, end, lowest, highest));
25548             commit_flag = heap_segment_flags_ma_committed;
25549         }
25550         else
25551         {
25552             dprintf (GC_TABLE_LOG, ("partially in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
25553                                     start, end, lowest, highest));
25554             commit_flag = heap_segment_flags_ma_pcommitted;
25555         }
25556
25557         commit_start = max (lowest, start);
25558         commit_end = min (highest, end);
25559
25560         if (!commit_mark_array_by_range (commit_start, commit_end, hp->mark_array))
25561         {
25562             return FALSE;
25563         }
25564
25565         if (new_card_table == 0)
25566         {
25567             new_card_table = g_gc_card_table;
25568         }
25569
25570         if (hp->card_table != new_card_table)
25571         {
25572             if (new_lowest_address == 0)
25573             {
25574                 new_lowest_address = g_gc_lowest_address;
25575             }
25576
25577             uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))];
25578             uint32_t* ma = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, new_lowest_address));
25579
25580             dprintf (GC_TABLE_LOG, ("table realloc-ed: %Ix->%Ix, MA: %Ix->%Ix", 
25581                                     hp->card_table, new_card_table,
25582                                     hp->mark_array, ma));
25583
25584             if (!commit_mark_array_by_range (commit_start, commit_end, ma))
25585             {
25586                 return FALSE;
25587             }
25588         }
25589
25590         seg->flags |= commit_flag;
25591     }
25592
25593     return TRUE;
25594 }
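
// Hedged summary of the two flags set above: heap_segment_flags_ma_committed
// means the segment's entire mark array range is committed, so later BGCs can
// skip it; heap_segment_flags_ma_pcommitted means only the part intersecting
// [background_saved_lowest_address, background_saved_highest_address) was
// committed, so commit_mark_array_bgc_init must finish the job at the start
// of the next BGC.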
25595
25596 BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
25597 {
25598     size_t beg_word = mark_word_of (begin);
25599     size_t end_word = mark_word_of (align_on_mark_word (end));
25600     uint8_t* commit_start = align_lower_page ((uint8_t*)&mark_array_addr[beg_word]);
25601     uint8_t* commit_end = align_on_page ((uint8_t*)&mark_array_addr[end_word]);
25602     size_t size = (size_t)(commit_end - commit_start);
25603
25604 #ifdef SIMPLE_DPRINTF
25605     dprintf (GC_TABLE_LOG, ("range: %Ix->%Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), commit %Ix->%Ix(%Id)",
25606                             begin, end,
25607                             beg_word, end_word,
25608                             (end_word - beg_word) * sizeof (uint32_t),
25609                             &mark_array_addr[beg_word],
25610                             &mark_array_addr[end_word],
25611                             (size_t)(&mark_array_addr[end_word] - &mark_array_addr[beg_word]),
25612                             commit_start, commit_end,
25613                             size));
25614 #endif //SIMPLE_DPRINTF
25615
25616     if (GCToOSInterface::VirtualCommit (commit_start, size))
25617     {
25618         // We can only verify the mark array is cleared from begin to end; the first and the last
25619         // page aren't necessarily all cleared because they could be used by other segments or
25620         // the card bundle.
25621         verify_mark_array_cleared (begin, end, mark_array_addr);
25622         return TRUE;
25623     }
25624     else
25625     {
25626         dprintf (GC_TABLE_LOG, ("failed to commit %Id bytes", (end_word - beg_word) * sizeof (uint32_t)));
25627         return FALSE;
25628     }
25629 }
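
// Hedged sketch of the commit math above: each mark array element covers a
// fixed-size chunk of heap, so for a heap range [begin, end) the bytes that
// must be committed are
//
//   [&mark_array_addr [mark_word_of (begin)],
//    &mark_array_addr [mark_word_of (align_on_mark_word (end))])
//
// rounded *outward* to page boundaries (align_lower_page on the start,
// align_on_page on the end) because VirtualCommit works on whole pages.
// Compare decommit_mark_array_by_seg below, which rounds *inward*.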
25630
25631 BOOL gc_heap::commit_mark_array_with_check (heap_segment* seg, uint32_t* new_mark_array_addr)
25632 {
25633     uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25634     uint8_t* end = heap_segment_reserved (seg);
25635
25636 #ifdef MULTIPLE_HEAPS
25637     uint8_t* lowest = heap_segment_heap (seg)->background_saved_lowest_address;
25638     uint8_t* highest = heap_segment_heap (seg)->background_saved_highest_address;
25639 #else
25640     uint8_t* lowest = background_saved_lowest_address;
25641     uint8_t* highest = background_saved_highest_address;
25642 #endif //MULTIPLE_HEAPS
25643
25644     if ((highest >= start) &&
25645         (lowest <= end))
25646     {
25647         start = max (lowest, start);
25648         end = min (highest, end);
25649         if (!commit_mark_array_by_range (start, end, new_mark_array_addr))
25650         {
25651             return FALSE;
25652         }
25653     }
25654
25655     return TRUE;
25656 }
25657
25658 BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr)
25659 {
25660     dprintf (GC_TABLE_LOG, ("seg: %Ix->%Ix; MA: %Ix",
25661         seg,
25662         heap_segment_reserved (seg),
25663         mark_array_addr));
25664     uint8_t* start = (heap_segment_read_only_p (seg) ? heap_segment_mem (seg) : (uint8_t*)seg);
25665
25666     return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr);
25667 }
25668
25669 BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
25670 {
25671     UNREFERENCED_PARAMETER(mark_array_addr);
25672
25673     dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix", 
25674                             lowest_address, highest_address, mark_array));
25675
25676     generation* gen = generation_of (max_generation);
25677     heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25678     while (1)
25679     {
25680         if (seg == 0)
25681         {
25682             if (gen != large_object_generation)
25683             {
25684                 gen = large_object_generation;
25685                 seg = heap_segment_in_range (generation_start_segment (gen));
25686             }
25687             else
25688             {
25689                 break;
25690             }
25691         }
25692
25693         dprintf (GC_TABLE_LOG, ("seg: %Ix, flags: %Id", seg, seg->flags));
25694
25695         if (!(seg->flags & heap_segment_flags_ma_committed))
25696         {
25697             // Ro segments could always be only partially in range, so we'd end up
25698             // calling this at the beginning of every BGC. We are not making this
25699             // more efficient right now - ro segments are currently only used by Redhawk.
25700             if (heap_segment_read_only_p (seg))
25701             {
25702                 if ((heap_segment_mem (seg) >= lowest_address) && 
25703                     (heap_segment_reserved (seg) <= highest_address))
25704                 {
25705                     if (commit_mark_array_by_seg (seg, mark_array))
25706                     {
25707                         seg->flags |= heap_segment_flags_ma_committed;
25708                     }
25709                     else
25710                     {
25711                         return FALSE;
25712                     }
25713                 }
25714                 else
25715                 {
25716                     uint8_t* start = max (lowest_address, heap_segment_mem (seg));
25717                     uint8_t* end = min (highest_address, heap_segment_reserved (seg));
25718                     if (commit_mark_array_by_range (start, end, mark_array))
25719                     {
25720                         seg->flags |= heap_segment_flags_ma_pcommitted;
25721                     }
25722                     else
25723                     {
25724                         return FALSE;
25725                     }
25726                 }
25727             }
25728             else
25729             {
25730                 // Normal segments are by design completely in range, so just
25731                 // commit the whole mark array for each seg.
25732                 if (commit_mark_array_by_seg (seg, mark_array))
25733                 {
25734                     if (seg->flags & heap_segment_flags_ma_pcommitted)
25735                     {
25736                         seg->flags &= ~heap_segment_flags_ma_pcommitted;
25737                     }
25738                     seg->flags |= heap_segment_flags_ma_committed;
25739                 }
25740                 else
25741                 {
25742                     return FALSE;
25743                 }
25744             }
25745         }
25746
25747         seg = heap_segment_next (seg);
25748     }
25749
25750     return TRUE;
25751 }
25752
25753 // This function doesn't check the commit flag since it's for a new array -
25754 // the mark_array flag for these segments will remain the same.
25755 BOOL gc_heap::commit_new_mark_array (uint32_t* new_mark_array_addr)
25756 {
25757     dprintf (GC_TABLE_LOG, ("commiting existing segs on MA %Ix", new_mark_array_addr));
25758     generation* gen = generation_of (max_generation);
25759     heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25760     while (1)
25761     {
25762         if (seg == 0)
25763         {
25764             if (gen != large_object_generation)
25765             {
25766                 gen = large_object_generation;
25767                 seg = heap_segment_in_range (generation_start_segment (gen));
25768             }
25769             else
25770             {
25771                 break;
25772             }
25773         }
25774
25775         if (!commit_mark_array_with_check (seg, new_mark_array_addr))
25776         {
25777             return FALSE;
25778         }
25779
25780         seg = heap_segment_next (seg);
25781     }
25782
25783 #ifdef MULTIPLE_HEAPS
25784     if (new_heap_segment)
25785     {
25786         if (!commit_mark_array_with_check (new_heap_segment, new_mark_array_addr))
25787         {
25788             return FALSE;
25789         }        
25790     }
25791 #endif //MULTIPLE_HEAPS
25792
25793     return TRUE;
25794 }
25795
25796 BOOL gc_heap::commit_new_mark_array_global (uint32_t* new_mark_array)
25797 {
25798 #ifdef MULTIPLE_HEAPS
25799     for (int i = 0; i < n_heaps; i++)
25800     {
25801         if (!g_heaps[i]->commit_new_mark_array (new_mark_array))
25802         {
25803             return FALSE;
25804         }
25805     }
25806 #else
25807     if (!commit_new_mark_array (new_mark_array))
25808     {
25809         return FALSE;
25810     }
25811 #endif //MULTIPLE_HEAPS
25812
25813     return TRUE;
25814 }
25815
25816 void gc_heap::decommit_mark_array_by_seg (heap_segment* seg)
25817 {
25818     // if BGC is disabled (the finalize watchdog does this at shutdown), the mark array could have
25819     // been set to NULL. 
25820     if (mark_array == NULL)
25821     {
25822         return;
25823     }
25824
25825     dprintf (GC_TABLE_LOG, ("decommitting seg %Ix(%Ix), MA: %Ix", seg, seg->flags, mark_array));
25826
25827     size_t flags = seg->flags;
25828
25829     if ((flags & heap_segment_flags_ma_committed) ||
25830         (flags & heap_segment_flags_ma_pcommitted))
25831     {
25832         uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25833         uint8_t* end = heap_segment_reserved (seg);
25834
25835         if (flags & heap_segment_flags_ma_pcommitted)
25836         {
25837             start = max (lowest_address, start);
25838             end = min (highest_address, end);
25839         }
25840
25841         size_t beg_word = mark_word_of (start);
25842         size_t end_word = mark_word_of (align_on_mark_word (end));
25843         uint8_t* decommit_start = align_on_page ((uint8_t*)&mark_array[beg_word]);
25844         uint8_t* decommit_end = align_lower_page ((uint8_t*)&mark_array[end_word]);
25845         size_t size = (size_t)(decommit_end - decommit_start);
25846
25847 #ifdef SIMPLE_DPRINTF
25848         dprintf (GC_TABLE_LOG, ("seg: %Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), decommit %Ix->%Ix(%Id)",
25849                                 seg,
25850                                 beg_word, end_word,
25851                                 (end_word - beg_word) * sizeof (uint32_t),
25852                                 &mark_array[beg_word],
25853                                 &mark_array[end_word],
25854                                 (size_t)(&mark_array[end_word] - &mark_array[beg_word]),
25855                                 decommit_start, decommit_end,
25856                                 size));
25857 #endif //SIMPLE_DPRINTF
25858         
25859         if (decommit_start < decommit_end)
25860         {
25861             if (!GCToOSInterface::VirtualDecommit (decommit_start, size))
25862             {
25863                 dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualDecommit on %Ix for %Id bytes failed", 
25864                                         decommit_start, size));
25865                 assert (!"decommit failed");
25866             }
25867         }
25868
25869         dprintf (GC_TABLE_LOG, ("decommited [%Ix for address [%Ix", beg_word, seg));
25870     }
25871 }
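
// Note the deliberate asymmetry with commit_mark_array_by_range: the range
// here is rounded *inward* (align_on_page on the start, align_lower_page on
// the end), so a page whose mark bits may be shared with a neighboring
// segment is never decommitted - at worst we leave a partial page committed.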
25872
25873 void gc_heap::background_mark_phase ()
25874 {
25875     verify_mark_array_cleared();
25876
25877     ScanContext sc;
25878     sc.thread_number = heap_number;
25879     sc.promotion = TRUE;
25880     sc.concurrent = FALSE;
25881
25882     THREAD_FROM_HEAP;
25883     BOOL cooperative_mode = TRUE;
25884 #ifndef MULTIPLE_HEAPS
25885     const int thread = heap_number;
25886 #endif //!MULTIPLE_HEAPS
25887
25888     dprintf(2,("-(GC%d)BMark-", VolatileLoad(&settings.gc_index)));
25889
25890     assert (settings.concurrent);
25891
25892 #ifdef TIME_GC
25893     unsigned start;
25894     unsigned finish;
25895     start = GetCycleCount32();
25896 #endif //TIME_GC
25897
25898 #ifdef FFIND_OBJECT
25899     if (gen0_must_clear_bricks > 0)
25900         gen0_must_clear_bricks--;
25901 #endif //FFIND_OBJECT
25902
25903     background_soh_alloc_count = 0;
25904     background_loh_alloc_count = 0;
25905     bgc_overflow_count = 0;
25906
25907     bpromoted_bytes (heap_number) = 0;
25908     static uint32_t num_sizedrefs = 0;
25909
25910     background_min_overflow_address = MAX_PTR;
25911     background_max_overflow_address = 0;
25912     background_min_soh_overflow_address = MAX_PTR;
25913     background_max_soh_overflow_address = 0;
25914     processed_soh_overflow_p = FALSE;
25915
25916     {
25917         //set up the mark lists from g_mark_list
25918         assert (g_mark_list);
25919         mark_list = g_mark_list;
25920         //don't use the mark list for full gc
25921         //because multiple segments are more complex to handle and the list
25922         //is likely to overflow
25923         mark_list_end = &mark_list [0];
25924         mark_list_index = &mark_list [0];
25925
25926         c_mark_list_index = 0;
25927
25928 #ifndef MULTIPLE_HEAPS
25929         shigh = (uint8_t*) 0;
25930         slow  = MAX_PTR;
25931 #endif //MULTIPLE_HEAPS
25932
25933         generation*   gen = generation_of (max_generation);
25934
25935         dprintf(3,("BGC: stack marking"));
25936         sc.concurrent = TRUE;
25937
25938         GCScan::GcScanRoots(background_promote_callback,
25939                                 max_generation, max_generation,
25940                                 &sc);
25941     }
25942
25943     {
25944         dprintf(3,("BGC: finalization marking"));
25945         finalize_queue->GcScanRoots(background_promote_callback, heap_number, 0);
25946     }
25947
25948     size_t total_loh_size = generation_size (max_generation + 1);
25949     bgc_begin_loh_size = total_loh_size;
25950     bgc_alloc_spin_loh = 0;
25951     bgc_loh_size_increased = 0;
25952     bgc_loh_allocated_in_free = 0;
25953     size_t total_soh_size = generation_sizes (generation_of (max_generation));
25954
25955     dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
25956
25957     {
25958         //concurrent_print_time_delta ("copying stack roots");
25959         concurrent_print_time_delta ("CS");
25960
25961         FIRE_EVENT(BGC1stNonConEnd);
25962
25963         expanded_in_fgc = FALSE;
25964         saved_overflow_ephemeral_seg = 0;
25965         current_bgc_state = bgc_reset_ww;
25966
25967         // We used to think we didn't need a join here - whichever thread got here
25968         // first could change the states and call restart_vm. That's not true: we
25969         // can't let the EE run while we are scanning the stack. Since we now allow
25970         // resetting write watch to run concurrently and have a join for it, we can
25971         // restart the EE on the 1st thread that gets here. Make sure we handle the
25972         // sizedref handles correctly.
25973 #ifdef MULTIPLE_HEAPS
25974         bgc_t_join.join(this, gc_join_restart_ee);
25975         if (bgc_t_join.joined())
25976 #endif //MULTIPLE_HEAPS
25977         {
25978 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25979             // Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. The reset
25980             // can be done while the runtime is suspended or after the runtime is restarted; the preference was to reset while
25981             // the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below.
25982 #ifdef WRITE_WATCH
25983             concurrent_print_time_delta ("CRWW begin");
25984
25985 #ifdef MULTIPLE_HEAPS
25986             for (int i = 0; i < n_heaps; i++)
25987             {
25988                 g_heaps[i]->reset_write_watch (FALSE);
25989             }
25990 #else
25991             reset_write_watch (FALSE);
25992 #endif //MULTIPLE_HEAPS
25993
25994             concurrent_print_time_delta ("CRWW");
25995 #endif //WRITE_WATCH
25996 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25997
25998             num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
25999
26000             // this c_write is not strictly necessary because restart_vm
26001             // executes an instruction that will flush the cpu cache (an
26002             // interlocked operation), but we don't want to rely on that.
26003             dprintf (BGC_LOG, ("setting cm_in_progress"));
26004             c_write (cm_in_progress, TRUE);
26005
26006             //restart all threads, doing the marking from the array
26007             assert (dont_restart_ee_p);
26008             dont_restart_ee_p = FALSE;
26009
26010             restart_vm();
26011             GCToOSInterface::YieldThread (0);
26012 #ifdef MULTIPLE_HEAPS
26013             dprintf(3, ("Starting all gc threads for gc"));
26014             bgc_t_join.restart();
26015 #endif //MULTIPLE_HEAPS
26016         }
26017
26018 #ifdef MULTIPLE_HEAPS
26019         bgc_t_join.join(this, gc_join_after_reset);
26020         if (bgc_t_join.joined())
26021 #endif //MULTIPLE_HEAPS
26022         {
26023             disable_preemptive (true);
26024
26025 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26026             // When software write watch is enabled, resetting write watch is done while the runtime is suspended above. The
26027             // post-reset call to revisit_written_pages is only necessary for concurrent reset_write_watch, to discard dirtied
26028             // pages during the concurrent reset.
26029
26030 #ifdef WRITE_WATCH
26031             concurrent_print_time_delta ("CRWW begin");
26032
26033 #ifdef MULTIPLE_HEAPS
26034             for (int i = 0; i < n_heaps; i++)
26035             {
26036                 g_heaps[i]->reset_write_watch (TRUE);
26037             }
26038 #else
26039             reset_write_watch (TRUE);
26040 #endif //MULTIPLE_HEAPS
26041
26042             concurrent_print_time_delta ("CRWW");
26043 #endif //WRITE_WATCH
26044
26045 #ifdef MULTIPLE_HEAPS
26046             for (int i = 0; i < n_heaps; i++)
26047             {
26048                 g_heaps[i]->revisit_written_pages (TRUE, TRUE);
26049             }
26050 #else
26051             revisit_written_pages (TRUE, TRUE);
26052 #endif //MULTIPLE_HEAPS
26053
26054             concurrent_print_time_delta ("CRW");
26055 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26056
26057 #ifdef MULTIPLE_HEAPS
26058             for (int i = 0; i < n_heaps; i++)
26059             {
26060                 g_heaps[i]->current_bgc_state = bgc_mark_handles;
26061             }
26062 #else
26063             current_bgc_state = bgc_mark_handles;
26064 #endif //MULTIPLE_HEAPS
26065
26066             current_c_gc_state = c_gc_state_marking;
26067
26068             enable_preemptive ();
26069
26070 #ifdef MULTIPLE_HEAPS
26071             dprintf(3, ("Joining BGC threads after resetting writewatch"));
26072             bgc_t_join.restart();
26073 #endif //MULTIPLE_HEAPS
26074         }
26075
26076         disable_preemptive (true);
26077
26078         if (num_sizedrefs > 0)
26079         {
26080             GCScan::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc);
26081
26082             enable_preemptive ();
26083
26084 #ifdef MULTIPLE_HEAPS
26085             bgc_t_join.join(this, gc_join_scan_sizedref_done);
26086             if (bgc_t_join.joined())
26087             {
26088                 dprintf(3, ("Done with marking all sized refs. Starting all bgc thread for marking other strong roots"));
26089                 bgc_t_join.restart();
26090             }
26091 #endif //MULTIPLE_HEAPS
26092
26093             disable_preemptive (true);
26094         }
26095
26096         dprintf (3,("BGC: handle table marking"));
26097         GCScan::GcScanHandles(background_promote,
26098                                   max_generation, max_generation,
26099                                   &sc);
26100         //concurrent_print_time_delta ("concurrent marking handle table");
26101         concurrent_print_time_delta ("CRH");
26102
26103         current_bgc_state = bgc_mark_stack;
26104         dprintf (2,("concurrent draining mark list"));
26105         background_drain_mark_list (thread);
26106         //concurrent_print_time_delta ("concurrent marking stack roots");
26107         concurrent_print_time_delta ("CRS");
26108
26109         dprintf (2,("concurrent revisiting dirtied pages"));
26110         revisit_written_pages (TRUE);
26111         revisit_written_pages (TRUE);
26112         //concurrent_print_time_delta ("concurrent marking dirtied pages on LOH");
26113         concurrent_print_time_delta ("CRre");
26114
26115         enable_preemptive ();
26116
26117 #ifdef MULTIPLE_HEAPS
26118         bgc_t_join.join(this, gc_join_concurrent_overflow);
26119         if (bgc_t_join.joined())
26120         {
26121             uint8_t* all_heaps_max = 0;
26122             uint8_t* all_heaps_min = MAX_PTR;
26123             int i;
26124             for (i = 0; i < n_heaps; i++)
26125             {
26126                 dprintf (3, ("heap %d overflow max is %Ix, min is %Ix", 
26127                     i,
26128                     g_heaps[i]->background_max_overflow_address,
26129                     g_heaps[i]->background_min_overflow_address));
26130                 if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
26131                     all_heaps_max = g_heaps[i]->background_max_overflow_address;
26132                 if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
26133                     all_heaps_min = g_heaps[i]->background_min_overflow_address;
26134             }
26135             for (i = 0; i < n_heaps; i++)
26136             {
26137                 g_heaps[i]->background_max_overflow_address = all_heaps_max;
26138                 g_heaps[i]->background_min_overflow_address = all_heaps_min;
26139             }
26140             dprintf(3, ("Starting all bgc threads after updating the overflow info"));
26141             bgc_t_join.restart();
26142         }
26143 #endif //MULTIPLE_HEAPS
26144
26145         disable_preemptive (true);
26146
26147         dprintf (2, ("before CRov count: %d", bgc_overflow_count));
26148         bgc_overflow_count = 0;
26149         background_process_mark_overflow (TRUE);
26150         dprintf (2, ("after CRov count: %d", bgc_overflow_count));
26151         bgc_overflow_count = 0;
26152         //concurrent_print_time_delta ("concurrent processing mark overflow");
26153         concurrent_print_time_delta ("CRov");
26154
26155         // Stop all threads, crawl all stacks and revisit changed pages.
26156         FIRE_EVENT(BGC1stConEnd);
26157
26158         dprintf (2, ("Stopping the EE"));
26159
26160         enable_preemptive ();
26161
26162 #ifdef MULTIPLE_HEAPS
26163         bgc_t_join.join(this, gc_join_suspend_ee);
26164         if (bgc_t_join.joined())
26165         {
26166             bgc_threads_sync_event.Reset();
26167
26168             dprintf(3, ("Joining BGC threads for non concurrent final marking"));
26169             bgc_t_join.restart();
26170         }
26171 #endif //MULTIPLE_HEAPS
26172
26173         if (heap_number == 0)
26174         {
26175             enter_spin_lock (&gc_lock);
26176
26177             bgc_suspend_EE ();
26178             //suspend_EE ();
26179             bgc_threads_sync_event.Set();
26180         }
26181         else
26182         {
26183             bgc_threads_sync_event.Wait(INFINITE, FALSE);
26184             dprintf (2, ("bgc_threads_sync_event is signalled"));
26185         }
26186
26187         assert (settings.concurrent);
26188         assert (settings.condemned_generation == max_generation);
26189
26190         dprintf (2, ("clearing cm_in_progress"));
26191         c_write (cm_in_progress, FALSE);
26192
26193         bgc_alloc_lock->check();
26194
26195         current_bgc_state = bgc_final_marking;
26196
26197         //concurrent_print_time_delta ("concurrent marking ended");
26198         concurrent_print_time_delta ("CR");
26199
26200         FIRE_EVENT(BGC2ndNonConBegin);
26201
26202         mark_absorb_new_alloc();
26203
26204         // We need a join here 'cause find_object would complain if the gen0
26205         // bricks of another heap haven't been fixed up. So we need to make sure
26206         // that every heap's gen0 bricks are fixed up before we proceed.
26207 #ifdef MULTIPLE_HEAPS
26208         bgc_t_join.join(this, gc_join_after_absorb);
26209         if (bgc_t_join.joined())
26210         {
26211             dprintf(3, ("Joining BGC threads after absorb"));
26212             bgc_t_join.restart();
26213         }
26214 #endif //MULTIPLE_HEAPS
26215
26216         // give VM a chance to do work
26217         GCToEEInterface::GcBeforeBGCSweepWork();
26218
26219         //reset the flag, indicating that the EE no longer expects concurrent
26220         //marking
26221         sc.concurrent = FALSE;
26222
26223         total_loh_size = generation_size (max_generation + 1);
26224         total_soh_size = generation_sizes (generation_of (max_generation));
26225
26226         dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
26227
26228         dprintf (2, ("nonconcurrent marking stack roots"));
26229         GCScan::GcScanRoots(background_promote,
26230                                 max_generation, max_generation,
26231                                 &sc);
26232         //concurrent_print_time_delta ("nonconcurrent marking stack roots");
26233         concurrent_print_time_delta ("NRS");
26234
26235 //        finalize_queue->EnterFinalizeLock();
26236         finalize_queue->GcScanRoots(background_promote, heap_number, 0);
26237 //        finalize_queue->LeaveFinalizeLock();
26238
26239         dprintf (2, ("nonconcurrent marking handle table"));
26240         GCScan::GcScanHandles(background_promote,
26241                                   max_generation, max_generation,
26242                                   &sc);
26243         //concurrent_print_time_delta ("nonconcurrent marking handle table");
26244         concurrent_print_time_delta ("NRH");
26245
26246         dprintf (2,("---- (GC%d)final going through written pages ----", VolatileLoad(&settings.gc_index)));
26247         revisit_written_pages (FALSE);
26248         //concurrent_print_time_delta ("nonconcurrent revisit dirtied pages on LOH");
26249         concurrent_print_time_delta ("NRre LOH");
26250
26251 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26252 #ifdef MULTIPLE_HEAPS
26253         bgc_t_join.join(this, gc_join_disable_software_write_watch);
26254         if (bgc_t_join.joined())
26255 #endif // MULTIPLE_HEAPS
26256         {
26257             // The runtime is suspended, and we will be doing a final query of dirty pages, so pause tracking written pages to
26258             // avoid further perf penalty after the runtime is restarted
26259             SoftwareWriteWatch::DisableForGCHeap();
26260
26261 #ifdef MULTIPLE_HEAPS
26262             dprintf(3, ("Restarting BGC threads after disabling software write watch"));
26263             bgc_t_join.restart();
26264 #endif // MULTIPLE_HEAPS
26265         }
26266 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26267
26268         dprintf (2, ("before NR 1st Hov count: %d", bgc_overflow_count));
26269         bgc_overflow_count = 0;
26270
26271         // Dependent handles need to be scanned with a special algorithm (see the header comment on
26272         // scan_dependent_handles for more detail). We perform an initial scan without processing any mark
26273         // stack overflow. This is not guaranteed to complete the operation but in a common case (where there
26274         // are no dependent handles that are due to be collected) it allows us to optimize away further scans.
26275         // The call to background_scan_dependent_handles is what will cycle through more iterations if
26276         // required and will also perform processing of any mark stack overflow once the dependent handle
26277         // table has been fully promoted.
26278         dprintf (2, ("1st dependent handle scan and process mark overflow"));
26279         GCScan::GcDhInitialScan(background_promote, max_generation, max_generation, &sc);
26280         background_scan_dependent_handles (&sc);
26281         //concurrent_print_time_delta ("1st nonconcurrent dependent handle scan and process mark overflow");
26282         concurrent_print_time_delta ("NR 1st Hov");
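        // The dependent handle rescan above is a fixed-point iteration; a hedged
        // sketch of the shape of the loop (the names below are illustrative,
        // not the actual helpers):
        //
        //   bool promoted_any;
        //   do
        //   {
        //       promoted_any = scan_dependent_handle_table ();  // promote secondaries of live primaries
        //       drain_mark_stack_and_overflow ();               // may mark new primaries
        //   } while (promoted_any);                             // repeat until nothing new gets marked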
26283
26284         dprintf (2, ("after NR 1st Hov count: %d", bgc_overflow_count));
26285         bgc_overflow_count = 0;
26286
26287 #ifdef MULTIPLE_HEAPS
26288         bgc_t_join.join(this, gc_join_null_dead_short_weak);
26289         if (bgc_t_join.joined())
26290 #endif //MULTIPLE_HEAPS
26291         {
26292             GCToEEInterface::AfterGcScanRoots (max_generation, max_generation, &sc);
26293
26294 #ifdef MULTIPLE_HEAPS
26295             dprintf(3, ("Joining BGC threads for short weak handle scan"));
26296             bgc_t_join.restart();
26297 #endif //MULTIPLE_HEAPS
26298         }
26299
26300         // null out the targets of short weakrefs that were not promoted.
26301         GCScan::GcShortWeakPtrScan(background_promote, max_generation, max_generation,&sc);
26302
26303         //concurrent_print_time_delta ("bgc GcShortWeakPtrScan");
26304         concurrent_print_time_delta ("NR GcShortWeakPtrScan");
26305     }
26306
26307     {
26308 #ifdef MULTIPLE_HEAPS
26309         bgc_t_join.join(this, gc_join_scan_finalization);
26310         if (bgc_t_join.joined())
26311         {
26312             dprintf(3, ("Joining BGC threads for finalization"));
26313             bgc_t_join.restart();
26314         }
26315 #endif //MULTIPLE_HEAPS
26316
26317         //Handle finalization.
26318         dprintf(3,("Marking finalization data"));
26319         //concurrent_print_time_delta ("bgc joined to mark finalization");
26320         concurrent_print_time_delta ("NRj");
26321
26322 //        finalize_queue->EnterFinalizeLock();
26323         finalize_queue->ScanForFinalization (background_promote, max_generation, FALSE, __this);
26324 //        finalize_queue->LeaveFinalizeLock();
26325
26326         concurrent_print_time_delta ("NRF");
26327     }
26328
26329     dprintf (2, ("before NR 2nd Hov count: %d", bgc_overflow_count));
26330     bgc_overflow_count = 0;
26331
26332     // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
26333     // for finalization. As before background_scan_dependent_handles will also process any mark stack
26334     // overflow.
26335     dprintf (2, ("2nd dependent handle scan and process mark overflow"));
26336     background_scan_dependent_handles (&sc);
26337     //concurrent_print_time_delta ("2nd nonconcurrent dependent handle scan and process mark overflow");
26338     concurrent_print_time_delta ("NR 2nd Hov");
26339
26340 #ifdef MULTIPLE_HEAPS
26341     bgc_t_join.join(this, gc_join_null_dead_long_weak);
26342     if (bgc_t_join.joined())
26343     {
26344         dprintf(2, ("Joining BGC threads for weak pointer deletion"));
26345         bgc_t_join.restart();
26346     }
26347 #endif //MULTIPLE_HEAPS
26348
26349     // null out the targets of long weakrefs that were not promoted.
26350     GCScan::GcWeakPtrScan (background_promote, max_generation, max_generation, &sc);
26351     concurrent_print_time_delta ("NR GcWeakPtrScan");
26352
26353 #ifdef MULTIPLE_HEAPS
26354     bgc_t_join.join(this, gc_join_null_dead_syncblk);
26355     if (bgc_t_join.joined())
26356 #endif //MULTIPLE_HEAPS
26357     {
26358         dprintf (2, ("calling GcWeakPtrScanBySingleThread"));
26359         // scan for deleted entries in the syncblk cache
26360         GCScan::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc);
26361         concurrent_print_time_delta ("NR GcWeakPtrScanBySingleThread");
26362 #ifdef MULTIPLE_HEAPS
26363         dprintf(2, ("Starting BGC threads for end of background mark phase"));
26364         bgc_t_join.restart();
26365 #endif //MULTIPLE_HEAPS
26366     }
26367
26368     gen0_bricks_cleared = FALSE;
26369
26370     dprintf (2, ("end of bgc mark: loh: %d, soh: %d", 
26371                  generation_size (max_generation + 1), 
26372                  generation_sizes (generation_of (max_generation))));
26373
26374     for (int gen_idx = max_generation; gen_idx <= (max_generation + 1); gen_idx++)
26375     {
26376         generation* gen = generation_of (gen_idx);
26377         dynamic_data* dd = dynamic_data_of (gen_idx);
26378         dd_begin_data_size (dd) = generation_size (gen_idx) - 
26379                                    (generation_free_list_space (gen) + generation_free_obj_space (gen)) -
26380                                    Align (size (generation_allocation_start (gen)));
26381         dd_survived_size (dd) = 0;
26382         dd_pinned_survived_size (dd) = 0;
26383         dd_artificial_pinned_survived_size (dd) = 0;
26384         dd_added_pinned_size (dd) = 0;
26385     }
26386
26387     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
26388     PREFIX_ASSUME(seg != NULL);
26389
26390     while (seg)
26391     {
26392         seg->flags &= ~heap_segment_flags_swept;
26393
26394         if (heap_segment_allocated (seg) == heap_segment_mem (seg))
26395         {
26396             // This can't happen...
26397             FATAL_GC_ERROR();
26398         }
26399
26400         if (seg == ephemeral_heap_segment)
26401         {
26402             heap_segment_background_allocated (seg) = generation_allocation_start (generation_of (max_generation - 1));
26403         }
26404         else
26405         {
26406             heap_segment_background_allocated (seg) = heap_segment_allocated (seg);
26407         }
26408
26409         dprintf (2, ("seg %Ix background allocated is %Ix", 
26410                       heap_segment_mem (seg), 
26411                       heap_segment_background_allocated (seg)));
26412         seg = heap_segment_next_rw (seg);
26413     }
26414
26415     // We need to void alloc contexts here 'cause while background_ephemeral_sweep is running
26416     // we can't let the user code consume the leftover parts in these alloc contexts.
26417     repair_allocation_contexts (FALSE);
26418
26419 #ifdef TIME_GC
26420     finish = GetCycleCount32();
26421     mark_time = finish - start;
26422 #endif //TIME_GC
26423
26424     dprintf (2, ("end of bgc mark: gen2 free list space: %d, free obj space: %d", 
26425         generation_free_list_space (generation_of (max_generation)), 
26426         generation_free_obj_space (generation_of (max_generation))));
26427
26428     dprintf(2,("---- (GC%d)End of background mark phase ----", VolatileLoad(&settings.gc_index)));
26429 }
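// Recap of the final (non-concurrent) marking order implemented above: suspend
// the EE -> mark_absorb_new_alloc -> stack roots -> finalize queue roots ->
// handle table -> revisit dirtied pages -> dependent handles (1st pass) ->
// short weak handles -> ScanForFinalization -> dependent handles (2nd pass) ->
// long weak handles -> syncblk scan.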
26430
26431 void
26432 gc_heap::suspend_EE ()
26433 {
26434     dprintf (2, ("suspend_EE"));
26435 #ifdef MULTIPLE_HEAPS
26436     gc_heap* hp = gc_heap::g_heaps[0];
26437     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26438 #else
26439     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26440 #endif //MULTIPLE_HEAPS
26441 }
26442
26443 #ifdef MULTIPLE_HEAPS
26444 void
26445 gc_heap::bgc_suspend_EE ()
26446 {
26447     for (int i = 0; i < n_heaps; i++)
26448     {
26449         gc_heap::g_heaps[i]->reset_gc_done();
26450     }
26451     gc_started = TRUE;
26452     dprintf (2, ("bgc_suspend_EE"));
26453     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26454
26455     gc_started = FALSE;
26456     for (int i = 0; i < n_heaps; i++)
26457     {
26458         gc_heap::g_heaps[i]->set_gc_done();
26459     }
26460 }
26461 #else
26462 void
26463 gc_heap::bgc_suspend_EE ()
26464 {
26465     reset_gc_done();
26466     gc_started = TRUE;
26467     dprintf (2, ("bgc_suspend_EE"));
26468     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26469     gc_started = FALSE;
26470     set_gc_done();
26471 }
26472 #endif //MULTIPLE_HEAPS
26473
26474 void
26475 gc_heap::restart_EE ()
26476 {
26477     dprintf (2, ("restart_EE"));
26478 #ifdef MULTIPLE_HEAPS
26479     GCToEEInterface::RestartEE(FALSE);
26480 #else
26481     GCToEEInterface::RestartEE(FALSE);
26482 #endif //MULTIPLE_HEAPS
26483 }
26484
26485 inline uint8_t* gc_heap::high_page ( heap_segment* seg, BOOL concurrent_p)
26486 {
26487     if (concurrent_p)
26488     {
26489         uint8_t* end = ((seg == ephemeral_heap_segment) ?
26490                      generation_allocation_start (generation_of (max_generation-1)) :
26491                      heap_segment_allocated (seg));
26492         return align_lower_page (end);
26493     }
26494     else 
26495     {
26496         return heap_segment_allocated (seg);
26497     }
26498 }
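// Note: during a concurrent pass, high_page caps the scan at a page-aligned
// boundary (and at the gen1 allocation start for the ephemeral segment) so
// pages the mutator is still dirtying past that point get picked up by a
// later pass; the final non-concurrent pass uses the exact allocated end.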
26499
26500 void gc_heap::revisit_written_page (uint8_t* page,
26501                                     uint8_t* end,
26502                                     BOOL concurrent_p,
26503                                     heap_segment* seg,
26504                                     uint8_t*& last_page,
26505                                     uint8_t*& last_object,
26506                                     BOOL large_objects_p,
26507                                     size_t& num_marked_objects)
26508 {
26509     UNREFERENCED_PARAMETER(seg);
26510
26511     uint8_t*   start_address = page;
26512     uint8_t*   o             = 0;
26513     int align_const = get_alignment_constant (!large_objects_p);
26514     uint8_t* high_address = end;
26515     uint8_t* current_lowest_address = background_saved_lowest_address;
26516     uint8_t* current_highest_address = background_saved_highest_address;
26517     BOOL no_more_loop_p = FALSE;
26518
26519     THREAD_FROM_HEAP;
26520 #ifndef MULTIPLE_HEAPS
26521     const int thread = heap_number;
26522 #endif //!MULTIPLE_HEAPS
26523
26524     if (large_objects_p)
26525     {
26526         o = last_object;
26527     }
26528     else
26529     {
26530         if (((last_page + WRITE_WATCH_UNIT_SIZE) == page)
26531             || (start_address <= last_object))
26532         {
26533             o = last_object;
26534         }
26535         else
26536         {
26537             o = find_first_object (start_address, last_object);
26538             // We can visit the same object again, but on a different page.
26539             assert (o >= last_object);
26540         }
26541     }
26542
26543     dprintf (3,("page %Ix start: %Ix, %Ix[ ",
26544                (size_t)page, (size_t)o,
26545                (size_t)(min (high_address, page + WRITE_WATCH_UNIT_SIZE))));
26546
26547     while (o < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
26548     {
26549         size_t s;
26550
26551         if (concurrent_p && large_objects_p)
26552         {
26553             bgc_alloc_lock->bgc_mark_set (o);
26554
26555             if (((CObjectHeader*)o)->IsFree())
26556             {
26557                 s = unused_array_size (o);
26558             }
26559             else
26560             {
26561                 s = size (o);
26562             }
26563         }
26564         else
26565         {
26566             s = size (o);
26567         }
26568
26569         dprintf (3,("Considering object %Ix(%s)", (size_t)o, (background_object_marked (o, FALSE) ? "bm" : "nbm")));
26570
26571         assert (Align (s) >= Align (min_obj_size));
26572
26573         uint8_t* next_o =  o + Align (s, align_const);
26574
26575         if (next_o >= start_address) 
26576         {
26577 #ifdef MULTIPLE_HEAPS
26578             if (concurrent_p)
26579             {
26580                 // We set last_object here for SVR BGC because SVR BGC has more than
26581                 // one GC thread. With more than one GC thread we would run into this
26582                 // situation if we skipped unmarked objects:
26583                 // bgc thread 1 calls GWW, detects object X is not marked, and skips it
26584                 // for revisit.
26585                 // bgc thread 2 marks X and all its current children.
26586                 // user thread comes along and dirties more (and later) pages in X.
26587                 // bgc thread 1 calls GWW again and gets those later pages but it will not mark anything
26588                 // on them because it had already skipped X. We need to detect that this object is now
26589                 // marked and mark the children on the dirtied pages.
26590                 // In the future, if we have fewer BGC threads than heaps we should tie
26591                 // this check to the number of BGC threads.
26592                 last_object = o;
26593             }
26594 #endif //MULTIPLE_HEAPS
26595
26596             if (contain_pointers (o) &&
26597                 (!((o >= current_lowest_address) && (o < current_highest_address)) ||
26598                 background_marked (o)))
26599             {
26600                 dprintf (3, ("going through %Ix", (size_t)o));
26601                 go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s),
26602                                     if ((uint8_t*)poo >= min (high_address, page + WRITE_WATCH_UNIT_SIZE))
26603                                     {
26604                                         no_more_loop_p = TRUE;
26605                                         goto end_limit;
26606                                     }
26607                                     uint8_t* oo = *poo;
26608
26609                                     num_marked_objects++;
26610                                     background_mark_object (oo THREAD_NUMBER_ARG);
26611                                 );
26612             }
26613             else if (
26614                 concurrent_p &&
26615 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // see comment below
26616                 large_objects_p &&
26617 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26618                 ((CObjectHeader*)o)->IsFree() &&
26619                 (next_o > min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
26620             {
26621                 // We must not skip the object here because of this corner scenario:
26622                 // A large object was being allocated during BGC mark so we first made it 
26623                 // into a free object, then cleared its memory. In this loop we would detect
26624                 // that it's a free object which normally we would skip. But by the next time
26625                 // we call GetWriteWatch we could still be on this object and the object had
26626                 // been made into a valid object and some of its memory was changed. We need
26627                 // to be sure to process those written pages so we can't skip the object just
26628                 // yet.
26629                 //
26630                 // Similarly, when using software write watch, don't advance last_object when
26631                 // the current object is a free object that spans beyond the current page or
26632                 // high_address. Software write watch acquires gc_lock before the concurrent
26633                 // GetWriteWatch() call during revisit_written_pages(). A foreground GC may
26634                 // happen at that point and allocate from this free region, so when
26635                 // revisit_written_pages() continues, it cannot skip now-valid objects in this
26636                 // region.
26637                 no_more_loop_p = TRUE;
26638                 goto end_limit;                
26639             }
26640         }
26641 end_limit:
26642         if (concurrent_p && large_objects_p)
26643         {
26644             bgc_alloc_lock->bgc_mark_done ();
26645         }
26646         if (no_more_loop_p)
26647         {
26648             break;
26649         }
26650         o = next_o;
26651     }
26652
26653 #ifdef MULTIPLE_HEAPS
26654     if (concurrent_p)
26655     {
26656         assert (last_object < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)));
26657     }
26658     else
26659 #endif //MULTIPLE_HEAPS
26660     {
26661         last_object = o;
26662     }
26663
26664     dprintf (3,("Last object: %Ix", (size_t)last_object));
26665     last_page = align_write_watch_lower_page (o);
26666 }
26667
26668 // When reset_only_p is TRUE, we should only reset pages that are in range
26669 // because we need to consider the segments or part of segments that were
26670 // allocated out of range all live.
26671 void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
26672 {
26673 #ifdef WRITE_WATCH
26674     if (concurrent_p && !reset_only_p)
26675     {
26676         current_bgc_state = bgc_revisit_soh;
26677     }
26678
26679     size_t total_dirtied_pages = 0;
26680     size_t total_marked_objects = 0;
26681
26682     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
26683
26684     PREFIX_ASSUME(seg != NULL);
26685
26686     bool reset_watch_state = !!concurrent_p;
26687     bool is_runtime_suspended = !concurrent_p;
26688     BOOL small_object_segments = TRUE;
26689     int align_const = get_alignment_constant (small_object_segments);
26690
26691     while (1)
26692     {
26693         if (seg == 0)
26694         {
26695             if (small_object_segments)
26696             {
26697                 //switch to the large object segments
26698                 if (concurrent_p && !reset_only_p)
26699                 {
26700                     current_bgc_state = bgc_revisit_loh;
26701                 }
26702
26703                 if (!reset_only_p)
26704                 {
26705                     dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
26706                     fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
26707                     concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH");
26708                     total_dirtied_pages = 0;
26709                     total_marked_objects = 0;
26710                 }
26711
26712                 small_object_segments = FALSE;
26713                 //concurrent_print_time_delta (concurrent_p ? "concurrent marking dirtied pages on SOH" : "nonconcurrent marking dirtied pages on SOH");
26714
26715                 dprintf (3, ("now revisiting large object segments"));
26716                 align_const = get_alignment_constant (small_object_segments);
26717                 seg = heap_segment_rw (generation_start_segment (large_object_generation));
26718
26719                 PREFIX_ASSUME(seg != NULL);
26720
26721                 continue;
26722             }
26723             else
26724             {
26725                 if (reset_only_p)
26726                 {
26727                     dprintf (GTC_LOG, ("h%d: tdp: %Id", heap_number, total_dirtied_pages));
26728                 } 
26729                 else
26730                 {
26731                     dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
26732                     fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
26733                 }
26734                 break;
26735             }
26736         }
26737         uint8_t* base_address = (uint8_t*)heap_segment_mem (seg);
26738         //we need to truncate to the base of the page because
26739         //some newly allocated objects could exist beyond heap_segment_allocated
26740         //and if we reset the last page's write watch status,
26741         //they wouldn't be guaranteed to be visited -> gc hole.
26742         uintptr_t bcount = array_size;
26743         uint8_t* last_page = 0;
26744         uint8_t* last_object = heap_segment_mem (seg);
26745         uint8_t* high_address = 0;
26746
26747         BOOL skip_seg_p = FALSE;
26748
26749         if (reset_only_p)
26750         {
26751             if ((heap_segment_mem (seg) >= background_saved_lowest_address) ||
26752                 (heap_segment_reserved (seg) <= background_saved_highest_address))
26753             {
26754                 dprintf (3, ("h%d: sseg: %Ix(-%Ix)", heap_number, 
26755                     heap_segment_mem (seg), heap_segment_reserved (seg)));
26756                 skip_seg_p = TRUE;
26757             }
26758         }
26759
26760         if (!skip_seg_p)
26761         {
26762             dprintf (3, ("looking at seg %Ix", (size_t)last_object));
26763
26764             if (reset_only_p)
26765             {
26766                 base_address = max (base_address, background_saved_lowest_address);
26767                 dprintf (3, ("h%d: reset only starting %Ix", heap_number, base_address));
26768             }
26769
26770             dprintf (3, ("h%d: starting: %Ix, seg %Ix-%Ix", heap_number, base_address, 
26771                 heap_segment_mem (seg), heap_segment_reserved (seg)));
26772
26773
26774             while (1)
26775             {
26776                 if (reset_only_p)
26777                 {
26778                     high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
26779                     high_address = min (high_address, background_saved_highest_address);
26780                 }
26781                 else
26782                 {
26783                     high_address = high_page (seg, concurrent_p);
26784                 }
26785
26786                 if ((base_address < high_address) &&
26787                     (bcount >= array_size))
26788                 {
26789                     ptrdiff_t region_size = high_address - base_address;
26790                     dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size));
26791
26792 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26793                     // When the runtime is not suspended, it's possible for the table to be resized concurrently with the scan
26794                     // for dirty pages below. Prevent that by synchronizing with grow_brick_card_tables(). When the runtime is
26795                     // suspended, it's ok to scan for dirty pages concurrently from multiple background GC threads for disjoint
26796                     // memory regions.
26797                     if (!is_runtime_suspended)
26798                     {
26799                         enter_spin_lock(&gc_lock);
26800                     }
26801 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26802
26803                     get_write_watch_for_gc_heap (reset_watch_state, base_address, region_size,
26804                                                  (void**)background_written_addresses,
26805                                                  &bcount, is_runtime_suspended);
26806
26807 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26808                     if (!is_runtime_suspended)
26809                     {
26810                         leave_spin_lock(&gc_lock);
26811                     }
26812 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26813
26814                     if (bcount != 0)
26815                     {
26816                         total_dirtied_pages += bcount;
26817
26818                         dprintf (3, ("Found %d pages [%Ix, %Ix[", 
26819                                         bcount, (size_t)base_address, (size_t)high_address));
26820                     }
26821
26822                     if (!reset_only_p)
26823                     {
26824                         for (unsigned i = 0; i < bcount; i++)
26825                         {
26826                             uint8_t* page = (uint8_t*)background_written_addresses[i];
26827                             dprintf (3, ("looking at page %d at %Ix(h: %Ix)", i, 
26828                                 (size_t)page, (size_t)high_address));
26829                             if (page < high_address)
26830                             {
26831                                 //search for marked objects in the page
26832                                 revisit_written_page (page, high_address, concurrent_p,
26833                                                     seg, last_page, last_object,
26834                                                     !small_object_segments,
26835                                                     total_marked_objects);
26836                             }
26837                             else
26838                             {
26839                                 dprintf (3, ("page %d at %Ix is >= %Ix!", i, (size_t)page, (size_t)high_address));
26840                                 assert (!"page shouldn't have exceeded limit");
26841                             }
26842                         }
26843                     }
26844
26845                     if (bcount >= array_size) {
26846                         base_address = background_written_addresses [array_size-1] + WRITE_WATCH_UNIT_SIZE;
26847                         bcount = array_size;
26848                     }
26849                 }
26850                 else
26851                 {
26852                     break;
26853                 }
26854             }
26855         }
26856
26857         seg = heap_segment_next_rw (seg);
26858     }
26859
26860 #endif //WRITE_WATCH
26861 }
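// The per-region scan above follows the usual write-watch pattern; a hedged
// sketch of one iteration (names shortened for illustration):
//
//   uintptr_t bcount = array_size;
//   get_write_watch_for_gc_heap (reset_watch_state, base_address, region_size,
//                                (void**)pages, &bcount, is_runtime_suspended);
//   for (unsigned i = 0; i < bcount; i++)        // visit each dirtied page
//       revisit_written_page (pages[i], ...);
//   if (bcount >= array_size)                    // buffer was full - more dirty
//       base_address = pages[array_size - 1]     // pages may remain, so resume
//                      + WRITE_WATCH_UNIT_SIZE;  // just past the last one seen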
26862
26863 void gc_heap::background_grow_c_mark_list()
26864 {
26865     assert (c_mark_list_index >= c_mark_list_length);
26866     BOOL should_drain_p = FALSE;
26867     THREAD_FROM_HEAP;
26868 #ifndef MULTIPLE_HEAPS
26869     const int thread = heap_number;
26870 #endif //!MULTIPLE_HEAPS
26871
26872     dprintf (2, ("stack copy buffer overflow"));
26873     uint8_t** new_c_mark_list = 0;
26874     {
26875         FAULT_NOT_FATAL();
26876         if (c_mark_list_length >= (SIZE_T_MAX / (2 * sizeof (uint8_t*))))
26877         {
26878             should_drain_p = TRUE;
26879         }
26880         else
26881         {
26882             new_c_mark_list = new (nothrow) uint8_t*[c_mark_list_length*2];
26883             if (new_c_mark_list == 0)
26884             {
26885                 should_drain_p = TRUE;
26886             }
26887         }
26888     }
26889     if (should_drain_p)
26891     {
26892         dprintf (2, ("No more memory for the stacks copy, draining.."));
26893         //drain the list by marking its elements
26894         background_drain_mark_list (thread);
26895     }
26896     else
26897     {
26898         assert (new_c_mark_list);
26899         memcpy (new_c_mark_list, c_mark_list, c_mark_list_length*sizeof(uint8_t*));
26900         c_mark_list_length = c_mark_list_length*2;
26901         delete [] c_mark_list;    // allocated with new[], so use array delete
26902         c_mark_list = new_c_mark_list;
26903     }
26904 }
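// The growth policy above doubles c_mark_list_length, falling back to draining
// when the doubled byte count would overflow size_t. The guard in isolation,
// as a minimal sketch (a hypothetical helper, not part of this file):
//
//   bool can_double_mark_list (size_t len)
//   {
//       // len * 2 * sizeof (uint8_t*) stays representable iff this holds
//       return len < (SIZE_T_MAX / (2 * sizeof (uint8_t*)));
//   }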
26905
26906 void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc,
26907                                   uint32_t flags)
26908 {
26909     UNREFERENCED_PARAMETER(sc);
26910     //in order to save space on the array, mark the object,
26911     //knowing that it will be visited later
26912     assert (settings.concurrent);
26913
26914     THREAD_NUMBER_FROM_CONTEXT;
26915 #ifndef MULTIPLE_HEAPS
26916     const int thread = 0;
26917 #endif //!MULTIPLE_HEAPS
26918
26919     uint8_t* o = (uint8_t*)*ppObject;
26920
26921     if (o == 0)
26922         return;
26923
26924     HEAP_FROM_THREAD;
26925
26926     gc_heap* hp = gc_heap::heap_of (o);
26927
26928     if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
26929     {
26930         return;
26931     }
26932
26933 #ifdef INTERIOR_POINTERS
26934     if (flags & GC_CALL_INTERIOR)
26935     {
26936         o = hp->find_object (o, hp->background_saved_lowest_address);
26937         if (o == 0)
26938             return;
26939     }
26940 #endif //INTERIOR_POINTERS
26941
26942 #ifdef FEATURE_CONSERVATIVE_GC
26943     // For conservative GC, a value on stack may point to middle of a free object.
26944     // In this case, we don't need to promote the pointer.
26945     if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
26946     {
26947         return;
26948     }
26949 #endif //FEATURE_CONSERVATIVE_GC
26950
26951 #ifdef _DEBUG
26952     ((CObjectHeader*)o)->Validate();
26953 #endif //_DEBUG
26954
26955     dprintf (3, ("Concurrent Background Promote %Ix", (size_t)o));
26956     if (o && (size (o) > loh_size_threshold))
26957     {
26958         dprintf (3, ("Brc %Ix", (size_t)o));
26959     }
26960
26961     if (hpt->c_mark_list_index >= hpt->c_mark_list_length)
26962     {
26963         hpt->background_grow_c_mark_list();
26964     }
26965     dprintf (3, ("pushing %08x into mark_list", (size_t)o));
26966     hpt->c_mark_list [hpt->c_mark_list_index++] = o;
26967
26968     STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, "    GCHeap::Background Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);
26969 }
26970
26971 void gc_heap::mark_absorb_new_alloc()
26972 {
26973     fix_allocation_contexts (FALSE);
26974     
26975     gen0_bricks_cleared = FALSE;
26976
26977     clear_gen0_bricks();
26978 }
26979
26980 BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
26981 {
26982     BOOL success = FALSE;
26983     BOOL thread_created = FALSE;
26984     dprintf (2, ("Preparing gc thread"));
26985     gh->bgc_threads_timeout_cs.Enter();
26986     if (!(gh->bgc_thread_running))
26987     {
26988         dprintf (2, ("GC thread not runnning"));
26989         if ((gh->bgc_thread == 0) && create_bgc_thread(gh))
26990         {
26991             success = TRUE;
26992             thread_created = TRUE;
26993         }
26994     }
26995     else
26996     {
26997         dprintf (3, ("GC thread already running"));
26998         success = TRUE;
26999     }
27000     gh->bgc_threads_timeout_cs.Leave();
27001
27002     if (thread_created)
27003         FIRE_EVENT(GCCreateConcurrentThread_V1);
27004
27005     return success;
27006 }
27007
27008 BOOL gc_heap::create_bgc_thread(gc_heap* gh)
27009 {
27010     assert (background_gc_done_event.IsValid());
27011
27012     //dprintf (2, ("Creating BGC thread"));
27013
27014     gh->bgc_thread_running = GCToEEInterface::CreateThread(gh->bgc_thread_stub, gh, true, ".NET Background GC");
27015     return gh->bgc_thread_running;
27016 }
27017
27018 BOOL gc_heap::create_bgc_threads_support (int number_of_heaps)
27019 {
27020     BOOL ret = FALSE;
27021     dprintf (3, ("Creating concurrent GC thread for the first time"));
27022     if (!background_gc_done_event.CreateManualEventNoThrow(TRUE))
27023     {
27024         goto cleanup;
27025     }
27026     if (!bgc_threads_sync_event.CreateManualEventNoThrow(FALSE))
27027     {
27028         goto cleanup;
27029     }
27030     if (!ee_proceed_event.CreateAutoEventNoThrow(FALSE))
27031     {
27032         goto cleanup;
27033     }
27034     if (!bgc_start_event.CreateManualEventNoThrow(FALSE))
27035     {
27036         goto cleanup;
27037     }
27038
27039 #ifdef MULTIPLE_HEAPS
27040     bgc_t_join.init (number_of_heaps, join_flavor_bgc);
27041 #else
27042     UNREFERENCED_PARAMETER(number_of_heaps);
27043 #endif //MULTIPLE_HEAPS
27044
27045     ret = TRUE;
27046
27047 cleanup:
27048
27049     if (!ret)
27050     {
27051         if (background_gc_done_event.IsValid())
27052         {
27053             background_gc_done_event.CloseEvent();
27054         }
27055         if (bgc_threads_sync_event.IsValid())
27056         {
27057             bgc_threads_sync_event.CloseEvent();
27058         }
27059         if (ee_proceed_event.IsValid())
27060         {
27061             ee_proceed_event.CloseEvent();
27062         }
27063         if (bgc_start_event.IsValid())
27064         {
27065             bgc_start_event.CloseEvent();
27066         }
27067     }
27068
27069     return ret;
27070 }
27071
27072 BOOL gc_heap::create_bgc_thread_support()
27073 {
27074     BOOL ret = FALSE;
27075     uint8_t** parr;
27076     
27077     if (!gc_lh_block_event.CreateManualEventNoThrow(FALSE))
27078     {
27079         goto cleanup;
27080     }
27081
27082     //needs room for as many of the smallest objects as can fit on a page
27083     parr = new (nothrow) uint8_t*[1 + OS_PAGE_SIZE / MIN_OBJECT_SIZE];
27084     if (!parr)
27085     {
27086         goto cleanup;
27087     }
27088
27089     make_c_mark_list (parr);
27090
27091     ret = TRUE;
27092
27093 cleanup:
27094
27095     if (!ret)
27096     {
27097         if (gc_lh_block_event.IsValid())
27098         {
27099             gc_lh_block_event.CloseEvent();
27100         }
27101     }
27102
27103     return ret;
27104 }
27105
27106 int gc_heap::check_for_ephemeral_alloc()
27107 {
27108     int gen = ((settings.reason == reason_oos_soh) ? (max_generation - 1) : -1);
27109
27110     if (gen == -1)
27111     {
27112 #ifdef MULTIPLE_HEAPS
27113         for (int heap_index = 0; heap_index < n_heaps; heap_index++)
27114 #endif //MULTIPLE_HEAPS
27115         {
27116             for (int i = 0; i <= (max_generation - 1); i++)
27117             {
27118 #ifdef MULTIPLE_HEAPS
27119                 if (g_heaps[heap_index]->get_new_allocation (i) <= 0)
27120 #else
27121                 if (get_new_allocation (i) <= 0)
27122 #endif //MULTIPLE_HEAPS
27123                 {
27124                     gen = max (gen, i);
27125                 }
27126                 else
27127                     break;
27128             }
27129         }
27130     }
27131
27132     return gen;
27133 }
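// Worked example for check_for_ephemeral_alloc, assuming max_generation == 2:
// if the reason is not reason_oos_soh, the loop walks gen0 then gen1 on each
// heap; gen0 and gen1 budgets both exhausted -> returns 1, only gen0 budget
// exhausted -> returns 0, no ephemeral budget exhausted on any heap -> -1.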
27134
27135 // Wait for the gc to finish its sequential part
27136 void gc_heap::wait_to_proceed()
27137 {
27138     assert (background_gc_done_event.IsValid());
27139     assert (bgc_start_event.IsValid());
27140
27141     user_thread_wait(&ee_proceed_event, FALSE);
27142 }
27143
27144 // Start a new concurrent gc
27145 void gc_heap::start_c_gc()
27146 {
27147     assert (background_gc_done_event.IsValid());
27148     assert (bgc_start_event.IsValid());
27149
27150     //Need to make sure that the gc thread is in the right place.
27151     background_gc_done_event.Wait(INFINITE, FALSE);
27152     background_gc_done_event.Reset();
27153     bgc_start_event.Set();
27154 }
27155
27156 void gc_heap::do_background_gc()
27157 {
27158     dprintf (2, ("starting a BGC"));
27159 #ifdef MULTIPLE_HEAPS
27160     for (int i = 0; i < n_heaps; i++)
27161     {
27162         g_heaps[i]->init_background_gc();
27163     }
27164 #else
27165     init_background_gc();
27166 #endif //MULTIPLE_HEAPS
27167     //start the background gc
27168     start_c_gc ();
27169
27170     //wait until we get restarted by the BGC.
27171     wait_to_proceed();
27172 }
27173
27174 void gc_heap::kill_gc_thread()
27175 {
27176     //assert (settings.concurrent == FALSE);
27177
27178     // We are doing a two-stage shutdown now.
27179     // In the first stage, we do minimum work, and call ExitProcess at the end.
27180     // In the second stage, we have the Loader lock and only one thread is
27181     // alive.  Hence we do not need to kill the gc thread.
27182     background_gc_done_event.CloseEvent();
27183     gc_lh_block_event.CloseEvent();
27184     bgc_start_event.CloseEvent();
27185     bgc_threads_timeout_cs.Destroy();
27186     bgc_thread = 0;
27187     recursive_gc_sync::shutdown();
27188 }
27189
27190 void gc_heap::bgc_thread_function()
27191 {
27192     assert (background_gc_done_event.IsValid());
27193     assert (bgc_start_event.IsValid());
27194
27195     dprintf (3, ("gc_thread thread starting..."));
27196
27197     BOOL do_exit = FALSE;
27198
27199     bool cooperative_mode = true;
27200     bgc_thread_id.SetToCurrentThread();
27201     dprintf (1, ("bgc_thread_id is set to %x", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging()));
27202     while (1)
27203     {
27204         // Wait for work to do...
27205         dprintf (3, ("bgc thread: waiting..."));
27206
27207         cooperative_mode = enable_preemptive ();
27208         //current_thread->m_fPreemptiveGCDisabled = 0;
27209
27210         uint32_t result = bgc_start_event.Wait(
27211 #ifdef _DEBUG
27212 #ifdef MULTIPLE_HEAPS
27213                                              INFINITE,
27214 #else
27215                                              2000,
27216 #endif //MULTIPLE_HEAPS
27217 #else //_DEBUG
27218 #ifdef MULTIPLE_HEAPS
27219                                              INFINITE,
27220 #else
27221                                              20000,
27222 #endif //MULTIPLE_HEAPS
27223 #endif //_DEBUG
27224             FALSE);
27225         dprintf (2, ("gc thread: finished waiting"));
27226
27227         // not calling disable_preemptive here 'cause we 
27228         // can't wait for GC complete here - RestartEE will be called 
27229         // when we've done the init work.
27230
27231         if (result == WAIT_TIMEOUT)
27232         {
27233             // Should join the bgc threads and terminate all of them
27234             // at once.
27235             dprintf (1, ("GC thread timeout"));
27236             bgc_threads_timeout_cs.Enter();
27237             if (!keep_bgc_threads_p)
27238             {
27239                 dprintf (2, ("GC thread exiting"));
27240                 bgc_thread_running = FALSE;
27241                 bgc_thread = 0;
27242                 bgc_thread_id.Clear();
27243                 do_exit = TRUE;
27244             }
27245             bgc_threads_timeout_cs.Leave();
27246             if (do_exit)
27247                 break;
27248             else
27249             {
27250                 dprintf (3, ("GC thread needed, not exiting"));
27251                 continue;
27252             }
27253         }
27254         // if we signal the thread with no concurrent work to do -> exit
27255         if (!settings.concurrent)
27256         {
27257             dprintf (3, ("no concurrent GC needed, exiting"));
27258             break;
27259         }
27260 #ifdef TRACE_GC
27261         //trace_gc = TRUE;
27262 #endif //TRACE_GC
27263         recursive_gc_sync::begin_background();
27264         dprintf (2, ("beginning of bgc: gen2 FL: %d, FO: %d, frag: %d", 
27265             generation_free_list_space (generation_of (max_generation)),
27266             generation_free_obj_space (generation_of (max_generation)),
27267             dd_fragmentation (dynamic_data_of (max_generation))));
27268
27269         gc1();
27270
27271         current_bgc_state = bgc_not_in_process;
27272
27273 #ifdef TRACE_GC
27274         //trace_gc = FALSE;
27275 #endif //TRACE_GC
27276
27277         enable_preemptive ();
27278 #ifdef MULTIPLE_HEAPS
27279         bgc_t_join.join(this, gc_join_done);
27280         if (bgc_t_join.joined())
27281 #endif //MULTIPLE_HEAPS
27282         {
27283             enter_spin_lock (&gc_lock);
27284             dprintf (SPINLOCK_LOG, ("bgc Egc"));
27285             
27286             bgc_start_event.Reset();
27287             do_post_gc();
27288 #ifdef MULTIPLE_HEAPS
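            // The loop below equalizes the per-heap allocation budgets for gen2
            // and gen3 (loh): it sums dd_desired_allocation across heaps
            // (saturating at MAX_PTR on overflow) and hands every heap the
            // aligned average.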
27289             for (int gen = max_generation; gen <= (max_generation + 1); gen++)
27290             {
27291                 size_t desired_per_heap = 0;
27292                 size_t total_desired = 0;
27293                 gc_heap* hp = 0;
27294                 dynamic_data* dd;
27295                 for (int i = 0; i < n_heaps; i++)
27296                 {
27297                     hp = g_heaps[i];
27298                     dd = hp->dynamic_data_of (gen);
27299                     size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
27300                     if (temp_total_desired < total_desired)
27301                     {
27302                         // we overflowed.
27303                         total_desired = (size_t)MAX_PTR;
27304                         break;
27305                     }
27306                     total_desired = temp_total_desired;
27307                 }
27308
27309                 desired_per_heap = Align ((total_desired/n_heaps), get_alignment_constant (FALSE));
27310
27311                 for (int i = 0; i < n_heaps; i++)
27312                 {
27313                     hp = gc_heap::g_heaps[i];
27314                     dd = hp->dynamic_data_of (gen);
27315                     dd_desired_allocation (dd) = desired_per_heap;
27316                     dd_gc_new_allocation (dd) = desired_per_heap;
27317                     dd_new_allocation (dd) = desired_per_heap;
27318                 }
27319             }
27320 #endif //MULTIPLE_HEAPS
27321 #ifdef MULTIPLE_HEAPS
27322             fire_pevents();
27323 #endif //MULTIPLE_HEAPS
27324
27325             c_write (settings.concurrent, FALSE);
27326             recursive_gc_sync::end_background();
27327             keep_bgc_threads_p = FALSE;
27328             background_gc_done_event.Set();
27329
27330             dprintf (SPINLOCK_LOG, ("bgc Lgc"));
27331             leave_spin_lock (&gc_lock);
27332 #ifdef MULTIPLE_HEAPS
27333             dprintf(1, ("End of BGC - starting all BGC threads"));
27334             bgc_t_join.restart();
27335 #endif //MULTIPLE_HEAPS
27336         }
27337         // We can't disable preempt here because a GC might've already started,
27338         // decided to do a BGC, and be waiting for a BGC thread to restart the
27339         // vm. That GC would be waiting in wait_to_proceed while we wait for it
27340         // to restart the VM, so we would deadlock.
27341         //gc_heap::disable_preemptive (true);
27342     }
27343
27344     FIRE_EVENT(GCTerminateConcurrentThread_V1);
27345
27346     dprintf (3, ("bgc_thread thread exiting"));
27347     return;
27348 }
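// bgc_thread_function lifecycle in brief: wait on bgc_start_event (with a
// timeout in the non-MULTIPLE_HEAPS build so an idle thread can retire); on
// timeout, exit unless keep_bgc_threads_p; on a signal without concurrent
// work (settings.concurrent == FALSE), exit; otherwise run gc1() and join
// for the epilogue that resets the events and restores per-heap budgets.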
27349
27350 #endif //BACKGROUND_GC
27351
27352 //Clear the cards [start_card, end_card[
27353 void gc_heap::clear_cards (size_t start_card, size_t end_card)
27354 {
27355     if (start_card < end_card)
27356     {
27357         size_t start_word = card_word (start_card);
27358         size_t end_word = card_word (end_card);
27359         if (start_word < end_word)
27360         {
27361             // Figure out the bit positions of the cards within their words
27362             unsigned bits = card_bit (start_card);
27363             card_table [start_word] &= lowbits (~0, bits);
27364             for (size_t i = start_word+1; i < end_word; i++)
27365                 card_table [i] = 0;
27366             bits = card_bit (end_card);
27367             // Don't write beyond end_card (and possibly uncommitted card table space).
27368             if (bits != 0)
27369             {
27370                 card_table [end_word] &= highbits (~0, bits);
27371             }
27372         }
27373         else
27374         {
27375             // If the start and end cards are in the same word, just clear the appropriate card
27376             // bits in that word.
27377             card_table [start_word] &= (lowbits (~0, card_bit (start_card)) |
27378                                         highbits (~0, card_bit (end_card)));
27379         }
27380 #ifdef VERYSLOWDEBUG
27381         size_t  card = start_card;
27382         while (card < end_card)
27383         {
27384             assert (! (card_set_p (card)));
27385             card++;
27386         }
27387 #endif //VERYSLOWDEBUG
27388         dprintf (3,("Cleared cards [%Ix:%Ix, %Ix:%Ix[",
27389                   start_card, (size_t)card_address (start_card),
27390                   end_card, (size_t)card_address (end_card)));
27391     }
27392 }
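// Worked example for the bit math in clear_cards, assuming 32 cards per card
// word: clearing [33, 67[ gives start_word 1 / end_word 2, so word 1 keeps
// only bit 0 (lowbits (~0, 1) clears cards 33..63), no full words get zeroed,
// and word 2 keeps bits 3..31 (highbits (~0, 3) clears cards 64..66).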
27393
27394 void gc_heap::clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address)
27395 {
27396     size_t   start_card = card_of (align_on_card (start_address));
27397     size_t   end_card = card_of (align_lower_card (end_address));
27398     clear_cards (start_card, end_card);
27399 }
27400
27401 // copy [srccard, ...[ to [dst_card, end_card[
27402 // This will set the same bit twice. Can be optimized.
27403 inline
27404 void gc_heap::copy_cards (size_t dst_card,
27405                           size_t src_card,
27406                           size_t end_card, 
27407                           BOOL nextp)
27408 {
27409     // If the range is empty, this function is a no-op - with the subtlety that
27410     // either of the accesses card_table[srcwrd] or card_table[dstwrd] could be
27411     // outside the committed region.  To avoid the access, leave early.
27412     if (!(dst_card < end_card))
27413         return;
27414
27415     unsigned int srcbit = card_bit (src_card);
27416     unsigned int dstbit = card_bit (dst_card);
27417     size_t srcwrd = card_word (src_card);
27418     size_t dstwrd = card_word (dst_card);
27419     unsigned int srctmp = card_table[srcwrd];
27420     unsigned int dsttmp = card_table[dstwrd];
27421
27422     for (size_t card = dst_card; card < end_card; card++)
27423     {
27424         if (srctmp & (1 << srcbit))
27425             dsttmp |= 1 << dstbit;
27426         else
27427             dsttmp &= ~(1 << dstbit);
27428         if (!(++srcbit % 32))
27429         {
27430             srctmp = card_table[++srcwrd];
27431             srcbit = 0;
27432         }
27433
27434         if (nextp)
27435         {
27436             if (srctmp & (1 << srcbit))
27437                 dsttmp |= 1 << dstbit;
27438         }
27439
27440         if (!(++dstbit % 32))
27441         {
27442             card_table[dstwrd] = dsttmp;
27443
27444 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27445             if (dsttmp != 0)
27446             {
27447                 card_bundle_set(cardw_card_bundle(dstwrd));
27448             }
27449 #endif
27450
27451             dstwrd++;
27452             dsttmp = card_table[dstwrd];
27453             dstbit = 0;
27454         }
27455     }
27456
27457     card_table[dstwrd] = dsttmp;
27458
27459 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27460     if (dsttmp != 0)
27461     {
27462         card_bundle_set(cardw_card_bundle(dstwrd));
27463     }
27464 #endif
27465 }
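// Note on the nextp parameter above: callers pass TRUE when dest and src sit
// at different offsets within their cards, so a destination card overlaps two
// source cards; in that case each destination bit also ORs in the following
// source bit to stay conservative.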
27466
27467 void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
27468 {
27469     ptrdiff_t relocation_distance = src - dest;
27470     size_t start_dest_card = card_of (align_on_card (dest));
27471     size_t end_dest_card = card_of (dest + len - 1);
27472     size_t dest_card = start_dest_card;
27473     size_t src_card = card_of (card_address (dest_card)+relocation_distance);
27474     dprintf (3,("Copying cards [%Ix:%Ix->%Ix:%Ix, ",
27475                  src_card, (size_t)src, dest_card, (size_t)dest));
27476     dprintf (3,(" %Ix->%Ix:%Ix[",
27477               (size_t)src+len, end_dest_card, (size_t)dest+len));
27478
27479     dprintf (3, ("dest: %Ix, src: %Ix, len: %Ix, reloc: %Ix, align_on_card(dest) is %Ix",
27480         dest, src, len, relocation_distance, (align_on_card (dest))));
27481
27482     dprintf (3, ("start_dest_card: %Ix (address: %Ix), end_dest_card: %Ix(addr: %Ix), card_of (dest): %Ix",
27483         start_dest_card, card_address (start_dest_card), end_dest_card, card_address (end_dest_card), card_of (dest)));
27484
27485     //First card has two boundaries
27486     if (start_dest_card != card_of (dest))
27487     {
27488         if ((card_of (card_address (start_dest_card) + relocation_distance) <= card_of (src + len - 1))&&
27489             card_set_p (card_of (card_address (start_dest_card) + relocation_distance)))
27490         {
27491             dprintf (3, ("card_address (start_dest_card) + reloc is %Ix, card: %Ix(set), src+len-1: %Ix, card: %Ix",
27492                     (card_address (start_dest_card) + relocation_distance),
27493                     card_of (card_address (start_dest_card) + relocation_distance),
27494                     (src + len - 1),
27495                     card_of (src + len - 1)));
27496
27497             dprintf (3, ("setting card: %Ix", card_of (dest)));
27498             set_card (card_of (dest));
27499         }
27500     }
27501
27502     if (card_set_p (card_of (src)))
27503         set_card (card_of (dest));
27504
27505
27506     copy_cards (dest_card, src_card, end_dest_card,
27507                 ((dest - align_lower_card (dest)) != (src - align_lower_card (src))));
27508
27509     //Last card has two boundaries.
27510     if ((card_of (card_address (end_dest_card) + relocation_distance) >= card_of (src)) &&
27511         card_set_p (card_of (card_address (end_dest_card) + relocation_distance)))
27512     {
27513         dprintf (3, ("card_address (end_dest_card) + reloc is %Ix, card: %Ix(set), src: %Ix, card: %Ix",
27514                 (card_address (end_dest_card) + relocation_distance),
27515                 card_of (card_address (end_dest_card) + relocation_distance),
27516                 src,
27517                 card_of (src)));
27518
27519         dprintf (3, ("setting card: %Ix", end_dest_card));
27520         set_card (end_dest_card);
27521     }
27522
27523     if (card_set_p (card_of (src + len - 1)))
27524         set_card (end_dest_card);
27525
27526 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27527     card_bundles_set(cardw_card_bundle(card_word(card_of(dest))), cardw_card_bundle(align_cardw_on_bundle(card_word(end_dest_card))));
27528 #endif
27529 }
27530
27531 #ifdef BACKGROUND_GC
27532 // this does not need the Interlocked version of mark_array_set_marked.
27533 void gc_heap::copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
27534 {
27535     dprintf (3, ("Copying mark_bits for addresses [%Ix->%Ix, %Ix->%Ix[",
27536                  (size_t)src, (size_t)dest,
27537                  (size_t)src+len, (size_t)dest+len));
27538
27539     uint8_t* src_o = src;
27540     uint8_t* dest_o;
27541     uint8_t* src_end = src + len;
27542     int align_const = get_alignment_constant (TRUE);
27543     ptrdiff_t reloc = dest - src;
27544
27545     while (src_o < src_end)
27546     {
27547         uint8_t*  next_o = src_o + Align (size (src_o), align_const);
27548
27549         if (background_object_marked (src_o, TRUE))
27550         {
27551             dest_o = src_o + reloc;
27552
27553             //if (background_object_marked (dest_o, FALSE))
27554             //{
27555             //    dprintf (3, ("*%Ix shouldn't have already been marked!", (size_t)(dest_o)));
27556             //    FATAL_GC_ERROR();
27557             //}
27558
27559             background_mark (dest_o, 
27560                              background_saved_lowest_address, 
27561                              background_saved_highest_address);
27562             dprintf (3, ("bc*%Ix*bc, b*%Ix*b", (size_t)src_o, (size_t)(dest_o)));
27563         }
27564
27565         src_o = next_o;
27566     }
27567 }
27568 #endif //BACKGROUND_GC
27569
27570 void gc_heap::fix_brick_to_highest (uint8_t* o, uint8_t* next_o)
27571 {
27572     size_t new_current_brick = brick_of (o);
27573     set_brick (new_current_brick,
27574                (o - brick_address (new_current_brick)));
27575     size_t b = 1 + new_current_brick;
27576     size_t limit = brick_of (next_o);
27577     //dprintf(3,(" fixing brick %Ix to point to object %Ix, till %Ix(%Ix)",
27578     dprintf(3,("b:%Ix->%Ix-%Ix", 
27579                new_current_brick, (size_t)o, (size_t)next_o));
27580     while (b < limit)
27581     {
27582         set_brick (b,(new_current_brick - b));
27583         b++;
27584     }
27585 }
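      // A note on the brick encoding used above (summarizing existing
      // behavior): the brick for o stores a non-negative offset to o, while
      // each trailing brick stores a negative delta back to that first brick,
      // so bricks {B, B+1, B+2} end up holding {offset, -1, -2}.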
27586
27587 // start cannot be >= heap_segment_allocated for the segment.
27588 uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object)
27589 {
27590     size_t brick = brick_of (start);
27591     uint8_t* o = 0;
27592     // first_object doubles as a search shortcut: use it directly when start falls in its brick or at/before it.
27593     if ((brick == brick_of (first_object)) || (start <= first_object))
27594     {
27595         o = first_object;
27596     }
27597     else
27598     {
27599         ptrdiff_t  min_brick = (ptrdiff_t)brick_of (first_object);
27600         ptrdiff_t  prev_brick = (ptrdiff_t)brick - 1;
27601         int         brick_entry = 0;
27602         while (1)
27603         {
27604             if (prev_brick < min_brick)
27605             {
27606                 break;
27607             }
27608             if ((brick_entry = get_brick_entry(prev_brick)) >= 0)
27609             {
27610                 break;
27611             }
27612             assert (brick_entry != 0);
27613             prev_brick = (brick_entry + prev_brick);
27614
27615         }
27616         o = ((prev_brick < min_brick) ? first_object :
27617                       brick_address (prev_brick) + brick_entry - 1);
27618         assert (o <= start);
27619     }
27620
27621     assert (Align (size (o)) >= Align (min_obj_size));
27622     uint8_t*  next_o = o + Align (size (o));
27623     size_t curr_cl = (size_t)next_o / brick_size;
27624     size_t min_cl = (size_t)first_object / brick_size;
27625
27626     //dprintf (3,( "Looking for intersection with %Ix from %Ix", (size_t)start, (size_t)o));
27627 #ifdef TRACE_GC
27628     unsigned int n_o = 1;
27629 #endif //TRACE_GC
27630
27631     uint8_t* next_b = min (align_lower_brick (next_o) + brick_size, start+1);
27632
27633     while (next_o <= start)
27634     {
27635         do
27636         {
27637 #ifdef TRACE_GC
27638             n_o++;
27639 #endif //TRACE_GC
27640             o = next_o;
27641             assert (Align (size (o)) >= Align (min_obj_size));
27642             next_o = o + Align (size (o));
27643             Prefetch (next_o);
27644         }while (next_o < next_b);
27645
27646         if (((size_t)next_o / brick_size) != curr_cl)
27647         {
27648             if (curr_cl >= min_cl)
27649             {
27650                 fix_brick_to_highest (o, next_o);
27651             }
27652             curr_cl = (size_t) next_o / brick_size;
27653         }
27654         next_b = min (align_lower_brick (next_o) + brick_size, start+1);
27655     }
27656
27657     size_t bo = brick_of (o);
27658     //dprintf (3, ("Looked at %Id objects, fixing brick [%Ix-[%Ix", 
27659     dprintf (3, ("%Id o, [%Ix-[%Ix", 
27660         n_o, bo, brick));
27661     if (bo < brick)
27662     {
27663         set_brick (bo, (o - brick_address(bo)));
27664         size_t b = 1 + bo;
27665         int x = -1;
27666         while (b < brick)
27667         {
27668             set_brick (b,x--);
27669             b++;
27670         }
27671     }
27672
27673     return o;
27674 }
27675
27676 #ifdef CARD_BUNDLE
27677
27678 // Find the first non-zero card word between cardw and cardw_end.
27679 // The index of the word we find is returned in cardw.
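      // When card bundles are enabled, each bundle bit summarizes a run of
      // card words, so the scan below can skip whole clear bundles without
      // touching their card words. Roughly (a sketch; the exact word range of
      // a bundle comes from card_bundle_cardw):
      //
      //   for each bundle in [cardw_card_bundle (cardw), end_cardb):
      //       if the bundle bit is clear, skip it entirely;
      //       otherwise scan its card words for a non-zero one.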
27680 BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
27681 {
27682     dprintf (3, ("gc: %d, find_card_dword cardw: %Ix, cardw_end: %Ix",
27683                  dd_collection_count (dynamic_data_of (0)), cardw, cardw_end));
27684
27685     if (card_bundles_enabled())
27686     {
27687         size_t cardb = cardw_card_bundle (cardw);
27688         size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (cardw_end));
27689         while (1)
27690         {
27691             // Find a non-zero bundle
27692             while ((cardb < end_cardb) && (card_bundle_set_p (cardb) == 0))
27693             {
27694                 cardb++;
27695             }
27696             if (cardb == end_cardb)
27697                 return FALSE;
27698
27699             uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb),cardw)];
27700             uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1),cardw_end)];
27701             while ((card_word < card_word_end) && !(*card_word))
27702             {
27703                 card_word++;
27704             }
27705
27706             if (card_word != card_word_end)
27707             {
27708                 cardw = (card_word - &card_table[0]);
27709                 return TRUE;
27710             }
27711             else if ((cardw <= card_bundle_cardw (cardb)) &&
27712                      (card_word == &card_table [card_bundle_cardw (cardb+1)]))
27713             {
27714                 // a whole bundle was explored and is empty
27715                 dprintf  (3, ("gc: %d, find_card_dword clear bundle: %Ix cardw:[%Ix,%Ix[",
27716                         dd_collection_count (dynamic_data_of (0)), 
27717                         cardb, card_bundle_cardw (cardb),
27718                         card_bundle_cardw (cardb+1)));
27719                 card_bundle_clear (cardb);
27720             }
27721
27722             cardb++;
27723         }
27724     }
27725     else
27726     {
27727         uint32_t* card_word = &card_table[cardw];
27728         uint32_t* card_word_end = &card_table [cardw_end];
27729
27730         while (card_word < card_word_end)
27731         {
27732             if ((*card_word) != 0)
27733             {
27734                 cardw = (card_word - &card_table [0]);
27735                 return TRUE;
27736             }
27737
27738             card_word++;
27739         }
27740         return FALSE;
27742     }
27744 }
27745
27746 #endif //CARD_BUNDLE
27747
27748 // Find cards that are set between two points in a card table.
27749 // Parameters
27750 //     card_table    : The card table.
27751 //     card          : [in/out] As input, the card to start searching from.
27752 //                              As output, the first card that's set.
27753 //     card_word_end : The card word at which to stop looking.
27754 //     end_card      : [out] One past the last card in the set run, i.e. the first card that is not set.
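      // Illustrative example, assuming 32-bit card words: if cards 70..72 are
      // the first set cards at or after the input card, the function returns
      // with card = 70 and end_card = 73 - the half-open run [70, 73[ shown
      // in the dprintf at the end.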
27755 BOOL gc_heap::find_card(uint32_t* card_table,
27756                         size_t&   card,
27757                         size_t    card_word_end,
27758                         size_t&   end_card)
27759 {
27760     uint32_t* last_card_word;
27761     uint32_t card_word_value;
27762     uint32_t bit_position;
27763     
27764     // Find the first card which is set
27765     last_card_word = &card_table [card_word (card)];
27766     bit_position = card_bit (card);
27767     card_word_value = (*last_card_word) >> bit_position;
27768     if (!card_word_value)
27769     {
27770         bit_position = 0;
27771 #ifdef CARD_BUNDLE
27772         // Using the card bundle, go through the remaining card words between here and 
27773         // card_word_end until we find one that is non-zero.
27774         size_t lcw = card_word(card) + 1;
27775         if (gc_heap::find_card_dword (lcw, card_word_end) == FALSE)
27776         {
27777             return FALSE;
27778         }
27779         else
27780         {
27781             last_card_word = &card_table [lcw];
27782             card_word_value = *last_card_word;
27783         }
27784
27785 #else //CARD_BUNDLE
27786         // Go through the remaining card words between here and card_word_end until we find
27787         // one that is non-zero.
27788         do
27789         {
27790             ++last_card_word;
27791         } while ((last_card_word < &card_table [card_word_end]) && !(*last_card_word));
27794         if (last_card_word < &card_table [card_word_end])
27795         {
27796             card_word_value = *last_card_word;
27797         }
27798         else
27799         {
27800             // We failed to find any non-zero card words before we got to card_word_end
27801             return FALSE;
27802         }
27803 #endif //CARD_BUNDLE
27804     }
27805
27806
27807     // Look for the lowest bit set
27808     if (card_word_value)
27809     {
27810         while (!(card_word_value & 1))
27811         {
27812             bit_position++;
27813             card_word_value = card_word_value / 2;
27814         }
27815     }
27816     
27817     // card is the card word index * card_word_width + the bit index within the word
27818     card = (last_card_word - &card_table[0]) * card_word_width + bit_position;
27819
27820     do
27821     {
27822         // Keep going until we get to an unset card.
27823         bit_position++;
27824         card_word_value = card_word_value / 2;
27825
27826         // If we reach the end of the card word and haven't hit a 0 yet, start going
27827         // card word by card word until we get to one that's not fully set (0xFFFF...)
27828         // or we reach card_word_end.
27829         if ((bit_position == card_word_width) && (last_card_word < &card_table [card_word_end]))
27830         {
27831             do
27832             {
27833                 card_word_value = *(++last_card_word);
27834             } while ((last_card_word < &card_table [card_word_end]) &&
27836 #ifdef _MSC_VER
27837                      (card_word_value == (1 << card_word_width)-1)
27838 #else
27839                      // if left shift count >= width of type,
27840                      // gcc reports error.
27841                      (card_word_value == ~0u)
27842 #endif // _MSC_VER
27843                 );
27844             bit_position = 0;
27845         }
27846     } while (card_word_value & 1);
27847
27848     end_card = (last_card_word - &card_table [0])* card_word_width + bit_position;
27849     
27850     //dprintf (3, ("find_card: [%Ix, %Ix[ set", card, end_card));
27851     dprintf (3, ("fc: [%Ix, %Ix[", card, end_card));
27852     return TRUE;
27853 }
27854
27855
27856 // Because of heap expansion, computing the end is complicated.
27857 uint8_t* compute_next_end (heap_segment* seg, uint8_t* low)
27858 {
27859     if ((low >=  heap_segment_mem (seg)) &&
27860         (low < heap_segment_allocated (seg)))
27861         return low;
27862     else
27863         return heap_segment_allocated (seg);
27864 }
27865
27866 uint8_t*
27867 gc_heap::compute_next_boundary (uint8_t* low, int gen_number,
27868                                 BOOL relocating)
27869 {
27870     UNREFERENCED_PARAMETER(low);
27871
27872     //when relocating, the fault line is the plan start of the younger
27873     //generation because the generation is promoted.
27874     if (relocating && (gen_number == (settings.condemned_generation + 1)))
27875     {
27876         generation* gen = generation_of (gen_number - 1);
27877         uint8_t* gen_alloc = generation_plan_allocation_start (gen);
27878         assert (gen_alloc);
27879         return gen_alloc;
27880     }
27881     else
27882     {
27883         assert (gen_number > settings.condemned_generation);
27884         return generation_allocation_start (generation_of (gen_number - 1));
27885     }
27886
27887 }
27888
27889 inline void
27890 gc_heap::keep_card_live (uint8_t* o, size_t& n_gen,
27891                          size_t& cg_pointers_found)
27892 {
27893     THREAD_FROM_HEAP;
27894     if ((gc_low <= o) && (gc_high > o))
27895     {
27896         n_gen++;
27897     }
27898 #ifdef MULTIPLE_HEAPS
27899     else if (o)
27900     {
27901         gc_heap* hp = heap_of (o);
27902         if (hp != this)
27903         {
27904             if ((hp->gc_low <= o) &&
27905                 (hp->gc_high > o))
27906             {
27907                 n_gen++;
27908             }
27909         }
27910     }
27911 #endif //MULTIPLE_HEAPS
27912     cg_pointers_found ++;
27913     dprintf (4, ("keep card live for %Ix", o));
27914 }
27915
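      // For each interior pointer in an object on a set card: if it lands in
      // [gc_low, gc_high[ (the range being collected), count it in n_gen and
      // invoke fn on it; if it lands in [next_boundary, nhigh[, bump
      // cg_pointers_found so the caller knows the card must stay set.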
27916 inline void
27917 gc_heap::mark_through_cards_helper (uint8_t** poo, size_t& n_gen,
27918                                     size_t& cg_pointers_found,
27919                                     card_fn fn, uint8_t* nhigh,
27920                                     uint8_t* next_boundary)
27921 {
27922     THREAD_FROM_HEAP;
27923     if ((gc_low <= *poo) && (gc_high > *poo))
27924     {
27925         n_gen++;
27926         call_fn(fn) (poo THREAD_NUMBER_ARG);
27927     }
27928 #ifdef MULTIPLE_HEAPS
27929     else if (*poo)
27930     {
27931         gc_heap* hp = heap_of_gc (*poo);
27932         if (hp != this)
27933         {
27934             if ((hp->gc_low <= *poo) &&
27935                 (hp->gc_high > *poo))
27936             {
27937                 n_gen++;
27938                 call_fn(fn) (poo THREAD_NUMBER_ARG);
27939             }
27940             if ((fn == &gc_heap::relocate_address) ||
27941                 ((hp->ephemeral_low <= *poo) &&
27942                  (hp->ephemeral_high > *poo)))
27943             {
27944                 cg_pointers_found++;
27945             }
27946         }
27947     }
27948 #endif //MULTIPLE_HEAPS
27949     if ((next_boundary <= *poo) && (nhigh > *poo))
27950     {
27951         cg_pointers_found ++;
27952         dprintf (4, ("cg pointer %Ix found, %Id so far",
27953                      (size_t)*poo, cg_pointers_found ));
27954
27955     }
27956 }
27957
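      // card_transition: called when the object scan walks past the current
      // card. If no cross-generation pointers were found on the cards just
      // visited, those cards are cleared; the scan then advances to
      // card_of (po) and, if that passed end_card, re-finds the next set
      // card. Returns TRUE exactly when end_card was passed.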
27958 BOOL gc_heap::card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
27959                                size_t& cg_pointers_found, 
27960                                size_t& n_eph, size_t& n_card_set,
27961                                size_t& card, size_t& end_card,
27962                                BOOL& foundp, uint8_t*& start_address,
27963                                uint8_t*& limit, size_t& n_cards_cleared)
27964 {
27965     dprintf (3, ("pointer %Ix past card %Ix", (size_t)po, (size_t)card));
27966     dprintf (3, ("ct: %Id cg", cg_pointers_found));
27967     BOOL passed_end_card_p = FALSE;
27968     foundp = FALSE;
27969
27970     if (cg_pointers_found == 0)
27971     {
27972         //dprintf(3,(" Clearing cards [%Ix, %Ix[ ",
27973         dprintf(3,(" CC [%Ix, %Ix[ ",
27974                 (size_t)card_address(card), (size_t)po));
27975         clear_cards (card, card_of(po));
27976         n_card_set -= (card_of (po) - card);
27977         n_cards_cleared += (card_of (po) - card);
27979     }
27980     n_eph += cg_pointers_found;
27981     cg_pointers_found = 0;
27982     card = card_of (po);
27983     if (card >= end_card)
27984     {
27985         passed_end_card_p = TRUE;
27986         dprintf (3, ("card %Ix exceeding end_card %Ix",
27987                     (size_t)card, (size_t)end_card));
27988         foundp = find_card (card_table, card, card_word_end, end_card);
27989         if (foundp)
27990         {
27991             n_card_set+= end_card - card;
27992             start_address = card_address (card);
27993             dprintf (3, ("NewC: %Ix, start: %Ix, end: %Ix",
27994                         (size_t)card, (size_t)start_address,
27995                         (size_t)card_address (end_card)));
27996         }
27997         limit = min (end, card_address (end_card));
27998
27999         assert (!((limit == card_address (end_card))&&
28000                 card_set_p (end_card)));
28001     }
28002
28003     return passed_end_card_p;
28004 }
28005
28006 void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
28007 {
28008 #ifdef BACKGROUND_GC
28009     dprintf (3, ("current_sweep_pos is %Ix, saved_sweep_ephemeral_seg is %Ix(%Ix)",
28010                  current_sweep_pos, saved_sweep_ephemeral_seg, saved_sweep_ephemeral_start));
28011
28012     heap_segment* soh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
28013     PREFIX_ASSUME(soh_seg != NULL);
28014
28015     while (soh_seg)
28016     {
28017         dprintf (3, ("seg %Ix, bgc_alloc: %Ix, alloc: %Ix", 
28018             soh_seg, 
28019             heap_segment_background_allocated (soh_seg),
28020             heap_segment_allocated (soh_seg)));
28021
28022         soh_seg = heap_segment_next_rw (soh_seg);
28023     }
28024 #endif //BACKGROUND_GC
28025
28026     uint8_t* low = gc_low;
28027     uint8_t* high = gc_high;
28028     size_t end_card = 0;
28029
28030     generation*   oldest_gen        = generation_of (max_generation);
28031     int           curr_gen_number   = max_generation;
28032     uint8_t*      gen_boundary      = generation_allocation_start(generation_of(curr_gen_number - 1));
28033     uint8_t*      next_boundary     = compute_next_boundary(gc_low, curr_gen_number, relocating);
28034     
28035     heap_segment* seg               = heap_segment_rw (generation_start_segment (oldest_gen));
28036     PREFIX_ASSUME(seg != NULL);
28037
28038     uint8_t*      beg               = generation_allocation_start (oldest_gen);
28039     uint8_t*      end               = compute_next_end (seg, low);
28040     uint8_t*      last_object       = beg;
28041
28042     size_t  cg_pointers_found = 0;
28043
28044     size_t  card_word_end = (card_of (align_on_card_word (end)) / card_word_width);
28045
28046     size_t        n_eph             = 0;
28047     size_t        n_gen             = 0;
28048     size_t        n_card_set        = 0;
28049     uint8_t*      nhigh             = (relocating ?
28050                                        heap_segment_plan_allocated (ephemeral_heap_segment) : high);
28051
28052     BOOL          foundp            = FALSE;
28053     uint8_t*      start_address     = 0;
28054     uint8_t*      limit             = 0;
28055     size_t        card              = card_of (beg);
28056 #ifdef BACKGROUND_GC
28057     BOOL consider_bgc_mark_p        = FALSE;
28058     BOOL check_current_sweep_p      = FALSE;
28059     BOOL check_saved_sweep_p        = FALSE;
28060     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
28061 #endif //BACKGROUND_GC
28062
28063     dprintf(3, ("CMs: %Ix->%Ix", (size_t)beg, (size_t)end));
28064     size_t total_cards_cleared = 0;
28065
28066     while (1)
28067     {
28068         if (card_of(last_object) > card)
28069         {
28070             dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
28071             if (cg_pointers_found == 0)
28072             {
28073                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object));
28074                 clear_cards (card, card_of(last_object));
28075                 n_card_set -= (card_of (last_object) - card);
28076                 total_cards_cleared += (card_of (last_object) - card);
28077             }
28078
28079             n_eph += cg_pointers_found;
28080             cg_pointers_found = 0;
28081             card = card_of (last_object);
28082         }
28083
28084         if (card >= end_card)
28085         {
28086             foundp = find_card (card_table, card, card_word_end, end_card);
28087             if (foundp)
28088             {
28089                 n_card_set += end_card - card;
28090                 start_address = max (beg, card_address (card));
28091             }
28092             limit = min (end, card_address (end_card));
28093         }
28094         if (!foundp || (last_object >= end) || (card_address (card) >= end))
28095         {
28096             if (foundp && (cg_pointers_found == 0))
28097             {
28098                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
28099                            (size_t)end));
28100                 clear_cards (card, card_of (end));
28101                 n_card_set -= (card_of (end) - card);
28102                 total_cards_cleared += (card_of (end) - card);
28103             }
28104             n_eph += cg_pointers_found;
28105             cg_pointers_found = 0;
28106             if ((seg = heap_segment_next_in_range (seg)) != 0)
28107             {
28108 #ifdef BACKGROUND_GC
28109                 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
28110 #endif //BACKGROUND_GC
28111                 beg = heap_segment_mem (seg);
28112                 end = compute_next_end (seg, low);
28113                 card_word_end = card_of (align_on_card_word (end)) / card_word_width;
28114                 card = card_of (beg);
28115                 last_object = beg;
28116                 end_card = 0;
28117                 continue;
28118             }
28119             else
28120             {
28121                 break;
28122             }
28123         }
28124
28125         assert (card_set_p (card));
28126         {
28127             uint8_t* o = find_first_object (start_address, last_object);
28130             // Never visit an object twice.
28131             assert (o >= last_object);
28132
28133             //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix",
28134             dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix",
28135                    card, (size_t)o, (size_t)limit, (size_t)gen_boundary));
28136
28137             while (o < limit)
28138             {
28139                 assert (Align (size (o)) >= Align (min_obj_size));
28140                 size_t s = size (o);
28141
28142                 uint8_t* next_o =  o + Align (s);
28143                 Prefetch (next_o);
28144
28145                 if ((o >= gen_boundary) &&
28146                     (seg == ephemeral_heap_segment))
28147                 {
28148                     dprintf (3, ("switching gen boundary %Ix", (size_t)gen_boundary));
28149                     curr_gen_number--;
28150                     assert ((curr_gen_number > 0));
28151                     gen_boundary = generation_allocation_start
28152                         (generation_of (curr_gen_number - 1));
28153                     next_boundary = (compute_next_boundary
28154                                      (low, curr_gen_number, relocating));
28155                 }
28156
28157                 dprintf (4, ("|%Ix|", (size_t)o));
28158
28159                 if (next_o < start_address)
28160                 {
28161                     goto end_object;
28162                 }
28163
28164 #ifdef BACKGROUND_GC
28165                 if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
28166                 {
28167                     goto end_object;
28168                 }
28169 #endif //BACKGROUND_GC
28170
28171 #ifdef COLLECTIBLE_CLASS
28172                 if (is_collectible(o))
28173                 {
28174                     BOOL passed_end_card_p = FALSE;
28175
28176                     if (card_of (o) > card)
28177                     {
28178                         passed_end_card_p = card_transition (o, end, card_word_end,
28179                             cg_pointers_found, 
28180                             n_eph, n_card_set,
28181                             card, end_card,
28182                             foundp, start_address,
28183                             limit, total_cards_cleared);
28184                     }
28185
28186                     if ((!passed_end_card_p || foundp) && (card_of (o) == card))
28187                     {
28188                         // card is valid and it covers the head of the object
28189                         if (fn == &gc_heap::relocate_address)
28190                         {
28191                             keep_card_live (o, n_gen, cg_pointers_found);
28192                         }
28193                         else
28194                         {
28195                             uint8_t* class_obj = get_class_object (o);
28196                             mark_through_cards_helper (&class_obj, n_gen,
28197                                                     cg_pointers_found, fn,
28198                                                     nhigh, next_boundary);
28199                         }
28200                     }
28201
28202                     if (passed_end_card_p)
28203                     {
28204                         if (foundp && (card_address (card) < next_o))
28205                         {
28206                             goto go_through_refs;
28207                         }
28208                         else if (foundp && (start_address < limit))
28209                         {
28210                             next_o = find_first_object (start_address, o);
28211                             goto end_object;
28212                         }
28213                         else
28214                             goto end_limit;                            
28215                     }
28216                 }
28217
28218 go_through_refs:
28219 #endif //COLLECTIBLE_CLASS
28220
28221                 if (contain_pointers (o))
28222                 {
28223                     dprintf(3,("Going through %Ix start_address: %Ix", (size_t)o, (size_t)start_address));
28224
28225                     {
28226                         dprintf (4, ("normal object path"));
28227                         go_through_object
28228                             (method_table(o), o, s, poo,
28229                              start_address, use_start, (o + s),
28230                              {
28231                                  dprintf (4, ("<%Ix>:%Ix", (size_t)poo, (size_t)*poo));
28232                                  if (card_of ((uint8_t*)poo) > card)
28233                                  {
28234                                     BOOL passed_end_card_p  = card_transition ((uint8_t*)poo, end,
28235                                             card_word_end,
28236                                             cg_pointers_found, 
28237                                             n_eph, n_card_set,
28238                                             card, end_card,
28239                                             foundp, start_address,
28240                                             limit, total_cards_cleared);
28241
28242                                      if (passed_end_card_p)
28243                                      {
28244                                         if (foundp && (card_address (card) < next_o))
28245                                         {
28246                                              //new_start();
28247                                              {
28248                                                  if (ppstop <= (uint8_t**)start_address)
28249                                                      {break;}
28250                                                  else if (poo < (uint8_t**)start_address)
28251                                                      {poo = (uint8_t**)start_address;}
28252                                              }
28253                                         }
28254                                         else if (foundp && (start_address < limit))
28255                                         {
28256                                             next_o = find_first_object (start_address, o);
28257                                             goto end_object;
28258                                         }
28259                                          else
28260                                             goto end_limit;
28261                                      }
28262                                  }
28263
28264                                  mark_through_cards_helper (poo, n_gen,
28265                                                             cg_pointers_found, fn,
28266                                                             nhigh, next_boundary);
28267                              }
28268                             );
28269                     }
28270                 }
28271
28272             end_object:
28273                 if (((size_t)next_o / brick_size) != ((size_t) o / brick_size))
28274                 {
28275                     if (brick_table [brick_of (o)] < 0)
28276                         fix_brick_to_highest (o, next_o);
28277                 }
28278                 o = next_o;
28279             }
28280         end_limit:
28281             last_object = o;
28282         }
28283     }
28284     // compute the efficiency ratio of the card table
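          // Worked example (illustrative): n_eph = 1000 cross-generation
          // pointers of which n_gen = 150 proved useful gives a ratio of 15;
          // the n_eph > 400 guard keeps small samples from skewing the ratio
          // (they report 100 instead).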
28285     if (!relocating)
28286     {
28287         generation_skip_ratio = ((n_eph > 400)? (int)(((float)n_gen / (float)n_eph) * 100) : 100);
28288         dprintf (3, ("Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", 
28289             n_eph, n_gen , n_card_set, total_cards_cleared, generation_skip_ratio));
28290     }
28291     else
28292     {
28293         dprintf (3, ("R: Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", 
28294             n_gen, n_eph, n_card_set, total_cards_cleared, generation_skip_ratio));
28295     }
28296 }
28297
28298 #ifdef SEG_REUSE_STATS
28299 size_t gc_heap::dump_buckets (size_t* ordered_indices, int count, size_t* total_size)
28300 {
28301     size_t total_items = 0;
28302     *total_size = 0;
28303     for (int i = 0; i < count; i++)
28304     {
28305         total_items += ordered_indices[i];
28306         *total_size += ordered_indices[i] << (MIN_INDEX_POWER2 + i);
28307         dprintf (SEG_REUSE_LOG_0, ("[%d]%4d 2^%2d", heap_number, ordered_indices[i], (MIN_INDEX_POWER2 + i)));
28308     } 
28309     dprintf (SEG_REUSE_LOG_0, ("[%d]Total %d items, total size is 0x%Ix", heap_number, total_items, *total_size));
28310     return total_items;
28311 }
28312 #endif // SEG_REUSE_STATS
28313
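      // count_plug buckets each non-pinned plug by rounding its (padded) size
      // up to a power of two. Illustrative only (no particular value of
      // MIN_INDEX_POWER2 is assumed): a 0x1840-byte plug rounds up to
      // 0x2000 = 2^13 and increments ordered_plug_indices[13 - MIN_INDEX_POWER2].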
28314 void gc_heap::count_plug (size_t last_plug_size, uint8_t*& last_plug)
28315 {
28316     // detect pinned plugs
28317     if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
28318     {
28319         deque_pinned_plug();
28320         update_oldest_pinned_plug();
28321         dprintf (3, ("dequed pin,now oldest pin is %Ix", pinned_plug (oldest_pin())));
28322     }
28323     else
28324     {
28325         size_t plug_size = last_plug_size + Align(min_obj_size);
28326         BOOL is_padded = FALSE;
28327
28328 #ifdef SHORT_PLUGS
28329         plug_size += Align (min_obj_size);
28330         is_padded = TRUE;
28331 #endif //SHORT_PLUGS
28332
28333 #ifdef RESPECT_LARGE_ALIGNMENT
28334         plug_size += switch_alignment_size (is_padded);
28335 #endif //RESPECT_LARGE_ALIGNMENT
28336
28337         total_ephemeral_plugs += plug_size;
28338         size_t plug_size_power2 = round_up_power2 (plug_size);
28339         ordered_plug_indices[relative_index_power2_plug (plug_size_power2)]++;
28340         dprintf (SEG_REUSE_LOG_1, ("[%d]count_plug: adding 0x%Ix - %Id (2^%d) to ordered plug array", 
28341             heap_number, 
28342             last_plug, 
28343             plug_size, 
28344             (relative_index_power2_plug (plug_size_power2) + MIN_INDEX_POWER2)));
28345     }
28346 }
28347
28348 void gc_heap::count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug)
28349 {
28350     assert (tree != NULL);
28351     if (node_left_child (tree))
28352     {
28353         count_plugs_in_brick (tree + node_left_child (tree), last_plug);
28354     }
28355
28356     if (last_plug != 0)
28357     {
28358         uint8_t*  plug = tree;
28359         size_t gap_size = node_gap_size (plug);
28360         uint8_t*   gap = (plug - gap_size);
28361         uint8_t*  last_plug_end = gap;
28362         size_t  last_plug_size = (last_plug_end - last_plug);
28363         dprintf (3, ("tree: %Ix, last plug: %Ix, gap size: %Ix, gap: %Ix, last plug size: %Ix",
28364             tree, last_plug, gap_size, gap, last_plug_size));
28365
28366         if (tree == oldest_pinned_plug)
28367         {
28368             dprintf (3, ("tree %Ix is pinned, last plug is %Ix, size is %Ix",
28369                 tree, last_plug, last_plug_size));
28370             mark* m = oldest_pin();
28371             if (m->has_pre_plug_info())
28372             {
28373                 last_plug_size += sizeof (gap_reloc_pair);
28374                 dprintf (3, ("pin %Ix has pre plug, adjusting plug size to %Ix", tree, last_plug_size));
28375             }
28376         }
28377         // Can't assert here - if it's a pinned plug it can be less.
28378         //assert (last_plug_size >= Align (min_obj_size));
28379
28380         count_plug (last_plug_size, last_plug);
28381     }
28382
28383     last_plug = tree;
28384
28385     if (node_right_child (tree))
28386     {
28387         count_plugs_in_brick (tree + node_right_child (tree), last_plug);
28388     }
28389 }
28390
28391 void gc_heap::build_ordered_plug_indices ()
28392 {
28393     memset (ordered_plug_indices, 0, sizeof(ordered_plug_indices));
28394     memset (saved_ordered_plug_indices, 0, sizeof(saved_ordered_plug_indices));
28395
28396     uint8_t*  start_address = generation_limit (max_generation);
28397     uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment);
28398     size_t  current_brick = brick_of (start_address);
28399     size_t  end_brick = brick_of (end_address - 1);
28400     uint8_t* last_plug = 0;
28401
28402     //Look for the right pinned plug to start from.
28403     reset_pinned_queue_bos();
28404     while (!pinned_plug_que_empty_p())
28405     {
28406         mark* m = oldest_pin();
28407         if ((m->first >= start_address) && (m->first < end_address))
28408         {
28409             dprintf (3, ("found a pin %Ix between %Ix and %Ix", m->first, start_address, end_address));
28410
28411             break;
28412         }
28413         else
28414             deque_pinned_plug();
28415     }
28416     
28417     update_oldest_pinned_plug();
28418
28419     while (current_brick <= end_brick)
28420     {
28421         int brick_entry =  brick_table [ current_brick ];
28422         if (brick_entry >= 0)
28423         {
28424             count_plugs_in_brick (brick_address (current_brick) + brick_entry -1, last_plug);
28425         }
28426
28427         current_brick++;
28428     }
28429
28430     if (last_plug != 0)
28431     {
28432         count_plug (end_address - last_plug, last_plug);
28433     }
28434
28435     // we need to make sure that after fitting all the existing plugs, we
28436     // have big enough free space left to guarantee that the next allocation
28437     // will succeed.
28438     size_t extra_size = END_SPACE_AFTER_GC + Align (min_obj_size);
28439     total_ephemeral_plugs += extra_size;
28440     dprintf (SEG_REUSE_LOG_0, ("Making sure we can fit a large object after fitting all plugs"));
28441     ordered_plug_indices[relative_index_power2_plug (round_up_power2 (extra_size))]++;
28442     
28443     memcpy (saved_ordered_plug_indices, ordered_plug_indices, sizeof(ordered_plug_indices));
28444
28445 #ifdef SEG_REUSE_STATS
28446     dprintf (SEG_REUSE_LOG_0, ("Plugs:"));
28447     size_t total_plug_power2 = 0;
28448     dump_buckets (ordered_plug_indices, MAX_NUM_BUCKETS, &total_plug_power2);
28449     dprintf (SEG_REUSE_LOG_0, ("plugs: 0x%Ix (rounded up to 0x%Ix (%d%%))", 
28450                 total_ephemeral_plugs, 
28451                 total_plug_power2, 
28452                 (total_ephemeral_plugs ? 
28453                     (total_plug_power2 * 100 / total_ephemeral_plugs) :
28454                     0)));
28455     dprintf (SEG_REUSE_LOG_0, ("-------------------"));
28456 #endif // SEG_REUSE_STATS
28457 }
28458
28459 void gc_heap::init_ordered_free_space_indices ()
28460 {
28461     memset (ordered_free_space_indices, 0, sizeof(ordered_free_space_indices));
28462     memset (saved_ordered_free_space_indices, 0, sizeof(saved_ordered_free_space_indices));
28463 }
28464
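      // trim_free_spaces_indices keeps roughly the (max_free_space_items - 1)
      // largest free spaces: it walks buckets from the biggest power of two
      // down, clamps the bucket where the running count crosses the limit
      // (remembering it in trimmed_free_space_index), and zeroes every
      // smaller bucket.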
28465 void gc_heap::trim_free_spaces_indices ()
28466 {
28467     trimmed_free_space_index = -1;
28468     size_t max_count = max_free_space_items - 1;
28469     size_t count = 0;
28470     int i = 0;
28471     for (i = (MAX_NUM_BUCKETS - 1); i >= 0; i--)
28472     {
28473         count += ordered_free_space_indices[i];
28474
28475         if (count >= max_count)
28476         {
28477             break;
28478         }
28479     }
28480
28481     ptrdiff_t extra_free_space_items = count - max_count;
28482
28483     if (extra_free_space_items > 0)
28484     {
28485         ordered_free_space_indices[i] -= extra_free_space_items;
28486         free_space_items = max_count;
28487         trimmed_free_space_index = i;
28488     }
28489     else
28490     {
28491         free_space_items = count;
28492     }
28493
28494     if (i == -1)
28495     {
28496         i = 0;
28497     }
28498
28499     free_space_buckets = MAX_NUM_BUCKETS - i;
28500
28501     for (--i; i >= 0; i--)
28502     {
28503         ordered_free_space_indices[i] = 0;
28504     }
28505
28506     memcpy (saved_ordered_free_space_indices, 
28507             ordered_free_space_indices,
28508             sizeof(ordered_free_space_indices));
28509 }
28510
28511 // We fit as many plugs as we can and update the number of plugs left and the number
28512 // of free spaces left.
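      // Worked example (illustrative): fitting 5 blocks of 2^s into 2 spaces
      // of 2^(s+2) gives big_to_small = 2 << 2 = 8, so extra_small_spaces = 3;
      // the bit-by-bit redistribution below turns that remainder (0b11) into
      // one leftover 2^s space and one leftover 2^(s+1) space.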
28513 BOOL gc_heap::can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index)
28514 {
28515     assert (small_index <= big_index);
28516     assert (big_index < MAX_NUM_BUCKETS);
28517
28518     size_t small_blocks = ordered_blocks[small_index];
28519
28520     if (small_blocks == 0)
28521     {
28522         return TRUE;
28523     }
28524
28525     size_t big_spaces = ordered_spaces[big_index];
28526
28527     if (big_spaces == 0)
28528     {
28529         return FALSE;
28530     }
28531
28532     dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting %Id 2^%d plugs into %Id 2^%d free spaces", 
28533         heap_number,
28534         small_blocks, (small_index + MIN_INDEX_POWER2),
28535         big_spaces, (big_index + MIN_INDEX_POWER2)));
28536
28537     size_t big_to_small = big_spaces << (big_index - small_index);
28538
28539     ptrdiff_t extra_small_spaces = big_to_small - small_blocks;
28540     dprintf (SEG_REUSE_LOG_1, ("[%d]%d 2^%d spaces can fit %d 2^%d blocks", 
28541         heap_number,
28542         big_spaces, (big_index + MIN_INDEX_POWER2), big_to_small, (small_index + MIN_INDEX_POWER2)));
28543     BOOL can_fit = (extra_small_spaces >= 0);
28544
28545     if (can_fit) 
28546     {
28547         dprintf (SEG_REUSE_LOG_1, ("[%d]Can fit with %d 2^%d extras blocks", 
28548             heap_number,
28549             extra_small_spaces, (small_index + MIN_INDEX_POWER2)));
28550     }
28551
28552     int i = 0;
28553
28554     dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d spaces to 0", heap_number, (big_index + MIN_INDEX_POWER2)));
28555     ordered_spaces[big_index] = 0;
28556     if (extra_small_spaces > 0)
28557     {
28558         dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d blocks to 0", heap_number, (small_index + MIN_INDEX_POWER2)));
28559         ordered_blocks[small_index] = 0;
28560         for (i = small_index; i < big_index; i++)
28561         {
28562             if (extra_small_spaces & 1)
28563             {
28564                 dprintf (SEG_REUSE_LOG_1, ("[%d]Increasing # of 2^%d spaces from %d to %d", 
28565                     heap_number,
28566                     (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + 1)));
28567                 ordered_spaces[i] += 1;
28568             }
28569             extra_small_spaces >>= 1;
28570         }
28571
28572         dprintf (SEG_REUSE_LOG_1, ("[%d]Finally increasing # of 2^%d spaces from %d to %d", 
28573             heap_number,
28574             (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + extra_small_spaces)));
28575         ordered_spaces[i] += extra_small_spaces;
28576     }
28577     else
28578     {
28579         dprintf (SEG_REUSE_LOG_1, ("[%d]Decreasing # of 2^%d blocks from %d to %d", 
28580             heap_number,
28581             (small_index + MIN_INDEX_POWER2), 
28582             ordered_blocks[small_index], 
28583             (ordered_blocks[small_index] - big_to_small)));
28584         ordered_blocks[small_index] -= big_to_small;
28585     }
28586
28587 #ifdef SEG_REUSE_STATS
28588     size_t temp;
28589     dprintf (SEG_REUSE_LOG_1, ("[%d]Plugs became:", heap_number));
28590     dump_buckets (ordered_blocks, MAX_NUM_BUCKETS, &temp);
28591
28592     dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces became:", heap_number));
28593     dump_buckets (ordered_spaces, MAX_NUM_BUCKETS, &temp);
28594 #endif //SEG_REUSE_STATS
28595
28596     return can_fit;
28597 }
28598
28599 // space_index gets updated to the biggest available space index.
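      // E.g. (illustrative): if the 2^(s+2) spaces run out while fitting 2^s
      // blocks, *space_index steps down one bucket at a time and fitting
      // retries; FALSE is returned only once it would drop below block_index.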
28600 BOOL gc_heap::can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index)
28601 {
28602     assert (*space_index >= block_index);
28603
28604     while (!can_fit_in_spaces_p (ordered_blocks, block_index, ordered_spaces, *space_index))
28605     {
28606         (*space_index)--;
28607         if (*space_index < block_index)
28608         {
28609             return FALSE;
28610         }
28611     }
28612
28613     return TRUE;
28614 }
28615
28616 BOOL gc_heap::can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count)
28617 {
28618 #ifdef FEATURE_STRUCTALIGN
28619     // BARTOKTODO (4841): reenable when can_fit_in_spaces_p takes alignment requirements into account
28620     return FALSE;
28621 #endif // FEATURE_STRUCTALIGN
28622     int space_index = count - 1;
28623     for (int block_index = (count - 1); block_index >= 0; block_index--)
28624     {
28625         if (!can_fit_blocks_p (ordered_blocks, block_index, ordered_spaces, &space_index))
28626         {
28627             return FALSE;
28628         }
28629     }
28630
28631     return TRUE;
28632 }
28633
28634 void gc_heap::build_ordered_free_spaces (heap_segment* seg)
28635 {
28636     assert (bestfit_seg);
28637
28638     //bestfit_seg->add_buckets (MAX_NUM_BUCKETS - free_space_buckets + MIN_INDEX_POWER2, 
28639     //                    ordered_free_space_indices + (MAX_NUM_BUCKETS - free_space_buckets), 
28640     //                    free_space_buckets, 
28641     //                    free_space_items);
28642
28643     bestfit_seg->add_buckets (MIN_INDEX_POWER2, 
28644                         ordered_free_space_indices, 
28645                         MAX_NUM_BUCKETS, 
28646                         free_space_items);
28647
28648     assert (settings.condemned_generation == max_generation);
28649
28650     uint8_t* first_address = heap_segment_mem (seg);
28651     uint8_t* end_address   = heap_segment_reserved (seg);
28652     //look through the pinned plugs for relevant ones.
28653     //Look for the right pinned plug to start from.
28654     reset_pinned_queue_bos();
28655     mark* m = 0;
28656     // See comment in can_expand_into_p why we need (max_generation + 1).
28657     size_t eph_gen_starts = (Align (min_obj_size)) * (max_generation + 1);
28658     BOOL has_fit_gen_starts = FALSE;
28659
28660     while (!pinned_plug_que_empty_p())
28661     {
28662         m = oldest_pin();
28663         if ((pinned_plug (m) >= first_address) && 
28664             (pinned_plug (m) < end_address) &&
28665             (pinned_len (m) >= eph_gen_starts))
28666         {
28667
28668             assert ((pinned_plug (m) - pinned_len (m)) == bestfit_first_pin);
28669             break;
28670         }
28671         else
28672         {
28673             deque_pinned_plug();
28674         }
28675     }
28676
28677     if (!pinned_plug_que_empty_p())
28678     {
28679         bestfit_seg->add ((void*)m, TRUE, TRUE);
28680         deque_pinned_plug();
28681         m = oldest_pin();
28682         has_fit_gen_starts = TRUE;
28683     }
28684
28685     while (!pinned_plug_que_empty_p() &&
28686             ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
28687     {
28688         bestfit_seg->add ((void*)m, TRUE, FALSE);
28689         deque_pinned_plug();
28690         m = oldest_pin();
28691     }
28692
28693     if (commit_end_of_seg)
28694     {
28695         if (!has_fit_gen_starts)
28696         {
28697             assert (bestfit_first_pin == heap_segment_plan_allocated (seg));
28698         }
28699         bestfit_seg->add ((void*)seg, FALSE, (!has_fit_gen_starts));
28700     }
28701
28702 #ifdef _DEBUG
28703     bestfit_seg->check();
28704 #endif //_DEBUG
28705 }
28706
28707 BOOL gc_heap::try_best_fit (BOOL end_of_segment_p)
28708 {
28709     if (!end_of_segment_p)
28710     {
28711         trim_free_spaces_indices ();
28712     }
28713
28714     BOOL can_bestfit = can_fit_all_blocks_p (ordered_plug_indices, 
28715                                              ordered_free_space_indices, 
28716                                              MAX_NUM_BUCKETS);
28717
28718     return can_bestfit;
28719 }
28720
28721 BOOL gc_heap::best_fit (size_t free_space, 
28722                         size_t largest_free_space, 
28723                         size_t additional_space, 
28724                         BOOL* use_additional_space)
28725 {
28726     dprintf (SEG_REUSE_LOG_0, ("gen%d: trying best fit mechanism", settings.condemned_generation));
28727
28728     assert (!additional_space || (additional_space && use_additional_space));
28729     if (use_additional_space)
28730     {
28731         *use_additional_space = FALSE;
28732     }
28733
28734     if (ordered_plug_indices_init == FALSE)
28735     {
28736         total_ephemeral_plugs = 0;
28737         build_ordered_plug_indices();
28738         ordered_plug_indices_init = TRUE;
28739     }
28740     else
28741     {
28742         memcpy (ordered_plug_indices, saved_ordered_plug_indices, sizeof(ordered_plug_indices));
28743     }
28744
28745     if (total_ephemeral_plugs == (END_SPACE_AFTER_GC + Align (min_obj_size)))
28746     {
28747         dprintf (SEG_REUSE_LOG_0, ("No ephemeral plugs to realloc, done"));
28748         size_t empty_eph = (END_SPACE_AFTER_GC + Align (min_obj_size) + (Align (min_obj_size)) * (max_generation + 1));
28749         BOOL can_fit_empty_eph = (largest_free_space >= empty_eph);
28750         if (!can_fit_empty_eph)
28751         {
28752             can_fit_empty_eph = (additional_space >= empty_eph);
28753
28754             if (can_fit_empty_eph)
28755             {
28756                 *use_additional_space = TRUE;
28757             }
28758         }
28759
28760         return can_fit_empty_eph;
28761     }
28762
28763     if ((total_ephemeral_plugs + approximate_new_allocation()) >= (free_space + additional_space))
28764     {
28765         dprintf (SEG_REUSE_LOG_0, ("We won't have enough free space left in this segment after fitting, done"));
28766         return FALSE;
28767     }
28768
28769     if ((free_space + additional_space) == 0)
28770     {
28771         dprintf (SEG_REUSE_LOG_0, ("No free space in this segment, done"));
28772         return FALSE;
28773     }
28774
28775 #ifdef SEG_REUSE_STATS
28776     dprintf (SEG_REUSE_LOG_0, ("Free spaces:"));
28777     size_t total_free_space_power2 = 0;
28778     size_t total_free_space_items = 
28779         dump_buckets (ordered_free_space_indices, 
28780                       MAX_NUM_BUCKETS,
28781                       &total_free_space_power2);
28782     dprintf (SEG_REUSE_LOG_0, ("currently max free spaces is %Id", max_free_space_items));
28783
28784     dprintf (SEG_REUSE_LOG_0, ("Ephemeral plugs: 0x%Ix, free space: 0x%Ix (rounded down to 0x%Ix (%Id%%)), additional free_space: 0x%Ix",
28785                 total_ephemeral_plugs, 
28786                 free_space, 
28787                 total_free_space_power2, 
28788                 (free_space ? (total_free_space_power2 * 100 / free_space) : 0),
28789                 additional_space));
28790
28791     size_t saved_all_free_space_indices[MAX_NUM_BUCKETS];
28792     memcpy (saved_all_free_space_indices, 
28793             ordered_free_space_indices, 
28794             sizeof(saved_all_free_space_indices));
28795
28796 #endif // SEG_REUSE_STATS
28797
28798     if (total_ephemeral_plugs > (free_space + additional_space))
28799     {
28800         return FALSE;
28801     }
28802
28803     use_bestfit = try_best_fit(FALSE);
28804
28805     if (!use_bestfit && additional_space)
28806     {
28807         int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (additional_space));
28808
28809         if (relative_free_space_index != -1)
28810         {
28811             int relative_plug_index = 0;
28812             size_t plugs_to_fit = 0;
28813
28814             for (relative_plug_index = (MAX_NUM_BUCKETS - 1); relative_plug_index >= 0; relative_plug_index--)
28815             {
28816                 plugs_to_fit = ordered_plug_indices[relative_plug_index];
28817                 if (plugs_to_fit != 0)
28818                 {
28819                     break;
28820                 }
28821             }
28822
28823             if ((relative_plug_index > relative_free_space_index) ||
28824                 ((relative_plug_index == relative_free_space_index) &&
28825                 (plugs_to_fit > 1)))
28826             {
28827 #ifdef SEG_REUSE_STATS
28828                 dprintf (SEG_REUSE_LOG_0, ("additional space is 2^%d but we stopped at %d 2^%d plug(s)",
28829                             (relative_free_space_index + MIN_INDEX_POWER2),
28830                             plugs_to_fit,
28831                             (relative_plug_index + MIN_INDEX_POWER2)));
28832 #endif // SEG_REUSE_STATS
28833                 goto adjust;
28834             }
28835             
28836             dprintf (SEG_REUSE_LOG_0, ("Adding end of segment (2^%d)", (relative_free_space_index + MIN_INDEX_POWER2)));
28837             ordered_free_space_indices[relative_free_space_index]++;
28838             use_bestfit = try_best_fit(TRUE);
28839             if (use_bestfit)
28840             {
28841                 free_space_items++;
28842                 // Since we might've trimmed away some free spaces, we should check whether
28843                 // we really need to use end-of-seg space - if it's the same size as or smaller
28844                 // than the largest space we trimmed, we can just add that one back instead of
28845                 // using end of seg.
28846                 if (relative_free_space_index > trimmed_free_space_index)
28847                 {
28848                     *use_additional_space = TRUE;
28849                 }
28850                 else 
28851                 {
28852                     // If the additional space is <= the last trimmed space, we
28853                     // should just use that last trimmed space instead.
28854                     saved_ordered_free_space_indices[trimmed_free_space_index]++;
28855                 }
28856             }
28857         }
28858     }
28859
28860 adjust:
28861
28862     if (!use_bestfit)
28863     {
28864         dprintf (SEG_REUSE_LOG_0, ("couldn't fit..."));
28865
28866 #ifdef SEG_REUSE_STATS
28867         size_t saved_max = max_free_space_items;
28868         BOOL temp_bestfit = FALSE;
28869
28870         dprintf (SEG_REUSE_LOG_0, ("----Starting experiment process----"));
28871         dprintf (SEG_REUSE_LOG_0, ("----Couldn't fit with max free items %Id", max_free_space_items));
28872
28873         // TODO: need to take the end of segment into consideration.
28874         while (max_free_space_items <= total_free_space_items)
28875         {
28876             max_free_space_items += max_free_space_items / 2;
28877             dprintf (SEG_REUSE_LOG_0, ("----Temporarily increasing max free spaces to %Id", max_free_space_items));
28878             memcpy (ordered_free_space_indices, 
28879                     saved_all_free_space_indices,
28880                     sizeof(ordered_free_space_indices));
28881             if (try_best_fit(FALSE))
28882             {
28883                 temp_bestfit = TRUE;
28884                 break;
28885             }
28886         }
28887
28888         if (temp_bestfit)
28889         {
28890             dprintf (SEG_REUSE_LOG_0, ("----With %Id max free spaces we could fit", max_free_space_items));
28891         }
28892         else
28893         {
28894             dprintf (SEG_REUSE_LOG_0, ("----Tried all free spaces and still couldn't fit, lost too much space"));
28895         }
28896
28897         dprintf (SEG_REUSE_LOG_0, ("----Restoring max free spaces to %Id", saved_max));
28898         max_free_space_items = saved_max;
28899 #endif // SEG_REUSE_STATS
28900         if (free_space_items)
28901         {
28902             max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2);
28903             max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES);
28904         }
28905         else
28906         {
28907             max_free_space_items = MAX_NUM_FREE_SPACES;
28908         }
28909     }
28910
28911     dprintf (SEG_REUSE_LOG_0, ("Adjusted number of max free spaces to %Id", max_free_space_items));
28912     dprintf (SEG_REUSE_LOG_0, ("------End of best fitting process------\n"));
28913
28914     return use_bestfit;
28915 }
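
// The non-stats path above ends with a simple clamp: allow twice the number
// of free spaces we actually used, bounded by the configured minimum and
// maximum. A minimal, self-contained sketch of that adjustment, assuming
// illustrative bounds of 5 and 200 (stand-ins for MIN_NUM_FREE_SPACES and
// MAX_NUM_FREE_SPACES, which are defined elsewhere in this file):
#if 0
#include <algorithm>
#include <cstddef>
#include <cstdio>

static const size_t min_spaces_sketch = 5;    // assumed stand-in for MIN_NUM_FREE_SPACES
static const size_t max_spaces_sketch = 200;  // assumed stand-in for MAX_NUM_FREE_SPACES

static size_t adjust_max_free_spaces (size_t free_space_items)
{
    if (free_space_items == 0)
        return max_spaces_sketch; // no data yet - keep the cap wide open

    size_t cap = std::min (max_spaces_sketch, free_space_items * 2);
    return std::max (cap, min_spaces_sketch);
}

int main()
{
    printf ("%zu %zu %zu\n",
            adjust_max_free_spaces (0),    // 200
            adjust_max_free_spaces (2),    // 5 (2*2 = 4, clamped up to the minimum)
            adjust_max_free_spaces (150)); // 200 (300 clamped down to the maximum)
    return 0;
}
#endif // end of illustrative sketch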
28916
28917 BOOL gc_heap::process_free_space (heap_segment* seg, 
28918                          size_t free_space,
28919                          size_t min_free_size, 
28920                          size_t min_cont_size,
28921                          size_t* total_free_space,
28922                          size_t* largest_free_space)
28923 {
28924     *total_free_space += free_space;
28925     *largest_free_space = max (*largest_free_space, free_space);
28926
28927 #ifdef SIMPLE_DPRINTF
28928     dprintf (SEG_REUSE_LOG_1, ("free space len: %Ix, total free space: %Ix, largest free space: %Ix", 
28929                 free_space, *total_free_space, *largest_free_space));
28930 #endif //SIMPLE_DPRINTF
28931
28932     if ((*total_free_space >= min_free_size) && (*largest_free_space >= min_cont_size))
28933     {
28934 #ifdef SIMPLE_DPRINTF
28935         dprintf (SEG_REUSE_LOG_0, ("(gen%d)total free: %Ix(min: %Ix), largest free: %Ix(min: %Ix). Found segment %Ix to reuse without bestfit", 
28936             settings.condemned_generation,
28937             *total_free_space, min_free_size, *largest_free_space, min_cont_size,
28938             (size_t)seg));
28939 #else
28940         UNREFERENCED_PARAMETER(seg);
28941 #endif //SIMPLE_DPRINTF
28942         return TRUE;
28943     }
28944
28945     int free_space_index = relative_index_power2_free_space (round_down_power2 (free_space));
28946     if (free_space_index != -1)
28947     {
28948         ordered_free_space_indices[free_space_index]++;
28949     }
28950     return FALSE;
28951 }
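
// process_free_space above buckets each rejected free space by its
// power-of-2 size class before giving up on the segment. Below is a
// minimal, self-contained sketch of that bucketing, assuming illustrative
// bounds of 2^6..2^12 (MIN_INDEX_POWER2 and MAX_INDEX_POWER2 are defined
// elsewhere in this file and may differ):
#if 0
#include <cstddef>
#include <cstdio>

static const int min_power2_sketch = 6;   // assumed smallest tracked bucket (64 bytes)
static const int max_power2_sketch = 12;  // assumed largest tracked bucket (4KB)

// floor(log2(size)) for size > 0; the real code rounds down to a power of 2
// first and then takes the index of the remaining set bit, which computes
// the same value.
static int highest_set_bit_index (size_t size)
{
    int index = -1;
    while (size)
    {
        index++;
        size >>= 1;
    }
    return index;
}

// Mirrors relative_index_power2_free_space: -1 when the space is too small
// to be worth tracking (the sketch clamps the upper bound; the real code
// bounds it differently).
static int relative_bucket_index (size_t free_space)
{
    int index = highest_set_bit_index (free_space);
    if (index < min_power2_sketch)
        return -1;
    if (index > max_power2_sketch)
        index = max_power2_sketch;
    return index - min_power2_sketch;
}

int main()
{
    size_t buckets[max_power2_sketch - min_power2_sketch + 1] = {};
    const size_t spaces[] = { 40, 100, 700, 5000 };
    for (size_t s : spaces)
    {
        int i = relative_bucket_index (s);
        if (i != -1)
            buckets[i]++; // ordered_free_space_indices[i]++ in the real code
    }
    for (int i = 0; i <= max_power2_sketch - min_power2_sketch; i++)
        printf ("2^%d: %zu\n", i + min_power2_sketch, buckets[i]);
    return 0;
}
#endif // end of illustrative sketch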
28952
28953 BOOL gc_heap::expand_reused_seg_p()
28954 {
28955     BOOL reused_seg = FALSE;
28956     int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand);
28957     if ((heap_expand_mechanism == expand_reuse_bestfit) || 
28958         (heap_expand_mechanism == expand_reuse_normal))
28959     {
28960         reused_seg = TRUE;
28961     }
28962
28963     return reused_seg;
28964 }
28965
28966 BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t min_cont_size,
28967                                  allocator* gen_allocator)
28968 {
28969     min_cont_size += END_SPACE_AFTER_GC;
28970     use_bestfit = FALSE;
28971     commit_end_of_seg = FALSE;
28972     bestfit_first_pin = 0;
28973     uint8_t* first_address = heap_segment_mem (seg);
28974     uint8_t* end_address   = heap_segment_reserved (seg);
28975     size_t end_extra_space = end_space_after_gc();
28976
28977     if ((heap_segment_reserved (seg) - end_extra_space) <= heap_segment_plan_allocated (seg))
28978     {
28979         dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: can't use segment [%Ix %Ix, has less than %d bytes at the end",
28980                                    first_address, end_address, end_extra_space));
28981         return FALSE;
28982     }
28983
28984     end_address -= end_extra_space;
28985
28986     dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p(gen%d): min free: %Ix, min continuous: %Ix", 
28987         settings.condemned_generation, min_free_size, min_cont_size));
28988     size_t eph_gen_starts = eph_gen_starts_size;
28989
28990     if (settings.condemned_generation == max_generation)
28991     {
28992         size_t free_space = 0;
28993         size_t largest_free_space = free_space;
28994         dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen2: testing segment [%Ix %Ix", first_address, end_address));
28995         //Look through the pinned plugs for relevant ones and find the right pinned plug to start from.
28996         //We are going to allocate the generation starts in the 1st free space,
28997         //so start from the first free space that's big enough for gen starts and a min object size.
28998         // If we see a free space that is >= gen starts but < gen starts + min obj size, we just don't use it -
28999         // we could use it by allocating the last generation start a bit bigger, but
29000         // the complexity isn't worth the effort (those plugs are from gen2
29001         // already anyway).
29002         reset_pinned_queue_bos();
29003         mark* m = 0;
29004         BOOL has_fit_gen_starts = FALSE;
29005
29006         init_ordered_free_space_indices ();
29007         while (!pinned_plug_que_empty_p())
29008         {
29009             m = oldest_pin();
29010             if ((pinned_plug (m) >= first_address) && 
29011                 (pinned_plug (m) < end_address) &&
29012                 (pinned_len (m) >= (eph_gen_starts + Align (min_obj_size))))
29013             {
29014                 break;
29015             }
29016             else
29017             {
29018                 deque_pinned_plug();
29019             }
29020         }
29021
29022         if (!pinned_plug_que_empty_p())
29023         {
29024             bestfit_first_pin = pinned_plug (m) - pinned_len (m);
29025
29026             if (process_free_space (seg, 
29027                                     pinned_len (m) - eph_gen_starts, 
29028                                     min_free_size, min_cont_size, 
29029                                     &free_space, &largest_free_space))
29030             {
29031                 return TRUE;
29032             }
29033
29034             deque_pinned_plug();
29035             m = oldest_pin();
29036             has_fit_gen_starts = TRUE;
29037         }
29038
29039         dprintf (3, ("first pin is %Ix", pinned_plug (m)));
29040
29041         //tally up free space
29042         while (!pinned_plug_que_empty_p() &&
29043                ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
29044         {
29045             dprintf (3, ("looking at pin %Ix", pinned_plug (m)));
29046             if (process_free_space (seg, 
29047                                     pinned_len (m), 
29048                                     min_free_size, min_cont_size, 
29049                                     &free_space, &largest_free_space))
29050             {
29051                 return TRUE;
29052             }
29053
29054             deque_pinned_plug();
29055             m = oldest_pin();
29056         }
29057
29058         //try to find space at the end of the segment. 
29059         size_t end_space = (end_address - heap_segment_plan_allocated (seg)); 
29060         size_t additional_space = ((min_free_size > free_space) ? (min_free_size - free_space) : 0); 
29061         dprintf (SEG_REUSE_LOG_0, ("end space: %Ix; additional: %Ix", end_space, additional_space));
29062         if (end_space >= additional_space)
29063         {
29064             BOOL can_fit = TRUE;
29065             commit_end_of_seg = TRUE;
29066
29067             if (largest_free_space < min_cont_size)
29068             {
29069                 if (end_space >= min_cont_size)
29070                 {
29071                     additional_space = max (min_cont_size, additional_space);
29072                     dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg for eph", 
29073                         seg));
29074                 }
29075                 else 
29076                 {
29077                     if (settings.concurrent)
29078                     {
29079                         can_fit = FALSE;
29080                         commit_end_of_seg = FALSE;
29081                     }
29082                     else
29083                     {
29084                         size_t additional_space_bestfit = additional_space;
29085                         if (!has_fit_gen_starts)
29086                         {
29087                             if (additional_space_bestfit < (eph_gen_starts + Align (min_obj_size)))
29088                             {
29089                                 dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, gen starts not allocated yet and end space is too small: %Id",
29090                                         additional_space_bestfit));
29091                                 return FALSE;
29092                             }
29093
29094                             bestfit_first_pin = heap_segment_plan_allocated (seg);
29095                             additional_space_bestfit -= eph_gen_starts;
29096                         }
29097
29098                         can_fit = best_fit (free_space, 
29099                                             largest_free_space,
29100                                             additional_space_bestfit, 
29101                                             &commit_end_of_seg);
29102
29103                         if (can_fit)
29104                         {
29105                             dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse with bestfit, %s committing end of seg", 
29106                                 seg, (commit_end_of_seg ? "with" : "without")));
29107                         }
29108                         else
29109                         {
29110                             dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
29111                         }
29112                     }
29113                 }
29114             }
29115             else
29116             {
29117                 dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg", seg));
29118             }
29119
29120             assert (additional_space <= end_space);
29121             if (commit_end_of_seg)
29122             {
29123                 if (!grow_heap_segment (seg, heap_segment_plan_allocated (seg) + additional_space))
29124                 {
29125                     dprintf (2, ("Couldn't commit end of segment?!"));
29126                     use_bestfit = FALSE;
29127  
29128                     return FALSE;
29129                 }
29130
29131                 if (use_bestfit)
29132                 {
29133                     // We increase the index here because growing the heap segment could create a discrepancy
29134                     // with the additional space we used (the committed space could be bigger).
29135                     size_t free_space_end_of_seg = 
29136                         heap_segment_committed (seg) - heap_segment_plan_allocated (seg);
29137                     int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (free_space_end_of_seg));
29138                     saved_ordered_free_space_indices[relative_free_space_index]++;
29139                 }
29140             }
29141         
29142             if (use_bestfit)
29143             {
29144                 memcpy (ordered_free_space_indices, 
29145                         saved_ordered_free_space_indices, 
29146                         sizeof(ordered_free_space_indices));
29147                 max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2);
29148                 max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items);
29149                 dprintf (SEG_REUSE_LOG_0, ("could fit! %Id free spaces, %Id max", free_space_items, max_free_space_items));
29150             }
29151
29152             return can_fit;
29153         }
29154
29155         dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
29156         return FALSE;
29157     }
29158     else
29159     {
29160         assert (settings.condemned_generation == (max_generation-1));
29161         size_t free_space = (end_address - heap_segment_plan_allocated (seg));
29162         size_t largest_free_space = free_space;
29163         dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen1: testing segment [%Ix %Ix", first_address, end_address));
29164         //find the first free list in range of the current segment
29165         size_t sz_list = gen_allocator->first_bucket_size();
29166         unsigned int a_l_idx = 0;
29167         uint8_t* free_list = 0;
29168         for (; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
29169         {
29170             if ((eph_gen_starts <= sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
29171             {
29172                 free_list = gen_allocator->alloc_list_head_of (a_l_idx);
29173                 while (free_list)
29174                 {
29175                     if ((free_list >= first_address) && 
29176                         (free_list < end_address) && 
29177                         (unused_array_size (free_list) >= eph_gen_starts))
29178                     {
29179                         goto next;
29180                     }
29181                     else
29182                     {
29183                         free_list = free_list_slot (free_list);
29184                     }
29185                 }
29186             }
29187         }
29188 next:
29189         if (free_list)
29190         {
29191             init_ordered_free_space_indices ();
29192             if (process_free_space (seg, 
29193                                     unused_array_size (free_list) - eph_gen_starts + Align (min_obj_size), 
29194                                     min_free_size, min_cont_size, 
29195                                     &free_space, &largest_free_space))
29196             {
29197                 return TRUE;
29198             }
29199
29200             free_list = free_list_slot (free_list);
29201         }
29202         else
29203         {
29204             dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, no free list"));
29205             return FALSE;
29206         }
29207
29208        //tally up free space
29209
29210         while (1)
29211         {
29212             while (free_list)
29213             {
29214                 if ((free_list >= first_address) && (free_list < end_address) &&
29215                     process_free_space (seg, 
29216                                         unused_array_size (free_list), 
29217                                         min_free_size, min_cont_size, 
29218                                         &free_space, &largest_free_space))
29219                 {
29220                     return TRUE;
29221                 }
29222
29223                 free_list = free_list_slot (free_list);
29224             }
29225             a_l_idx++;
29226             if (a_l_idx < gen_allocator->number_of_buckets())
29227             {
29228                 free_list = gen_allocator->alloc_list_head_of (a_l_idx);
29229             }
29230             else
29231                 break;
29232         } 
29233
29234         dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
29235         return FALSE;
29236
29237         /*
29238         BOOL can_fit = best_fit (free_space, 0, NULL);
29239         if (can_fit)
29240         {
29241             dprintf (SEG_REUSE_LOG_0, ("(gen1)Found segment %Ix to reuse with bestfit", seg));
29242         }
29243         else
29244         {
29245             dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
29246         }
29247
29248         return can_fit;
29249         */
29250     }
29251 }
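
// A worked example of the end-of-segment arithmetic in the gen2 path above
// (hypothetical numbers): if min_free_size is 10MB and the pinned-plug walk
// only tallied 7MB of free_space, additional_space comes out to 3MB; with,
// say, 4MB of end_space the segment is still usable and we plan to commit
// 3MB more at the end (or min_cont_size instead, when the largest hole is
// too small to hold the ephemeral allocations contiguously).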
29252
29253 void gc_heap::realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
29254                             generation* gen, uint8_t* start_address,
29255                             unsigned int& active_new_gen_number,
29256                             uint8_t*& last_pinned_gap, BOOL& leftp,
29257                             BOOL shortened_p
29258 #ifdef SHORT_PLUGS
29259                             , mark* pinned_plug_entry
29260 #endif //SHORT_PLUGS
29261                             )
29262 {
29263     // detect generation boundaries
29264     // make sure that active_new_gen_number is not the youngest generation,
29265     // because generation_limit wouldn't return the right thing in that case.
29266     if (!use_bestfit)
29267     {
29268         if ((active_new_gen_number > 1) &&
29269             (last_plug >= generation_limit (active_new_gen_number)))
29270         {
29271             assert (last_plug >= start_address);
29272             active_new_gen_number--;
29273             realloc_plan_generation_start (generation_of (active_new_gen_number), gen);
29274             assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
29275             leftp = FALSE;
29276         }
29277     }
29278
29279     // detect pinned plugs
29280     if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
29281     {
29282         size_t  entry = deque_pinned_plug();
29283         mark*  m = pinned_plug_of (entry);
29284
29285         size_t saved_pinned_len = pinned_len(m);
29286         pinned_len(m) = last_plug - last_pinned_gap;
29287         //dprintf (3,("Adjusting pinned gap: [%Ix, %Ix[", (size_t)last_pinned_gap, (size_t)last_plug));
29288
29289         if (m->has_post_plug_info())
29290         {
29291             last_plug_size += sizeof (gap_reloc_pair);
29292             dprintf (3, ("ra pinned %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size));
29293         }
29294
29295         last_pinned_gap = last_plug + last_plug_size;
29296         dprintf (3, ("ra found pin %Ix, len: %Ix->%Ix, last_p: %Ix, last_p_size: %Ix",
29297             pinned_plug (m), saved_pinned_len, pinned_len (m), last_plug, last_plug_size));
29298         leftp = FALSE;
29299
29300         //we are creating a generation fault. set the cards.
29301         {
29302             size_t end_card = card_of (align_on_card (last_plug + last_plug_size));
29303             size_t card = card_of (last_plug);
29304             while (card != end_card)
29305             {
29306                 set_card (card);
29307                 card++;
29308             }
29309         }
29310     }
29311     else if (last_plug >= start_address)
29312     {
29313 #ifdef FEATURE_STRUCTALIGN
29314         int requiredAlignment;
29315         ptrdiff_t pad;
29316         node_aligninfo (last_plug, requiredAlignment, pad);
29317
29318         // from how we previously aligned the plug's destination address,
29319         // compute the actual alignment offset.
29320         uint8_t* reloc_plug = last_plug + node_relocation_distance (last_plug);
29321         ptrdiff_t alignmentOffset = ComputeStructAlignPad(reloc_plug, requiredAlignment, 0);
29322         if (!alignmentOffset)
29323         {
29324             // allocate_in_expanded_heap doesn't expect alignmentOffset to be zero.
29325             alignmentOffset = requiredAlignment;
29326         }
29327
29328         //clear the alignment info because we are reallocating
29329         clear_node_aligninfo (last_plug);
29330 #else // FEATURE_STRUCTALIGN
29331         //clear the realignment flag because we are reallocating
29332         clear_node_realigned (last_plug);
29333 #endif // FEATURE_STRUCTALIGN
29334         BOOL adjacentp = FALSE;
29335         BOOL set_padding_on_saved_p = FALSE;
29336
29337         if (shortened_p)
29338         {
29339             last_plug_size += sizeof (gap_reloc_pair);
29340
29341 #ifdef SHORT_PLUGS
29342             assert (pinned_plug_entry != NULL);
29343             if (last_plug_size <= sizeof (plug_and_gap))
29344             {
29345                 set_padding_on_saved_p = TRUE;
29346             }
29347 #endif //SHORT_PLUGS
29348
29349             dprintf (3, ("ra plug %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size));
29350         }
29351
29352 #ifdef SHORT_PLUGS
29353         clear_padding_in_expand (last_plug, set_padding_on_saved_p, pinned_plug_entry);
29354 #endif //SHORT_PLUGS
29355
29356         uint8_t* new_address = allocate_in_expanded_heap(gen, last_plug_size, adjacentp, last_plug,
29357 #ifdef SHORT_PLUGS
29358                                      set_padding_on_saved_p,
29359                                      pinned_plug_entry,
29360 #endif //SHORT_PLUGS
29361                                      TRUE, active_new_gen_number REQD_ALIGN_AND_OFFSET_ARG);
29362
29363         dprintf (3, ("ra NA: [%Ix, %Ix[: %Ix", new_address, (new_address + last_plug_size), last_plug_size));
29364         assert (new_address);
29365         set_node_relocation_distance (last_plug, new_address - last_plug);
29366 #ifdef FEATURE_STRUCTALIGN
29367         if (leftp && node_alignpad (last_plug) == 0)
29368 #else // FEATURE_STRUCTALIGN
29369         if (leftp && !node_realigned (last_plug))
29370 #endif // FEATURE_STRUCTALIGN
29371         {
29372             // TODO - temporarily disable L optimization because of a bug in it.
29373             //set_node_left (last_plug);
29374         }
29375         dprintf (3,(" Re-allocating %Ix->%Ix len %Id", (size_t)last_plug, (size_t)new_address, last_plug_size));
29376         leftp = adjacentp;
29377     }
29378 }
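
// The card-setting loop in realloc_plug above marks every card overlapping
// [last_plug, last_plug + last_plug_size). A minimal sketch of that
// arithmetic, assuming an illustrative 256-byte card and plain size_t
// addresses (the real code works on uint8_t* with a target-dependent card
// size):
#if 0
#include <cstddef>
#include <cstdio>

static const size_t card_size_sketch = 256; // assumed card size

static size_t card_of_sketch (size_t address)
{
    return address / card_size_sketch;
}

static size_t align_on_card_sketch (size_t address)
{
    return (address + card_size_sketch - 1) & ~(card_size_sketch - 1);
}

int main()
{
    const size_t plug = 0x1010; // hypothetical plug address
    const size_t len  = 0x230;  // hypothetical plug size

    // Rounding the end address up to a card boundary first makes end_card
    // exclusive, so a partially covered last card is still set.
    size_t card     = card_of_sketch (plug);
    size_t end_card = card_of_sketch (align_on_card_sketch (plug + len));
    while (card != end_card)
    {
        printf ("set card %#zx\n", card); // stands in for set_card (card)
        card++;
    }
    return 0;
}
#endif // end of illustrative sketch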
29379
29380 void gc_heap::realloc_in_brick (uint8_t* tree, uint8_t*& last_plug,
29381                                 uint8_t* start_address,
29382                                 generation* gen,
29383                                 unsigned int& active_new_gen_number,
29384                                 uint8_t*& last_pinned_gap, BOOL& leftp)
29385 {
29386     assert (tree != NULL);
29387     int   left_node = node_left_child (tree);
29388     int   right_node = node_right_child (tree);
29389
29390     dprintf (3, ("ra: tree: %Ix, last_pin_gap: %Ix, last_p: %Ix, L: %d, R: %d", 
29391         tree, last_pinned_gap, last_plug, left_node, right_node));
29392
29393     if (left_node)
29394     {
29395         dprintf (3, ("LN: realloc %Ix(%Ix)", (tree + left_node), last_plug));
29396         realloc_in_brick ((tree + left_node), last_plug, start_address,
29397                           gen, active_new_gen_number, last_pinned_gap,
29398                           leftp);
29399     }
29400
29401     if (last_plug != 0)
29402     {
29403         uint8_t*  plug = tree;
29404
29405         BOOL has_pre_plug_info_p = FALSE;
29406         BOOL has_post_plug_info_p = FALSE;
29407         mark* pinned_plug_entry = get_next_pinned_entry (tree, 
29408                                                          &has_pre_plug_info_p,
29409                                                          &has_post_plug_info_p, 
29410                                                          FALSE);
29411
29412         // We only care about the pre plug info 'cause that's what decides if the last plug is shortened.
29413         // The pinned plugs are handled in realloc_plug.
29414         size_t gap_size = node_gap_size (plug);
29415         uint8_t*   gap = (plug - gap_size);
29416         uint8_t*  last_plug_end = gap;
29417         size_t  last_plug_size = (last_plug_end - last_plug);
29418         // Cannot assert this - a plug could be less than that due to the shortened ones.
29419         //assert (last_plug_size >= Align (min_obj_size));
29420         dprintf (3, ("ra: plug %Ix, gap size: %Ix, last_pin_gap: %Ix, last_p: %Ix, last_p_end: %Ix, shortened: %d",
29421             plug, gap_size, last_pinned_gap, last_plug, last_plug_end, (has_pre_plug_info_p ? 1 : 0)));
29422         realloc_plug (last_plug_size, last_plug, gen, start_address,
29423                       active_new_gen_number, last_pinned_gap,
29424                       leftp, has_pre_plug_info_p
29425 #ifdef SHORT_PLUGS
29426                       , pinned_plug_entry
29427 #endif //SHORT_PLUGS
29428                       );
29429     }
29430
29431     last_plug = tree;
29432
29433     if (right_node)
29434     {
29435         dprintf (3, ("RN: realloc %Ix(%Ix)", (tree + right_node), last_plug));
29436         realloc_in_brick ((tree + right_node), last_plug, start_address,
29437                           gen, active_new_gen_number, last_pinned_gap,
29438                           leftp);
29439     }
29440 }
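
// realloc_in_brick above is an in-order walk of the plug tree stored in a
// brick: node_left_child/node_right_child are relative byte offsets, 0
// means "no child", and visiting a node means reallocating the *previous*
// plug (whose end is given by the gap in front of the current one). A
// minimal sketch of that traversal shape, with a plain struct standing in
// for the plug headers the real tree is threaded through:
#if 0
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct node_sketch
{
    ptrdiff_t left;   // byte offset to the left child, 0 if none
    ptrdiff_t right;  // byte offset to the right child, 0 if none
    int       value;  // stands in for the plug at this address
};

static void walk (uint8_t* tree, int& last_value)
{
    node_sketch* n = (node_sketch*)tree;
    if (n->left)
        walk (tree + n->left, last_value);
    printf ("visit %d (previous was %d)\n", n->value, last_value);
    last_value = n->value;
    if (n->right)
        walk (tree + n->right, last_value);
}

int main()
{
    node_sketch nodes[3] =
    {
        { 0, 0, 10 },                              // left leaf
        { -(ptrdiff_t)sizeof(node_sketch),         // root: left child at nodes[0],
           (ptrdiff_t)sizeof(node_sketch), 20 },   //       right child at nodes[2]
        { 0, 0, 30 }                               // right leaf
    };
    int last = 0;
    walk ((uint8_t*)&nodes[1], last); // visits 10, 20, 30 in address order
    return 0;
}
#endif // end of illustrative sketch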
29441
29442 void
29443 gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg,
29444                         uint8_t* start_address, uint8_t* end_address,
29445                         unsigned active_new_gen_number)
29446 {
29447     dprintf (3, ("--- Reallocing ---"));
29448
29449     if (use_bestfit)
29450     {
29451         //make sure that every generation has a planned allocation start
29452         int  gen_number = max_generation - 1;
29453         while (gen_number >= 0)
29454         {
29455             generation* gen = generation_of (gen_number);
29456             if (0 == generation_plan_allocation_start (gen))
29457             {
29458                 generation_plan_allocation_start (gen) = 
29459                     bestfit_first_pin + (max_generation - gen_number - 1) * Align (min_obj_size);
29460                 generation_plan_allocation_start_size (gen) = Align (min_obj_size);
29461                 assert (generation_plan_allocation_start (gen));
29462             }
29463             gen_number--;
29464         }
29465     }
29466
29467     uint8_t* first_address = start_address;
29468     //Look for the right pinned plug to start from.
29469     reset_pinned_queue_bos();
29470     uint8_t* planned_ephemeral_seg_end = heap_segment_plan_allocated (seg);
29471     while (!pinned_plug_que_empty_p())
29472     {
29473         mark* m = oldest_pin();
29474         if ((pinned_plug (m) >= planned_ephemeral_seg_end) && (pinned_plug (m) < end_address))
29475         {
29476             if (pinned_plug (m) < first_address)
29477             {
29478                 first_address = pinned_plug (m);
29479             }
29480             break;
29481         }
29482         else
29483             deque_pinned_plug();
29484     }
29485
29486     size_t  current_brick = brick_of (first_address);
29487     size_t  end_brick = brick_of (end_address-1);
29488     uint8_t*  last_plug = 0;
29489
29490     uint8_t* last_pinned_gap = heap_segment_plan_allocated (seg);
29491     BOOL leftp = FALSE;
29492
29493     dprintf (3, ("start addr: %Ix, first addr: %Ix, current oldest pin: %Ix",
29494         start_address, first_address, pinned_plug (oldest_pin())));
29495
29496     while (current_brick <= end_brick)
29497     {
29498         int   brick_entry =  brick_table [ current_brick ];
29499         if (brick_entry >= 0)
29500         {
29501             realloc_in_brick ((brick_address (current_brick) + brick_entry - 1),
29502                               last_plug, start_address, consing_gen,
29503                               active_new_gen_number, last_pinned_gap,
29504                               leftp);
29505         }
29506         current_brick++;
29507     }
29508
29509     if (last_plug != 0)
29510     {
29511         realloc_plug (end_address - last_plug, last_plug, consing_gen,
29512                       start_address,
29513                       active_new_gen_number, last_pinned_gap,
29514                       leftp, FALSE
29515 #ifdef SHORT_PLUGS
29516                       , NULL
29517 #endif //SHORT_PLUGS
29518                       );
29519     }
29520
29521     //Fix the old segment allocated size
29522     assert (last_pinned_gap >= heap_segment_mem (seg));
29523     assert (last_pinned_gap <= heap_segment_committed (seg));
29524     heap_segment_plan_allocated (seg) = last_pinned_gap;
29525 }
29526
29527 void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end)
29528 {
29529 #ifdef VERIFY_HEAP
29530     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
29531     {
29532         BOOL contains_pinned_plugs = FALSE;
29533         size_t mi = 0;
29534         mark* m = 0;
29535         while (mi != mark_stack_tos)
29536         {
29537             m = pinned_plug_of (mi);
29538             if ((pinned_plug (m) >= start) && (pinned_plug (m) < end))
29539             {
29540                 contains_pinned_plugs = TRUE;
29541                 break;
29542             }
29543             else
29544                 mi++;
29545         }
29546
29547         if (contains_pinned_plugs)
29548         {
29549             FATAL_GC_ERROR();
29550         }
29551     }
29552 #endif //VERIFY_HEAP
29553 }
29554
29555 void gc_heap::set_expand_in_full_gc (int condemned_gen_number)
29556 {
29557     if (!should_expand_in_full_gc)
29558     {
29559         if ((condemned_gen_number != max_generation) && 
29560             (settings.pause_mode != pause_low_latency) &&
29561             (settings.pause_mode != pause_sustained_low_latency))
29562         {
29563             should_expand_in_full_gc = TRUE;
29564         }
29565     }
29566 }
29567
29568 void gc_heap::save_ephemeral_generation_starts()
29569 {
29570     for (int ephemeral_generation = 0; ephemeral_generation < max_generation; ephemeral_generation++)
29571     {
29572         saved_ephemeral_plan_start[ephemeral_generation] = 
29573             generation_plan_allocation_start (generation_of (ephemeral_generation));
29574         saved_ephemeral_plan_start_size[ephemeral_generation] = 
29575             generation_plan_allocation_start_size (generation_of (ephemeral_generation));
29576     }
29577 }
29578
29579 generation* gc_heap::expand_heap (int condemned_generation,
29580                                   generation* consing_gen,
29581                                   heap_segment* new_heap_segment)
29582 {
29583     UNREFERENCED_PARAMETER(condemned_generation);
29584     assert (condemned_generation >= (max_generation -1));
29585     unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap
29586     uint8_t*  start_address = generation_limit (max_generation);
29587     uint8_t*  end_address = heap_segment_allocated (ephemeral_heap_segment);
29588     BOOL should_promote_ephemeral = FALSE;
29589     ptrdiff_t eph_size = total_ephemeral_size;
29590 #ifdef BACKGROUND_GC
29591     dprintf(2,("%s: ---- Heap Expansion ----", (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")));
29592 #endif //BACKGROUND_GC
29593     settings.heap_expansion = TRUE;
29594
29595 #ifdef BACKGROUND_GC
29596     if (cm_in_progress)
29597     {
29598         if (!expanded_in_fgc)
29599         {
29600             expanded_in_fgc = TRUE;
29601         }
29602     }
29603 #endif //BACKGROUND_GC
29604
29605     //reset the elevation state for next time.
29606     dprintf (2, ("Elevation: elevation = el_none"));
29607     if (settings.should_lock_elevation && !expand_reused_seg_p())
29608         settings.should_lock_elevation = FALSE;
29609
29610     heap_segment* new_seg = new_heap_segment;
29611
29612     if (!new_seg)
29613         return consing_gen;
29614
29615     //copy the card and brick tables
29616     if (g_gc_card_table!= card_table)
29617         copy_brick_card_table();
29618
29619     BOOL new_segment_p = (heap_segment_next (new_seg) == 0);
29620     dprintf (2, ("new_segment_p %Ix", (size_t)new_segment_p));
29621
29622     assert (generation_plan_allocation_start (generation_of (max_generation-1)));
29623     assert (generation_plan_allocation_start (generation_of (max_generation-1)) >=
29624             heap_segment_mem (ephemeral_heap_segment));
29625     assert (generation_plan_allocation_start (generation_of (max_generation-1)) <=
29626             heap_segment_committed (ephemeral_heap_segment));
29627
29628     assert (generation_plan_allocation_start (youngest_generation));
29629     assert (generation_plan_allocation_start (youngest_generation) <
29630             heap_segment_plan_allocated (ephemeral_heap_segment));
29631
29632     if (settings.pause_mode == pause_no_gc)
29633     {
29634         // We don't reuse for no gc, so the size used on the new eph seg is eph_size.
29635         if ((size_t)(heap_segment_reserved (new_seg) - heap_segment_mem (new_seg)) < (eph_size + soh_allocation_no_gc))
29636             should_promote_ephemeral = TRUE;
29637     }
29638     else
29639     {
29640         if (!use_bestfit)
29641         {
29642             should_promote_ephemeral = dt_low_ephemeral_space_p (tuning_deciding_promote_ephemeral);
29643         }
29644     }
29645
29646     if (should_promote_ephemeral)
29647     {
29648         ephemeral_promotion = TRUE;
29649         get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_new_seg_ep);
29650         dprintf (2, ("promoting ephemeral"));
29651         save_ephemeral_generation_starts();
29652     }
29653     else
29654     {
29655         // commit the new ephemeral segment all at once if it is a new one.
29656         if ((eph_size > 0) && new_segment_p)
29657         {
29658 #ifdef FEATURE_STRUCTALIGN
29659             // The destination may require a larger alignment padding than the source.
29660             // Assume the worst possible alignment padding.
29661             eph_size += ComputeStructAlignPad(heap_segment_mem (new_seg), MAX_STRUCTALIGN, OBJECT_ALIGNMENT_OFFSET);
29662 #endif // FEATURE_STRUCTALIGN
29663 #ifdef RESPECT_LARGE_ALIGNMENT
29664             //Since the generation start can be larger than min_obj_size,
29665             //the alignment could be switched.
29666             eph_size += switch_alignment_size(FALSE);
29667 #endif //RESPECT_LARGE_ALIGNMENT
29668             //Since the generation start can be larger than min_obj_size,
29669             //compare the alignment of the first object in gen1.
29670             if (grow_heap_segment (new_seg, heap_segment_mem (new_seg) + eph_size) == 0)
29671             {
29672                 fgm_result.set_fgm (fgm_commit_eph_segment, eph_size, FALSE);
29673                 return consing_gen;
29674             }
29675             heap_segment_used (new_seg) = heap_segment_committed (new_seg);
29676         }
29677
29678         //Fix the end of the old ephemeral heap segment
29679         heap_segment_plan_allocated (ephemeral_heap_segment) =
29680             generation_plan_allocation_start (generation_of (max_generation-1));
29681
29682         dprintf (3, ("Old ephemeral allocated set to %Ix",
29683                     (size_t)heap_segment_plan_allocated (ephemeral_heap_segment)));
29684     }
29685
29686     if (new_segment_p)
29687     {
29688         // TODO - Is this really necessary? We should think about it.
29689         //initialize the first brick
29690         size_t first_brick = brick_of (heap_segment_mem (new_seg));
29691         set_brick (first_brick,
29692                 heap_segment_mem (new_seg) - brick_address (first_brick));
29693     }
29694
29695     //From this point on, we cannot run out of memory
29696
29697     //reset the allocation of the consing generation back to the end of the
29698     //old ephemeral segment
29699     generation_allocation_limit (consing_gen) =
29700         heap_segment_plan_allocated (ephemeral_heap_segment);
29701     generation_allocation_pointer (consing_gen) = generation_allocation_limit (consing_gen);
29702     generation_allocation_segment (consing_gen) = ephemeral_heap_segment;
29703
29704     //clear the generation gap for all of the ephemeral generations
29705     {
29706         int generation_num = max_generation-1;
29707         while (generation_num >= 0)
29708         {
29709             generation* gen = generation_of (generation_num);
29710             generation_plan_allocation_start (gen) = 0;
29711             generation_num--;
29712         }
29713     }
29714
29715     heap_segment* old_seg = ephemeral_heap_segment;
29716     ephemeral_heap_segment = new_seg;
29717
29718     //Note: the ephemeral segment shouldn't be threaded onto the segment chain
29719     //because the relocation and compact phases shouldn't see it
29720
29721     // set the generation members used by allocate_in_expanded_heap
29722     // and switch to ephemeral generation
29723     consing_gen = ensure_ephemeral_heap_segment (consing_gen);
29724
29725     if (!should_promote_ephemeral)
29726     {
29727         realloc_plugs (consing_gen, old_seg, start_address, end_address,
29728                     active_new_gen_number);
29729     }
29730
29731     if (!use_bestfit)
29732     {
29733         repair_allocation_in_expanded_heap (consing_gen);
29734     }
29735
29736     // assert that the generation gaps for all of the ephemeral generations were allocated.
29737 #ifdef _DEBUG
29738     {
29739         int generation_num = max_generation-1;
29740         while (generation_num >= 0)
29741         {
29742             generation* gen = generation_of (generation_num);
29743             assert (generation_plan_allocation_start (gen));
29744             generation_num--;
29745         }
29746     }
29747 #endif // _DEBUG
29748
29749     if (!new_segment_p)
29750     {
29751         dprintf (2, ("Demoting ephemeral segment"));
29752         //demote the entire segment.
29753         settings.demotion = TRUE;
29754         get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
29755         demotion_low = heap_segment_mem (ephemeral_heap_segment);
29756         demotion_high = heap_segment_reserved (ephemeral_heap_segment);
29757     }
29758     else
29759     {
29760         demotion_low = MAX_PTR;
29761         demotion_high = 0;
29762 #ifndef MULTIPLE_HEAPS
29763         settings.demotion = FALSE;
29764         get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit);
29765 #endif //!MULTIPLE_HEAPS
29766     }
29767     ptrdiff_t eph_size1 = total_ephemeral_size;
29768     MAYBE_UNUSED_VAR(eph_size1);
29769
29770     if (!should_promote_ephemeral && new_segment_p)
29771     {
29772         assert (eph_size1 <= eph_size);
29773     }
29774
29775     if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg))
29776     {
29777         // This is to catch when we accidentally delete a segment that has pins.
29778         verify_no_pins (heap_segment_mem (old_seg), heap_segment_reserved (old_seg));
29779     }
29780
29781     verify_no_pins (heap_segment_plan_allocated (old_seg), heap_segment_reserved(old_seg));
29782
29783     dprintf(2,("---- End of Heap Expansion ----"));
29784     return consing_gen;
29785 }
29786
29787 void gc_heap::set_static_data()
29788 {
29789     static_data* pause_mode_sdata = static_data_table[latency_level];
29790     for (int i = 0; i < NUMBERGENERATIONS; i++)
29791     {
29792         dynamic_data* dd = dynamic_data_of (i);
29793         static_data* sdata = &pause_mode_sdata[i];
29794
29795         dd->sdata = sdata;
29796         dd->min_size = sdata->min_size;
29797
29798         dprintf (GTC_LOG, ("PM: %d - min: %Id, max: %Id, fr_l: %Id, fr_b: %d%%",
29799             settings.pause_mode,
29800             dd->min_size, dd_max_size (dd),
29801             sdata->fragmentation_limit, (int)(sdata->fragmentation_burden_limit * 100)));
29802     }
29803 }
29804
29805 // Initialize the values that are not const.
29806 void gc_heap::init_static_data()
29807 {
29808     size_t gen0size = GCHeap::GetValidGen0MaxSize(get_valid_segment_size());
29809     size_t gen0_min_size = Align(gen0size / 8 * 5);
29810
29811     size_t gen0_max_size =
29812 #ifdef MULTIPLE_HEAPS
29813         max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024));
29814 #else //MULTIPLE_HEAPS
29815         (gc_can_use_concurrent ?
29816             6*1024*1024 :
29817             max (6*1024*1024,  min ( Align(soh_segment_size/2), 200*1024*1024)));
29818 #endif //MULTIPLE_HEAPS
29819
29820     // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap.
29821     size_t gen1_max_size = 
29822 #ifdef MULTIPLE_HEAPS
29823         max (6*1024*1024, Align(soh_segment_size/2));
29824 #else //MULTIPLE_HEAPS
29825         (gc_can_use_concurrent ?
29826             6*1024*1024 :
29827             max (6*1024*1024, Align(soh_segment_size/2)));
29828 #endif //MULTIPLE_HEAPS
29829
29830     dprintf (GTC_LOG, ("gen0size: %Id, gen0 min: %Id, max: %Id, gen1 max: %Id",
29831         gen0size, gen0_min_size, gen0_max_size, gen1_max_size));
29832
29833     for (int i = latency_level_first; i <= latency_level_last; i++)
29834     {
29835         static_data_table[i][0].min_size = gen0_min_size;
29836         static_data_table[i][0].max_size = gen0_max_size;
29837         static_data_table[i][1].max_size = gen1_max_size;
29838     }
29839 }
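
// A worked example of the sizing above, assuming an illustrative 256MB
// soh_segment_size on a MULTIPLE_HEAPS build: gen0_max_size is
// max(6MB, min(Align(128MB), 200MB)) = 128MB, and gen1_max_size is
// max(6MB, Align(128MB)) = 128MB; i.e. both budgets are bounded by half the
// segment, but gen0's is additionally capped at 200MB for large segments.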
29840
29841 bool gc_heap::init_dynamic_data()
29842 {
29843     qpf = GCToOSInterface::QueryPerformanceFrequency();
29844
29845     uint32_t now = (uint32_t)GetHighPrecisionTimeStamp();
29846
29847     set_static_data();
29848
29849     for (int i = 0; i <= max_generation+1; i++)
29850     {
29851         dynamic_data* dd = dynamic_data_of (i);
29852         dd->gc_clock = 0;
29853         dd->time_clock = now;
29854         dd->current_size = 0;
29855         dd->promoted_size = 0;
29856         dd->collection_count = 0;
29857         dd->new_allocation = dd->min_size;
29858         dd->gc_new_allocation = dd->new_allocation;
29859         dd->desired_allocation = dd->new_allocation;
29860         dd->fragmentation = 0;
29861     }
29862
29863 #ifdef GC_CONFIG_DRIVEN
29864     if (heap_number == 0)
29865         time_init = now;
29866 #endif //GC_CONFIG_DRIVEN
29867
29868     return true;
29869 }
29870
29871 float gc_heap::surv_to_growth (float cst, float limit, float max_limit)
29872 {
29873     if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f))))
29874         return ((limit - limit*cst) / (1.0f - (cst * limit)));
29875     else
29876         return max_limit;
29877 }
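
// The guard in surv_to_growth above is the closed form evaluated at its
// cap: solving
//
//     f(cst) = (limit - limit*cst) / (1 - cst*limit) = max_limit
//
// for cst gives cst = (max_limit - limit) / (limit * (max_limit - 1)).
// For growth limits above 1, f increases with the survival rate cst, so
// below that threshold the computed factor is still under max_limit and is
// returned as-is; at or above it, the result is clamped to max_limit.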
29878
29879
29880 //if the allocation budget wasn't exhausted, the new budget may be wrong because the measured survival
29881 //may not be representative (the collection happened too soon). Correct it with a linear interpolation
29882 //against the previous value of the budget.
29883 static size_t linear_allocation_model (float allocation_fraction, size_t new_allocation, 
29884                                        size_t previous_desired_allocation, size_t collection_count)
29885 {
29886     if ((allocation_fraction < 0.95) && (allocation_fraction > 0.0))
29887     {
29888         dprintf (2, ("allocation fraction: %d", (int)(allocation_fraction*100.0)));
29889         new_allocation = (size_t)(allocation_fraction*new_allocation + (1.0-allocation_fraction)*previous_desired_allocation);
29890     }
29891 #if 0 
29892     size_t smoothing = 3; // exponential smoothing factor
29893     if (smoothing  > collection_count)
29894         smoothing  = collection_count;
29895     new_allocation = new_allocation / smoothing + ((previous_desired_allocation / smoothing) * (smoothing-1));
29896 #else
29897     UNREFERENCED_PARAMETER(collection_count);
29898 #endif //0
29899     return new_allocation;
29900 }
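
// For example, if a collection was triggered when only 40% of the budget
// had been used (allocation_fraction = 0.4), the blend above keeps 40% of
// the freshly computed budget and carries over 60% of the previous desired
// allocation, damping a decision made from an unrepresentative survival
// sample.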
29901
29902 size_t gc_heap::desired_new_allocation (dynamic_data* dd,
29903                                         size_t out, int gen_number,
29904                                         int pass)
29905 {
29906     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
29907
29908     if (dd_begin_data_size (dd) == 0)
29909     {
29910         size_t new_allocation = dd_min_size (dd);
29911         current_gc_data_per_heap->gen_data[gen_number].new_allocation = new_allocation;        
29912         return new_allocation;
29913     }
29914     else
29915     {
29916         float     cst;
29917         size_t    previous_desired_allocation = dd_desired_allocation (dd);
29918         size_t    current_size = dd_current_size (dd);
29919         float     max_limit = dd_max_limit (dd);
29920         float     limit = dd_limit (dd);
29921         size_t    min_gc_size = dd_min_size (dd);
29922         float     f = 0;
29923         size_t    max_size = dd_max_size (dd);
29924         size_t    new_allocation = 0;
29925         float allocation_fraction = (float) (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)) / (float) (dd_desired_allocation (dd));
29926         if (gen_number >= max_generation)
29927         {
29928             size_t    new_size = 0;
29929
29930             cst = min (1.0f, float (out) / float (dd_begin_data_size (dd)));
29931
29932             f = surv_to_growth (cst, limit, max_limit);
29933             size_t max_growth_size = (size_t)(max_size / f);
29934             if (current_size >= max_growth_size)
29935             {
29936                 new_size = max_size;
29937             }
29938             else
29939             {
29940                 new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size);
29941             }
29942
29943             assert ((new_size >= current_size) || (new_size == max_size));
29944
29945             if (gen_number == max_generation)
29946             {
29947                 new_allocation  =  max((new_size - current_size), min_gc_size);
29948
29949                 new_allocation = linear_allocation_model (allocation_fraction, new_allocation, 
29950                                                           dd_desired_allocation (dd), dd_collection_count (dd));
29951
29952                 if ((dd_fragmentation (dd) > ((size_t)((f-1)*current_size))))
29953                 {
29954                     //reducing allocation in case of fragmentation
29955                     size_t new_allocation1 = max (min_gc_size,
29956                                                   // CAN OVERFLOW
29957                                                   (size_t)((float)new_allocation * current_size /
29958                                                            ((float)current_size + 2*dd_fragmentation (dd))));
29959                     dprintf (2, ("Reducing max_gen allocation due to fragmentation from %Id to %Id",
29960                                  new_allocation, new_allocation1));
29961                     new_allocation = new_allocation1;
29962                 }
29963             }
29964             else //large object heap
29965             {
29966                 uint32_t memory_load = 0;
29967                 uint64_t available_physical = 0;
29968                 get_memory_info (&memory_load, &available_physical);
29969                 if (heap_number == 0)
29970                     settings.exit_memory_load = memory_load;
29971                 if (available_physical > 1024*1024)
29972                     available_physical -= 1024*1024;
29973
29974                 uint64_t available_free = available_physical + (uint64_t)generation_free_list_space (generation_of (gen_number));
29975                 if (available_free > (uint64_t)MAX_PTR)
29976                 {
29977                     available_free = (uint64_t)MAX_PTR;
29978                 }
29979
29980                 //try to avoid OOM during large object allocation
29981                 new_allocation = max (min(max((new_size - current_size), dd_desired_allocation (dynamic_data_of (max_generation))), 
29982                                           (size_t)available_free), 
29983                                       max ((current_size/4), min_gc_size));
29984
29985                 new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
29986                                                           dd_desired_allocation (dd), dd_collection_count (dd));
29987
29988             }
29989         }
29990         else
29991         {
29992             size_t survivors = out;
29993             cst = float (survivors) / float (dd_begin_data_size (dd));
29994             f = surv_to_growth (cst, limit, max_limit);
29995             new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size);
29996
29997             new_allocation = linear_allocation_model (allocation_fraction, new_allocation, 
29998                                                       dd_desired_allocation (dd), dd_collection_count (dd));
29999
30000             if (gen_number == 0)
30001             {
30002                 if (pass == 0)
30003                 {
30004
30005                     //printf ("%f, %Id\n", cst, new_allocation);
30006                     size_t free_space = generation_free_list_space (generation_of (gen_number));
30007                     // DTREVIEW - is min_gc_size really a good choice? 
30008                     // on 64-bit this will almost always be true.
30009                     dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size));
30010                     if (free_space > min_gc_size)
30011                     {
30012                         settings.gen0_reduction_count = 2;
30013                     }
30014                     else
30015                     {
30016                         if (settings.gen0_reduction_count > 0)
30017                             settings.gen0_reduction_count--;
30018                     }
30019                 }
30020                 if (settings.gen0_reduction_count > 0)
30021                 {
30022                     dprintf (2, ("Reducing new allocation based on fragmentation"));
30023                     new_allocation = min (new_allocation,
30024                                           max (min_gc_size, (max_size/3)));
30025                 }
30026             }
30027         }
30028
30029         size_t new_allocation_ret = 
30030             Align (new_allocation, get_alignment_constant (!(gen_number == (max_generation+1))));
30031         int gen_data_index = gen_number;
30032         gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_data_index]);
30033         gen_data->new_allocation = new_allocation_ret;
30034
30035         dd_surv (dd) = cst;
30036
30037 #ifdef SIMPLE_DPRINTF
30038         dprintf (1, ("h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id",
30039                      heap_number, gen_number, out, current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)),
30040                      (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
30041 #else
30042         dprintf (1,("gen: %d in: %Id out: %Id ", gen_number, generation_allocation_size (generation_of (gen_number)), out));
30043         dprintf (1,("current: %Id alloc: %Id ", current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd))));
30044         dprintf (1,(" surv: %d%% f: %d%% new-size: %Id new-alloc: %Id",
30045                     (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
30046 #endif //SIMPLE_DPRINTF
30047
30048         return new_allocation_ret;
30049     }
30050 }
30051
30052 //returns the planned size of a generation (including free list element)
30053 size_t gc_heap::generation_plan_size (int gen_number)
30054 {
30055     if (0 == gen_number)
30056         return max((heap_segment_plan_allocated (ephemeral_heap_segment) -
30057                     generation_plan_allocation_start (generation_of (gen_number))),
30058                    (int)Align (min_obj_size));
30059     else
30060     {
30061         generation* gen = generation_of (gen_number);
30062         if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
30063             return (generation_plan_allocation_start (generation_of (gen_number - 1)) -
30064                     generation_plan_allocation_start (generation_of (gen_number)));
30065         else
30066         {
30067             size_t gensize = 0;
30068             heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
30069
30070             PREFIX_ASSUME(seg != NULL);
30071
30072             while (seg && (seg != ephemeral_heap_segment))
30073             {
30074                 gensize += heap_segment_plan_allocated (seg) -
30075                            heap_segment_mem (seg);
30076                 seg = heap_segment_next_rw (seg);
30077             }
30078             if (seg)
30079             {
30080                 gensize += (generation_plan_allocation_start (generation_of (gen_number - 1)) -
30081                             heap_segment_mem (ephemeral_heap_segment));
30082             }
30083             return gensize;
30084         }
30085     }
30086
30087 }
30088
30089 //returns the size of a generation (including free list element)
30090 size_t gc_heap::generation_size (int gen_number)
30091 {
30092     if (0 == gen_number)
30093         return max((heap_segment_allocated (ephemeral_heap_segment) -
30094                     generation_allocation_start (generation_of (gen_number))),
30095                    (int)Align (min_obj_size));
30096     else
30097     {
30098         generation* gen = generation_of (gen_number);
30099         if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
30100             return (generation_allocation_start (generation_of (gen_number - 1)) -
30101                     generation_allocation_start (generation_of (gen_number)));
30102         else
30103         {
30104             size_t gensize = 0;
30105             heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
30106
30107             PREFIX_ASSUME(seg != NULL);
30108
30109             while (seg && (seg != ephemeral_heap_segment))
30110             {
30111                 gensize += heap_segment_allocated (seg) -
30112                            heap_segment_mem (seg);
30113                 seg = heap_segment_next_rw (seg);
30114             }
30115             if (seg)
30116             {
30117                 gensize += (generation_allocation_start (generation_of (gen_number - 1)) -
30118                             heap_segment_mem (ephemeral_heap_segment));
30119             }
30120
30121             return gensize;
30122         }
30123     }
30124
30125 }
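
// The ephemeral layout both size functions above rely on: generation starts
// are laid out oldest-to-youngest on the ephemeral segment, so a
// generation's objects end where the next younger generation starts:
//
//   heap_segment_mem .. [gen1 start .. gen1 objs][gen0 start .. gen0 objs] .. allocated
//
// hence size(genN) = start(genN-1) - start(genN) for generations on the
// ephemeral segment, while gen0 runs up to heap_segment_allocated (or
// heap_segment_plan_allocated for the planned sizes).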
30126
30127 size_t  gc_heap::compute_in (int gen_number)
30128 {
30129     assert (gen_number != 0);
30130     dynamic_data* dd = dynamic_data_of (gen_number);
30131
30132     size_t in = generation_allocation_size (generation_of (gen_number));
30133
30134     if (gen_number == max_generation && ephemeral_promotion)
30135     {
30136         in = 0;
30137         for (int i = 0; i <= max_generation; i++)
30138         {
30139             dynamic_data* dd = dynamic_data_of (i);
30140             in += dd_survived_size (dd);
30141             if (i != max_generation)
30142             {
30143                 generation_condemned_allocated (generation_of (gen_number)) += dd_survived_size (dd);
30144             }
30145         }
30146     }
30147
30148     dd_gc_new_allocation (dd) -= in;
30149     dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30150
30151     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30152     gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
30153     gen_data->in = in;
30154
30155     generation_allocation_size (generation_of (gen_number)) = 0;
30156     return in;
30157 }
30158
30159 void  gc_heap::compute_promoted_allocation (int gen_number)
30160 {
30161     compute_in (gen_number);
30162 }
30163
30164 #ifdef BIT64
30165 inline
30166 size_t gc_heap::trim_youngest_desired (uint32_t memory_load,
30167                                        size_t total_new_allocation,
30168                                        size_t total_min_allocation)
30169 {
30170     if (memory_load < MAX_ALLOWED_MEM_LOAD)
30171     {
30172         // If the total of memory load and gen0 budget exceeds 
30173         // our max memory load limit, trim the gen0 budget so the total 
30174         // is the max memory load limit.
30175         size_t remain_memory_load = (MAX_ALLOWED_MEM_LOAD - memory_load) * mem_one_percent;
30176         return min (total_new_allocation, remain_memory_load);
30177     }
30178     else
30179     {
30180         return max (mem_one_percent, total_min_allocation);
30181     }
30182 }
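
// A worked example, assuming an illustrative MAX_ALLOWED_MEM_LOAD of 90
// with mem_one_percent being 1% of physical memory: at 80% memory load
// there is 10% of headroom, so the combined gen0 budget is trimmed to at
// most 10 * mem_one_percent; at or above 90% load the budget falls back to
// max (mem_one_percent, total_min_allocation).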
30183
30184 size_t gc_heap::joined_youngest_desired (size_t new_allocation)
30185 {
30186     dprintf (2, ("Entry memory load: %d; gen0 new_alloc: %Id", settings.entry_memory_load, new_allocation));
30187
30188     size_t final_new_allocation = new_allocation;
30189     if (new_allocation > MIN_YOUNGEST_GEN_DESIRED)
30190     {
30191         uint32_t num_heaps = 1;
30192
30193 #ifdef MULTIPLE_HEAPS
30194         num_heaps = gc_heap::n_heaps;
30195 #endif //MULTIPLE_HEAPS
30196
30197         size_t total_new_allocation = new_allocation * num_heaps;
30198         size_t total_min_allocation = MIN_YOUNGEST_GEN_DESIRED * num_heaps;
30199
30200         if ((settings.entry_memory_load >= MAX_ALLOWED_MEM_LOAD) ||
30201             (total_new_allocation > max (youngest_gen_desired_th, total_min_allocation)))
30202         {
30203             uint32_t memory_load = 0;
30204             get_memory_info (&memory_load);
30205             settings.exit_memory_load = memory_load;
30206             dprintf (2, ("Current memory load: %d", memory_load));
30207
30208             size_t final_total = 
30209                 trim_youngest_desired (memory_load, total_new_allocation, total_min_allocation);
30210             size_t max_new_allocation = 
30211 #ifdef MULTIPLE_HEAPS
30212                                          dd_max_size (g_heaps[0]->dynamic_data_of (0));
30213 #else //MULTIPLE_HEAPS
30214                                          dd_max_size (dynamic_data_of (0));
30215 #endif //MULTIPLE_HEAPS
30216
30217             final_new_allocation = min (Align ((final_total / num_heaps), get_alignment_constant (TRUE)), max_new_allocation);
30218         }
30219     }
30220
30221     if (final_new_allocation < new_allocation)
30222     {
30223         settings.gen0_reduction_count = 2;
30224     }
30225
30226     return final_new_allocation;
30227 }
30228 #endif // BIT64 
30229
30230 inline
30231 gc_history_per_heap* gc_heap::get_gc_data_per_heap()
30232 {
30233 #ifdef BACKGROUND_GC
30234     return (settings.concurrent ? &bgc_data_per_heap : &gc_data_per_heap);
30235 #else
30236     return &gc_data_per_heap;
30237 #endif //BACKGROUND_GC
30238 }
30239
30240 void gc_heap::compute_new_dynamic_data (int gen_number)
30241 {
30242     PREFIX_ASSUME(gen_number >= 0);
30243     PREFIX_ASSUME(gen_number <= max_generation);
30244
30245     dynamic_data* dd = dynamic_data_of (gen_number);
30246     generation*   gen = generation_of (gen_number);
30247     size_t        in = (gen_number==0) ? 0 : compute_in (gen_number);
30248
30249     size_t total_gen_size = generation_size (gen_number);
30250     //keep track of fragmentation
30251     dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen);
30252     dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
30253
30254     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30255
30256     size_t out = dd_survived_size (dd);
30257
30258     gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
30259     gen_data->size_after = total_gen_size;
30260     gen_data->free_list_space_after = generation_free_list_space (gen);
30261     gen_data->free_obj_space_after = generation_free_obj_space (gen);
30262
30263     if ((settings.pause_mode == pause_low_latency) && (gen_number <= 1))
30264     {
30265         // When we are in low latency mode, we can still be
30266         // condemning more than gen1 because of induced GCs.
30267         dd_desired_allocation (dd) = low_latency_alloc;
30268     }
30269     else
30270     {
30271         if (gen_number == 0)
30272         {
30273             // Compensate for the promotion of dead finalizable objects:
30274             // they shouldn't be counted toward growth.
30275             size_t final_promoted = 0;
30276             final_promoted = min (promoted_bytes (heap_number), out);
30277             // Prefast: this is clear from above but prefast needs to be told explicitly
30278             PREFIX_ASSUME(final_promoted <= out);
30279
30280             dprintf (2, ("gen: %d final promoted: %Id", gen_number, final_promoted));
30281             dd_freach_previous_promotion (dd) = final_promoted;
30282             size_t lower_bound = desired_new_allocation (dd, out-final_promoted, gen_number, 0);
30283
30284             if (settings.condemned_generation == 0)
30285             {
30286                 //there is no noise.
30287                 dd_desired_allocation (dd) = lower_bound;
30288             }
30289             else
30290             {
30291                 size_t higher_bound = desired_new_allocation (dd, out, gen_number, 1);
30292
30293                 // <TODO>This assert was causing AppDomains\unload\test1n\test1nrun.bat to fail</TODO>
30294                 //assert ( lower_bound <= higher_bound);
30295
30296                 //discount the noise. Change the desired allocation
30297                 //only if the previous value is outside of the range.
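                // For example (illustrative numbers): with a lower_bound of
                // 10MB and a higher_bound of 14MB, a previous desired
                // allocation of 12MB is kept as is, while 8MB would be raised
                // to 10MB and 20MB lowered to 14MB.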
30298                 if (dd_desired_allocation (dd) < lower_bound)
30299                 {
30300                     dd_desired_allocation (dd) = lower_bound;
30301                 }
30302                 else if (dd_desired_allocation (dd) > higher_bound)
30303                 {
30304                     dd_desired_allocation (dd) = higher_bound;
30305                 }
30306 #if defined (BIT64) && !defined (MULTIPLE_HEAPS)
30307                 dd_desired_allocation (dd) = joined_youngest_desired (dd_desired_allocation (dd));
30308 #endif // BIT64 && !MULTIPLE_HEAPS
30309                 trim_youngest_desired_low_memory();
30310                 dprintf (2, ("final gen0 new_alloc: %Id", dd_desired_allocation (dd)));
30311             }
30312         }
30313         else
30314         {
30315             dd_desired_allocation (dd) = desired_new_allocation (dd, out, gen_number, 0);
30316         }
30317     }
30318
30319     gen_data->pinned_surv = dd_pinned_survived_size (dd);
30320     gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd);
30321
30322     dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
30323     dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30324
30325     //update counter
30326     dd_promoted_size (dd) = out;
30327     if (gen_number == max_generation)
30328     {
30329         dd = dynamic_data_of (max_generation+1);
30330         total_gen_size = generation_size (max_generation + 1);
30331         dd_fragmentation (dd) = generation_free_list_space (large_object_generation) + 
30332                                 generation_free_obj_space (large_object_generation);
30333         dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
30334         dd_survived_size (dd) = dd_current_size (dd);
30335         in = 0;
30336         out = dd_current_size (dd);
30337         dd_desired_allocation (dd) = desired_new_allocation (dd, out, max_generation+1, 0);
30338         dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd),
30339                                            get_alignment_constant (FALSE));
30340         dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30341
30342         gen_data = &(current_gc_data_per_heap->gen_data[max_generation+1]);
30343         gen_data->size_after = total_gen_size;
30344         gen_data->free_list_space_after = generation_free_list_space (large_object_generation);
30345         gen_data->free_obj_space_after = generation_free_obj_space (large_object_generation);
30346         gen_data->npinned_surv = out;
30347 #ifdef BACKGROUND_GC
30348         end_loh_size = total_gen_size;
30349 #endif //BACKGROUND_GC
30350         //update counter
30351         dd_promoted_size (dd) = out;
30352     }
30353 }
30354
30355 void gc_heap::trim_youngest_desired_low_memory()
30356 {
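    // When the machine is low on memory, cap the gen0 budget at roughly a
    // tenth of what is committed for gen2 and LOH segments, but never below
    // gen0's minimum budget, and never raise the existing value.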
30357     if (g_low_memory_status)
30358     {
30359         size_t committed_mem = 0;
30360         heap_segment* seg = generation_start_segment (generation_of (max_generation));
30361         while (seg)
30362         {
30363             committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
30364             seg = heap_segment_next (seg);
30365         }
30366         seg = generation_start_segment (generation_of (max_generation + 1));
30367         while (seg)
30368         {
30369             committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
30370             seg = heap_segment_next (seg);
30371         }
30372
30373         dynamic_data* dd = dynamic_data_of (0);
30374         size_t current = dd_desired_allocation (dd);
30375         size_t candidate = max (Align ((committed_mem / 10), get_alignment_constant(FALSE)), dd_min_size (dd));
30376
30377         dd_desired_allocation (dd) = min (current, candidate);
30378     }
30379 }
30380
30381 void gc_heap::decommit_ephemeral_segment_pages()
30382 {
30383     if (settings.concurrent)
30384     {
30385         return;
30386     }
30387
30388     size_t slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
30389     dynamic_data* dd = dynamic_data_of (0);
30390
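    // slack_space is what is committed but not allocated at the end of the
    // ephemeral segment. On workstation GC the decommit is also delayed:
    // gc_gen0_desired_high tracks a high-water mark for the gen0 budget plus
    // some extra space, and slack_space is only trimmed down to it once
    // GC_EPHEMERAL_DECOMMIT_TIMEOUT has elapsed.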
30391 #ifndef MULTIPLE_HEAPS
30392     size_t extra_space = (g_low_memory_status ? 0 : (512 * 1024));
30393     size_t decommit_timeout = (g_low_memory_status ? 0 : GC_EPHEMERAL_DECOMMIT_TIMEOUT);
30394     size_t ephemeral_elapsed = dd_time_clock(dd) - gc_last_ephemeral_decommit_time;
30395
30396     if (dd_desired_allocation (dd) > gc_gen0_desired_high)
30397     {
30398         gc_gen0_desired_high = dd_desired_allocation (dd) + extra_space;
30399     }
30400
30401     if (ephemeral_elapsed >= decommit_timeout)
30402     {
30403         slack_space = min (slack_space, gc_gen0_desired_high);
30404
30405         gc_last_ephemeral_decommit_time = dd_time_clock(dd);
30406         gc_gen0_desired_high = 0;
30407     }
30408 #endif //!MULTIPLE_HEAPS
30409
30410     if (settings.condemned_generation >= (max_generation-1))
30411     {
30412         size_t new_slack_space = 
30413 #ifdef BIT64
30414                     max(min(min(soh_segment_size/32, dd_max_size(dd)), (generation_size (max_generation) / 10)), dd_desired_allocation(dd));
30415 #else
30416 #ifdef FEATURE_CORECLR
30417                     dd_desired_allocation (dd);
30418 #else
30419                     dd_max_size (dd);
30420 #endif //FEATURE_CORECLR                                    
30421 #endif // BIT64
30422
30423         slack_space = min (slack_space, new_slack_space);
30424     }
30425
30426     decommit_heap_segment_pages (ephemeral_heap_segment, slack_space);    
30427
30428     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30429     current_gc_data_per_heap->extra_gen0_committed = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
30430 }
30431
30432 //This is meant to be called by decide_on_compacting.
30433
30434 size_t gc_heap::generation_fragmentation (generation* gen,
30435                                           generation* consing_gen,
30436                                           uint8_t* end)
30437 {
30438     size_t frag;
30439     uint8_t* alloc = generation_allocation_pointer (consing_gen);
30440     // If the allocation pointer has reached the ephemeral segment, only
30441     // the tail from it to the passed-in end counts; otherwise the whole
30442     // ephemeral segment is considered fragmentation.
30443     if (in_range_for_segment (alloc, ephemeral_heap_segment))
30444     {
30445         if (alloc <= heap_segment_allocated(ephemeral_heap_segment))
30446             frag = end - alloc;
30447         else
30448         {
30449             // case when no survivors, allocated is set to the beginning
30450             frag = 0;
30451         }
30452         dprintf (3, ("ephemeral frag: %Id", frag));
30453     }
30454     else
30455         frag = (heap_segment_allocated (ephemeral_heap_segment) -
30456                 heap_segment_mem (ephemeral_heap_segment));
30457     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
30458
30459     PREFIX_ASSUME(seg != NULL);
30460
30461     while (seg != ephemeral_heap_segment)
30462     {
30463         frag += (heap_segment_allocated (seg) -
30464                  heap_segment_plan_allocated (seg));
30465         dprintf (3, ("seg: %Ix, frag: %Id", (size_t)seg,
30466                      (heap_segment_allocated (seg) -
30467                       heap_segment_plan_allocated (seg))));
30468
30469         seg = heap_segment_next_rw (seg);
30470         assert (seg);
30471     }
30472     dprintf (3, ("frag: %Id discounting pinned plugs", frag));
30473     //add the length of the dequeued plug free space
30474     size_t bos = 0;
30475     while (bos < mark_stack_bos)
30476     {
30477         frag += (pinned_len (pinned_plug_of (bos)));
30478         bos++;
30479     }
30480
30481     return frag;
30482 }
30483
30484 // for SOH this returns the total sizes of the generation and its 
30485 // younger generation(s).
30486 // for LOH this returns just LOH size.
30487 size_t gc_heap::generation_sizes (generation* gen)
30488 {
30489     size_t result = 0;
30490     if (generation_start_segment (gen) == ephemeral_heap_segment)
30491         result = (heap_segment_allocated (ephemeral_heap_segment) -
30492                   generation_allocation_start (gen));
30493     else
30494     {
30495         heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
30496
30497         PREFIX_ASSUME(seg != NULL);
30498
30499         while (seg)
30500         {
30501             result += (heap_segment_allocated (seg) -
30502                        heap_segment_mem (seg));
30503             seg = heap_segment_next_in_range (seg);
30504         }
30505     }
30506
30507     return result;
30508 }
30509
30510 BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
30511                                     size_t fragmentation,
30512                                     BOOL& should_expand)
30513 {
30514     BOOL should_compact = FALSE;
30515     should_expand = FALSE;
30516     generation*   gen = generation_of (condemned_gen_number);
30517     dynamic_data* dd = dynamic_data_of (condemned_gen_number);
30518     size_t gen_sizes     = generation_sizes(gen);
30519     float  fragmentation_burden = ( ((0 == fragmentation) || (0 == gen_sizes)) ? (0.0f) :
30520                                     (float (fragmentation) / gen_sizes) );
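    // fragmentation_burden is the fraction of the condemned generations'
    // total size that is fragmentation; e.g. (illustrative numbers) 20MB of
    // fragmentation over a 100MB generation size gives a burden of 0.2.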
30521
30522     dprintf (GTC_LOG, ("fragmentation: %Id (%d%%)", fragmentation, (int)(fragmentation_burden * 100.0)));
30523
30524 #ifdef STRESS_HEAP
30525     // for pure GC stress runs we need compaction, for GC stress "mix"
30526     // we need to ensure a better mix of compacting and sweeping collections
30527     if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent
30528         && !g_pConfig->IsGCStressMix())
30529         should_compact = TRUE;
30530
30531 #ifdef GC_STATS
30532     // in GC stress "mix" mode, for stress induced collections make sure we 
30533     // keep sweeps and compactions relatively balanced. do not (yet) force sweeps
30534     // against the GC's determination, as it may lead to premature OOMs.
30535     if (g_pConfig->IsGCStressMix() && settings.stress_induced)
30536     {
30537         int compactions = g_GCStatistics.cntCompactFGC+g_GCStatistics.cntCompactNGC;
30538         int sweeps = g_GCStatistics.cntFGC + g_GCStatistics.cntNGC - compactions;
30539         if (compactions < sweeps / 10)
30540         {
30541             should_compact = TRUE;
30542         }
30543     }
30544 #endif // GC_STATS
30545 #endif //STRESS_HEAP
30546
30547     if (GCConfig::GetForceCompact())
30548         should_compact = TRUE;
30549
30550     if ((condemned_gen_number == max_generation) && last_gc_before_oom)
30551     {
30552         should_compact = TRUE;
30553         last_gc_before_oom = FALSE;
30554         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc);
30555     }
30556
30557     if (settings.reason == reason_induced_compacting)
30558     {
30559         dprintf (2, ("induced compacting GC"));
30560         should_compact = TRUE;
30561         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_induced_compacting);
30562     }
30563
30564     if (settings.reason == reason_pm_full_gc)
30565     {
30566         assert (condemned_gen_number == max_generation);
30567         if (heap_number == 0)
30568         {
30569             dprintf (GTC_LOG, ("PM doing compacting full GC after a gen1"));
30570         }
30571         should_compact = TRUE;
30572     }
30573
30574     dprintf (2, ("Fragmentation: %d Fragmentation burden %d%%",
30575                 fragmentation, (int) (100*fragmentation_burden)));
30576
30577     if (provisional_mode_triggered && (condemned_gen_number == (max_generation - 1)))
30578     {
30579         dprintf (GTC_LOG, ("gen1 in PM always compact"));
30580         should_compact = TRUE;
30581     }
30582
30583     if (!should_compact)
30584     {
30585         if (dt_low_ephemeral_space_p (tuning_deciding_compaction))
30586         {
30587             dprintf(GTC_LOG, ("compacting due to low ephemeral"));
30588             should_compact = TRUE;
30589             get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_low_ephemeral);
30590         }
30591     }
30592
30593     if (should_compact)
30594     {
30595         if ((condemned_gen_number >= (max_generation - 1)))
30596         {
30597             if (dt_low_ephemeral_space_p (tuning_deciding_expansion))
30598             {
30599                 dprintf (GTC_LOG,("Not enough space for all ephemeral generations with compaction"));
30600                 should_expand = TRUE;
30601             }
30602         }
30603     }
30604
30605 #ifdef BIT64
30606     BOOL high_memory = FALSE;
30607 #endif // BIT64
30608
30609     if (!should_compact)
30610     {
30611         // We are not putting this in dt_high_frag_p because it's not exactly
30612         // high fragmentation - it's just enough planned fragmentation for us to 
30613         // want to compact. Also the "fragmentation" we are talking about here
30614         // is different from anywhere else.
30615         BOOL frag_exceeded = ((fragmentation >= dd_fragmentation_limit (dd)) &&
30616                                 (fragmentation_burden >= dd_fragmentation_burden_limit (dd)));
30617
30618         if (frag_exceeded)
30619         {
30620 #ifdef BACKGROUND_GC
30621             // do not force compaction if this was a stress-induced GC
30622             IN_STRESS_HEAP(if (!settings.stress_induced))
30623             {
30624 #endif // BACKGROUND_GC
30625                 assert (settings.concurrent == FALSE);
30626                 should_compact = TRUE;
30627                 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_frag);
30628 #ifdef BACKGROUND_GC
30629             }
30630 #endif // BACKGROUND_GC
30631         }
30632
30633 #ifdef BIT64
30634         // check for high memory situation
30635         if(!should_compact)
30636         {
30637             uint32_t num_heaps = 1;
30638 #ifdef MULTIPLE_HEAPS
30639             num_heaps = gc_heap::n_heaps;
30640 #endif // MULTIPLE_HEAPS
30641             
30642             ptrdiff_t reclaim_space = generation_size(max_generation) - generation_plan_size(max_generation);
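            // reclaim_space estimates how much gen2 would shrink if we
            // compacted: its current size minus its planned post-compaction
            // size.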
30643             if((settings.entry_memory_load >= high_memory_load_th) && (settings.entry_memory_load < v_high_memory_load_th))
30644             {
30645                 if(reclaim_space > (int64_t)(min_high_fragmentation_threshold (entry_available_physical_mem, num_heaps)))
30646                 {
30647                     dprintf(GTC_LOG,("compacting due to fragmentation in high memory"));
30648                     should_compact = TRUE;
30649                     get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_mem_frag);
30650                 }
30651                 high_memory = TRUE;
30652             }
30653             else if(settings.entry_memory_load >= v_high_memory_load_th)
30654             {
30655                 if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold (num_heaps)))
30656                 {
30657                     dprintf(GTC_LOG,("compacting due to fragmentation in very high memory"));
30658                     should_compact = TRUE;
30659                     get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_vhigh_mem_frag);
30660                 }
30661                 high_memory = TRUE;
30662             }
30663         }
30664 #endif // BIT64
30665     }
30666
30667     // The purpose of calling ensure_gap_allocation here is to make sure
30668     // that we actually are able to commit the memory to allocate generation
30669     // starts.
30670     if ((should_compact == FALSE) &&
30671         (ensure_gap_allocation (condemned_gen_number) == FALSE))
30672     {
30673         should_compact = TRUE;
30674         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_no_gaps);
30675     }
30676
30677     if (settings.condemned_generation == max_generation)
30678     {
30679         //check the progress
30680         if (
30681 #ifdef BIT64
30682             (high_memory && !should_compact) ||
30683 #endif // BIT64
30684             (generation_plan_allocation_start (generation_of (max_generation - 1)) >= 
30685                 generation_allocation_start (generation_of (max_generation - 1))))
30686         {
30687             dprintf (2, (" Elevation: gen2 size: %d, gen2 plan size: %d, no progress, elevation = locked",
30688                      generation_size (max_generation),
30689                      generation_plan_size (max_generation)));
30690             //no progress -> lock
30691             settings.should_lock_elevation = TRUE;
30692         }
30693     }
30694
30695     if (settings.pause_mode == pause_no_gc)
30696     {
30697         should_compact = TRUE;
30698         if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_plan_allocated (ephemeral_heap_segment))
30699             < soh_allocation_no_gc)
30700         {
30701             should_expand = TRUE;
30702         }
30703     }
30704
30705     dprintf (2, ("will %s", (should_compact ? "compact" : "sweep")));
30706     return should_compact;
30707 }
30708
30709 size_t align_lower_good_size_allocation (size_t size)
30710 {
30711     return (size/64)*64;
30712 }
30713
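// A conservative estimate of the gen0 space we would like to see available:
// two thirds of the current gen0 budget, but at least twice gen0's minimum
// budget.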
30714 size_t gc_heap::approximate_new_allocation()
30715 {
30716     dynamic_data* dd0 = dynamic_data_of (0);
30717     return max (2*dd_min_size (dd0), ((dd_desired_allocation (dd0)*2)/3));
30718 }
30719
30720 // After we did a GC we expect to have at least this 
30721 // much space at the end of the segment to satisfy
30722 // a reasonable amount of allocation requests.
30723 size_t gc_heap::end_space_after_gc()
30724 {
30725     return max ((dd_min_size (dynamic_data_of (0))/2), (END_SPACE_AFTER_GC + Align (min_obj_size)));
30726 }
30727
30728 BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
30729 {
30730     uint8_t* start = 0;
30731     
30732     if ((tp == tuning_deciding_condemned_gen) ||
30733         (tp == tuning_deciding_compaction))
30734     {
30735         start = (settings.concurrent ? alloc_allocated : heap_segment_allocated (ephemeral_heap_segment));
30736         if (settings.concurrent)
30737         {
30738             dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (alloc_allocated)", 
30739                 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
30740         }
30741         else
30742         {
30743             dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (allocated)", 
30744                 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment))));
30745         }
30746     }
30747     else if (tp == tuning_deciding_expansion)
30748     {
30749         start = heap_segment_plan_allocated (ephemeral_heap_segment);
30750         dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment based on plan", 
30751             (size_t)(heap_segment_reserved (ephemeral_heap_segment) - start)));
30752     }
30753     else
30754     {
30755         assert (tp == tuning_deciding_full_gc);
30756         dprintf (GTC_LOG, ("FGC: %Id left at the end of ephemeral segment (alloc_allocated)", 
30757             (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
30758         start = alloc_allocated;
30759     }
30760     
30761     if (start == 0) // empty ephemeral generations
30762     {
30763         assert (tp == tuning_deciding_expansion);
30764         // if there are no survivors in the ephemeral segment, 
30765         // this should be the beginning of ephemeral segment.
30766         start = generation_allocation_pointer (generation_of (max_generation));
30767         assert (start == heap_segment_mem (ephemeral_heap_segment));
30768     }
30769
30770     if (tp == tuning_deciding_expansion)
30771     {
30772         assert (settings.condemned_generation >= (max_generation-1));
30773         size_t gen0size = approximate_new_allocation();
30774         size_t eph_size = gen0size;
30775
30776         for (int j = 1; j <= max_generation-1; j++)
30777         {
30778             eph_size += 2*dd_min_size (dynamic_data_of(j));
30779         }
30780         
30781         // We must find room for one large object and enough room for gen0size
30782         if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > eph_size)
30783         {
30784             dprintf (3, ("Enough room before end of segment"));
30785             return TRUE;
30786         }
30787         else
30788         {
30789             size_t room = align_lower_good_size_allocation
30790                 (heap_segment_reserved (ephemeral_heap_segment) - start);
30791             size_t end_seg = room;
30792
30793             //look at the plug free space
30794             size_t largest_alloc = END_SPACE_AFTER_GC + Align (min_obj_size);
30795             BOOL large_chunk_found = FALSE;
30796             size_t bos = 0;
30797             uint8_t* gen0start = generation_plan_allocation_start (youngest_generation);
30798             dprintf (3, ("ephemeral_gen_fit_p: gen0 plan start: %Ix", (size_t)gen0start));
30799             if (gen0start == 0)
30800                 return FALSE;
30801             dprintf (3, ("ephemeral_gen_fit_p: room before free list search %Id, needed: %Id",
30802                          room, gen0size));
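            // Walk the pinned plug queue: free space in front of pinned plugs
            // that will land in gen0 also counts toward room, and at least one
            // of those chunks (or the end of the segment) must be big enough
            // for largest_alloc.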
30803             while ((bos < mark_stack_bos) &&
30804                    !((room >= gen0size) && large_chunk_found))
30805             {
30806                 uint8_t* plug = pinned_plug (pinned_plug_of (bos));
30807                 if (in_range_for_segment (plug, ephemeral_heap_segment))
30808                 {
30809                     if (plug >= gen0start)
30810                     {
30811                         size_t chunk = align_lower_good_size_allocation (pinned_len (pinned_plug_of (bos)));
30812                         room += chunk;
30813                         if (!large_chunk_found)
30814                         {
30815                             large_chunk_found = (chunk >= largest_alloc);
30816                         }
30817                         dprintf (3, ("ephemeral_gen_fit_p: room now %Id, large chunk: %Id",
30818                                      room, large_chunk_found));
30819                     }
30820                 }
30821                 bos++;
30822             }
30823
30824             if (room >= gen0size)
30825             {
30826                 if (large_chunk_found)
30827                 {
30828                     sufficient_gen0_space_p = TRUE;
30829
30830                     dprintf (3, ("Enough room"));
30831                     return TRUE;
30832                 }
30833                 else
30834                 {
30835                     // now we need to find largest_alloc at the end of the segment.
30836                     if (end_seg >= end_space_after_gc())
30837                     {
30838                         dprintf (3, ("Enough room (may need end of seg)"));
30839                         return TRUE;
30840                     }
30841                 }
30842             }
30843
30844             dprintf (3, ("Not enough room"));
30845             return FALSE;
30846         }
30847     }
30848     else
30849     {
30850         size_t end_space = 0;
30851         dynamic_data* dd = dynamic_data_of (0);
30852         if ((tp == tuning_deciding_condemned_gen) ||
30853             (tp == tuning_deciding_full_gc))
30854         {
30855             end_space = max (2*dd_min_size (dd), end_space_after_gc());
30856         }
30857         else
30858         {
30859             assert (tp == tuning_deciding_compaction);
30860             end_space = approximate_new_allocation();
30861         }
30862
30863         if (!((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > end_space))
30864         {
30865             dprintf (GTC_LOG, ("ephemeral_gen_fit_p: does not fit without compaction"));
30866         }
30867         return ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > end_space);
30868     }
30869 }
30870
30871 CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_bytes)
30872 {
30873     //create a new alloc context because gen3context is shared.
30874     alloc_context acontext;
30875     acontext.alloc_ptr = 0;
30876     acontext.alloc_limit = 0;
30877     acontext.alloc_bytes = 0;
30878 #ifdef MULTIPLE_HEAPS
30879     acontext.set_alloc_heap(vm_heap);
30880 #endif //MULTIPLE_HEAPS
30881
30882 #ifdef BIT64
30883     size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size));
30884 #else
30885     size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size));
30886 #endif
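    // Cap the request well below the max positive size: AlignQword can round
    // jsize up by as many as 7 bytes, and room is left for a minimal object,
    // so the size computations below cannot overflow.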
30887
30888     if (jsize >= maxObjectSize)
30889     {
30890         if (GCConfig::GetBreakOnOOM())
30891         {
30892             GCToOSInterface::DebugBreak();
30893         }
30894         return NULL;
30895     }
30896
30897     size_t size = AlignQword (jsize);
30898     int align_const = get_alignment_constant (FALSE);
30899 #ifdef FEATURE_LOH_COMPACTION
30900     size_t pad = Align (loh_padding_obj_size, align_const);
30901 #else
30902     size_t pad = 0;
30903 #endif //FEATURE_LOH_COMPACTION
30904
30905     assert (size >= Align (min_obj_size, align_const));
30906 #ifdef _MSC_VER
30907 #pragma inline_depth(0)
30908 #endif //_MSC_VER
30909     if (! allocate_more_space (&acontext, (size + pad), max_generation+1))
30910     {
30911         return 0;
30912     }
30913
30914 #ifdef _MSC_VER
30915 #pragma inline_depth(20)
30916 #endif //_MSC_VER
30917
30918 #ifdef MARK_ARRAY
30919     uint8_t* current_lowest_address = lowest_address;
30920     uint8_t* current_highest_address = highest_address;
30921 #ifdef BACKGROUND_GC
30922     if (recursive_gc_sync::background_running_p())
30923     {
30924         current_lowest_address = background_saved_lowest_address;
30925         current_highest_address = background_saved_highest_address;
30926     }
30927 #endif //BACKGROUND_GC
30928 #endif // MARK_ARRAY
30929
30930 #ifdef FEATURE_LOH_COMPACTION
30931     // The GC allocator made a free object already in this alloc context and
30932     // adjusted the alloc_ptr accordingly.
30933 #endif //FEATURE_LOH_COMPACTION
30934
30935     uint8_t*  result = acontext.alloc_ptr;
30936
30937     assert ((size_t)(acontext.alloc_limit - acontext.alloc_ptr) == size);
30938     alloc_bytes += size;
30939
30940     CObjectHeader* obj = (CObjectHeader*)result;
30941
30942 #ifdef MARK_ARRAY
30943     if (recursive_gc_sync::background_running_p())
30944     {
30945         if ((result < current_highest_address) && (result >= current_lowest_address))
30946         {
30947             dprintf (3, ("Clearing mark bit at address %Ix",
30948                      (size_t)(&mark_array [mark_word_of (result)])));
30949
30950             mark_array_clear_marked (result);
30951         }
30952 #ifdef BACKGROUND_GC
30953         // the object has to cover at least one full mark word (uint32_t)
30954         assert (size > mark_word_size);
30955         if (current_c_gc_state != c_gc_state_free)
30956         {
30957             dprintf (3, ("Concurrent allocation of a large object %Ix",
30958                         (size_t)obj));
30959             //mark the new block specially so we know it is a new object
30960             if ((result < current_highest_address) && (result >= current_lowest_address))
30961             {
30962                 dprintf (3, ("Setting mark bit at address %Ix",
30963                             (size_t)(&mark_array [mark_word_of (result)])));
30964     
30965                 mark_array_set_marked (result);
30966             }
30967         }
30968 #endif //BACKGROUND_GC
30969     }
30970 #endif //MARK_ARRAY
30971
30972     assert (obj != 0);
30973     assert ((size_t)obj == Align ((size_t)obj, align_const));
30974
30975     return obj;
30976 }
30977
30978 void reset_memory (uint8_t* o, size_t sizeo)
30979 {
30980     if (sizeo > 128 * 1024)
30981     {
30982         // We cannot reset the memory for the useful part of a free object.
30983         size_t size_to_skip = min_free_list - plug_skew;
30984
30985         size_t page_start = align_on_page ((size_t)(o + size_to_skip));
30986         size_t size = align_lower_page ((size_t)o + sizeo - size_to_skip - plug_skew) - page_start;
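        // Only whole pages strictly inside the object are reset: the start of
        // the range is rounded up to a page boundary and the end rounded down.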
30987         // Note we need to compensate for an OS bug here. This bug would cause the MEM_RESET to fail
30988         // on write watched memory.
30989         if (reset_mm_p)
30990         {
30991 #ifdef MULTIPLE_HEAPS
30992             bool unlock_p = true;
30993 #else
30994             // We don't do unlock because there could be many processes using workstation GC and it's
30995             // bad perf to have many threads doing unlock at the same time.
30996             bool unlock_p = false;
30997 #endif //MULTIPLE_HEAPS
30998
30999             reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, unlock_p);
31000         }
31001     }
31002 }
31003
31004 void gc_heap::reset_large_object (uint8_t* o)
31005 {
31006     // If it's a large object, allow the O/S to discard the backing store for these pages.
31007     reset_memory (o, size(o));
31008 }
31009
31010 BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
31011 {
31012     BOOL m = FALSE;
31013     // It shouldn't be necessary to do these comparisons because this is only used for blocking
31014     // GCs and LOH segments cannot be out of range.
31015     if ((o >= lowest_address) && (o < highest_address))
31016     {
31017         if (marked (o))
31018         {
31019             if (clearp)
31020             {
31021                 clear_marked (o);
31022                 if (pinned (o))
31023                     clear_pinned(o);
31024             }
31025             m = TRUE;
31026         }
31027         else
31028             m = FALSE;
31029     }
31030     else
31031         m = TRUE;
31032     return m;
31033 }
31034
31035 void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn)
31036 {
31037     // Now walk the portion of memory that is actually being relocated.
31038     walk_relocation (profiling_context, fn);
31039
31040 #ifdef FEATURE_LOH_COMPACTION
31041     if (loh_compacted_p)
31042     {
31043         walk_relocation_for_loh (profiling_context, fn);
31044     }
31045 #endif //FEATURE_LOH_COMPACTION
31046 }
31047
31048 void gc_heap::walk_survivors_for_loh (void* profiling_context, record_surv_fn fn)
31049 {
31050     generation* gen        = large_object_generation;
31051     heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
31052
31053     PREFIX_ASSUME(seg != NULL);
31054
31055     uint8_t* o                = generation_allocation_start (gen);
31056     uint8_t* plug_end         = o;
31057     uint8_t* plug_start       = o;
31058
31059     while (1)
31060     {
31061         if (o >= heap_segment_allocated (seg))
31062         {
31063             seg = heap_segment_next (seg);
31064             if (seg == 0)
31065                 break;
31066             else
31067                 o = heap_segment_mem (seg);
31068         }
31069         if (large_object_marked(o, FALSE))
31070         {
31071             plug_start = o;
31072
31073             BOOL m = TRUE;
31074             while (m)
31075             {
31076                 o = o + AlignQword (size (o));
31077                 if (o >= heap_segment_allocated (seg))
31078                 {
31079                     break;
31080                 }
31081                 m = large_object_marked (o, FALSE);
31082             }
31083
31084             plug_end = o;
31085
31086             fn (plug_start, plug_end, 0, profiling_context, false, false);
31087         }
31088         else
31089         {
31090             while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
31091             {
31092                 o = o + AlignQword (size (o));
31093             }
31094         }
31095     }
31096 }
31097
31098 #ifdef BACKGROUND_GC
31099
31100 BOOL gc_heap::background_object_marked (uint8_t* o, BOOL clearp)
31101 {
31102     BOOL m = FALSE;
31103     if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
31104     {
31105         if (mark_array_marked (o))
31106         {
31107             if (clearp)
31108             {
31109                 mark_array_clear_marked (o);
31110                 //dprintf (3, ("mark array bit for object %Ix is cleared", o));
31111                 dprintf (3, ("CM: %Ix", o));
31112             }
31113             m = TRUE;
31114         }
31115         else
31116             m = FALSE;
31117     }
31118     else
31119         m = TRUE;
31120
31121     dprintf (3, ("o %Ix(%d) %s", o, size(o), (m ? "was bm" : "was NOT bm")));
31122     return m;
31123 }
31124
31125 void gc_heap::background_delay_delete_loh_segments()
31126 {
31127     generation* gen = large_object_generation;
31128     heap_segment* seg = heap_segment_rw (generation_start_segment (large_object_generation));
31129     heap_segment* prev_seg = 0;
31130
31131     while (seg)
31132     {
31133         heap_segment* next_seg = heap_segment_next (seg);
31134         if (seg->flags & heap_segment_flags_loh_delete)
31135         {
31136             dprintf (3, ("deleting %Ix-%Ix-%Ix", (size_t)seg, heap_segment_allocated (seg), heap_segment_reserved (seg)));
31137             delete_heap_segment (seg, (GCConfig::GetRetainVM() != 0));
31138             heap_segment_next (prev_seg) = next_seg;
31139         }
31140         else
31141         {
31142             prev_seg = seg;
31143         }
31144
31145         seg = next_seg;
31146     }
31147 }
31148
31149 uint8_t* gc_heap::background_next_end (heap_segment* seg, BOOL large_objects_p)
31150 {
31151     return
31152         (large_objects_p ? heap_segment_allocated (seg) : heap_segment_background_allocated (seg));
31153 }
31154
31155 void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b)
31156 {
31157 #ifdef VERIFY_HEAP
31158     if (end > start)
31159     {
31160         if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
31161            !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL))
31162         {
31163             dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end));
31164             memset (start, b, (end - start));
31165         }
31166     }
31167 #endif //VERIFY_HEAP
31168 }
31169
31170 void gc_heap::generation_delete_heap_segment (generation* gen, 
31171                                               heap_segment* seg,
31172                                               heap_segment* prev_seg,
31173                                               heap_segment* next_seg)
31174 {
31175     dprintf (3, ("bgc sweep: deleting seg %Ix", seg));
31176     if (gen == large_object_generation)
31177     {
31178         dprintf (3, ("Preparing empty large segment %Ix for deletion", (size_t)seg));
31179
31180         // We cannot thread segs in here onto freeable_large_heap_segment because 
31181         // grow_brick_card_tables could be committing mark array which needs to read 
31182         // the seg list. So we delay it till next time we suspend EE.
31183         seg->flags |= heap_segment_flags_loh_delete;
31184         // Since we will be decommitting the seg, we need to prevent heap verification
31185         // to verify this segment.
31186         heap_segment_allocated (seg) = heap_segment_mem (seg);
31187     }
31188     else
31189     {
31190         if (seg == ephemeral_heap_segment)
31191         {
31192             FATAL_GC_ERROR();
31193         }
31194
31195         heap_segment_next (next_seg) = prev_seg;
31196
31197         dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg));
31198         heap_segment_next (seg) = freeable_small_heap_segment;
31199         freeable_small_heap_segment = seg;
31200     }
31201
31202     decommit_heap_segment (seg);
31203     seg->flags |= heap_segment_flags_decommitted;
31204
31205     set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);
31206 }
31207
31208 void gc_heap::process_background_segment_end (heap_segment* seg, 
31209                                           generation* gen,
31210                                           uint8_t* last_plug_end,
31211                                           heap_segment* start_seg,
31212                                           BOOL* delete_p)
31213 {
31214     *delete_p = FALSE;
31215     uint8_t* allocated = heap_segment_allocated (seg);
31216     uint8_t* background_allocated = heap_segment_background_allocated (seg);
31217     BOOL loh_p = heap_segment_loh_p (seg);
31218
31219     dprintf (3, ("Processing end of background segment [%Ix, %Ix[(%Ix[)", 
31220                 (size_t)heap_segment_mem (seg), background_allocated, allocated));
31221
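    // On a SOH segment, allocated != background_allocated means foreground GCs
    // promoted objects into this segment after background mark recorded
    // background_allocated; the space between the last background plug and
    // background_allocated becomes a free object in front of them.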
31222     if (!loh_p && (allocated != background_allocated))
31223     {
31224         assert (gen != large_object_generation);
31225
31226         dprintf (3, ("Make a free object before newly promoted objects [%Ix, %Ix[", 
31227                     (size_t)last_plug_end, background_allocated));
31228         thread_gap (last_plug_end, background_allocated - last_plug_end, generation_of (max_generation));
31229
31230
31231         fix_brick_to_highest (last_plug_end, background_allocated);
31232
31233         // When we allowed FGCs while going through gaps, we could have erased the brick
31234         // that corresponds to bgc_allocated because we had to update the brick there;
31235         // recover it here.
31236         fix_brick_to_highest (background_allocated, background_allocated);
31237     }
31238     else
31239     {
31240         // by default, if allocated == background_allocated, it can't
31241         // be the ephemeral segment.
31242         if (seg == ephemeral_heap_segment)
31243         {
31244             FATAL_GC_ERROR();
31245         }
31246
31247         if (allocated == heap_segment_mem (seg))
31248         {
31249             // this can happen with LOH segments when multiple threads
31250             // allocate new segments and not all of them were needed to
31251             // satisfy allocation requests.
31252             assert (gen == large_object_generation);
31253         }
31254
31255         if (last_plug_end == heap_segment_mem (seg))
31256         {
31257             if (seg != start_seg)
31258             {
31259                 *delete_p = TRUE;
31260             }
31261
31262             dprintf (3, ("Segment allocated is %Ix (beginning of this seg) - %s be deleted",
31263                         (size_t)allocated, (*delete_p ? "should" : "should not")));
31264         }
31265         else
31266         {
31267             dprintf (3, ("Trimming seg to %Ix[", (size_t)last_plug_end));
31268             heap_segment_allocated (seg) = last_plug_end;
31269             set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);
31270
31271             decommit_heap_segment_pages (seg, 0);
31272         }
31273     }
31274
31275     dprintf (3, ("verifying seg %Ix's mark array was completely cleared", seg));
31276     bgc_verify_mark_array_cleared (seg);
31277 }
31278
31279 void gc_heap::process_n_background_segments (heap_segment* seg, 
31280                                              heap_segment* prev_seg,
31281                                              generation* gen)
31282 {
31283     assert (gen != large_object_generation);
31284
31285     while (seg)
31286     {
31287         dprintf (2, ("processing seg %Ix (not seen by bgc mark)", seg));
31288         heap_segment* next_seg = heap_segment_next (seg);
31289
31290         if (heap_segment_read_only_p (seg))
31291         {
31292             prev_seg = seg;
31293         }
31294         else
31295         {
31296             if (heap_segment_allocated (seg) == heap_segment_mem (seg))
31297             {
31298                 // This can happen if we have a LOH segment where nothing survived,
31299                 // or a SOH segment that was allocated by a gen1 GC while BGC was running
31300                 // and where nothing survived the last gen1 GC.
31301                 generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
31302             }
31303             else
31304             {
31305                 prev_seg = seg;
31306             }
31307         }
31308
31309         verify_soh_segment_list();
31310         seg = next_seg;
31311     }
31312 }
31313
31314 inline
31315 BOOL gc_heap::fgc_should_consider_object (uint8_t* o,
31316                                           heap_segment* seg,
31317                                           BOOL consider_bgc_mark_p, 
31318                                           BOOL check_current_sweep_p, 
31319                                           BOOL check_saved_sweep_p)
31320 {
31321     // the logic for this function must be kept in sync with the analogous function
31322     // in ToolBox\SOS\Strike\gc.cpp
31323
31324     // TRUE means we don't need to check the bgc mark bit
31325     // FALSE means we do.
31326     BOOL no_bgc_mark_p = FALSE;
31327
31328     if (consider_bgc_mark_p)
31329     {
31330         if (check_current_sweep_p && (o < current_sweep_pos))
31331         {
31332             dprintf (3, ("no bgc mark - o: %Ix < cs: %Ix", o, current_sweep_pos));
31333             no_bgc_mark_p = TRUE;
31334         }
31335
31336         if (!no_bgc_mark_p)
31337         {
31338             if(check_saved_sweep_p && (o >= saved_sweep_ephemeral_start))
31339             {
31340                 dprintf (3, ("no bgc mark - o: %Ix >= ss: %Ix", o, saved_sweep_ephemeral_start));
31341                 no_bgc_mark_p = TRUE;
31342             }
31343
31344             if (!check_saved_sweep_p)
31345             {
31346                 uint8_t* background_allocated = heap_segment_background_allocated (seg);
31347                 // if this was the saved ephemeral segment, check_saved_sweep_p 
31348                 // would've been true.
31349                 assert (heap_segment_background_allocated (seg) != saved_sweep_ephemeral_start);
31350                 // background_allocated could be 0 for the new segments acquired during bgc
31351                 // sweep and we still want no_bgc_mark_p to be true.
31352                 if (o >= background_allocated)
31353                 {
31354                     dprintf (3, ("no bgc mark - o: %Ix >= ba: %Ix", o, background_allocated));
31355                     no_bgc_mark_p = TRUE;
31356                 }
31357             }
31358         }
31359     }
31360     else
31361     {
31362         no_bgc_mark_p = TRUE;
31363     }
31364
31365     dprintf (3, ("bgc mark %Ix: %s (bm: %s)", o, (no_bgc_mark_p ? "no" : "yes"), (background_object_marked (o, FALSE) ? "yes" : "no")));
31366     return (no_bgc_mark_p ? TRUE : background_object_marked (o, FALSE));
31367 }
31368
31369 // consider_bgc_mark_p tells you if you need to care about the bgc mark bit at all
31370 // if it's TRUE, check_current_sweep_p tells you if you should consider the
31371 // current sweep position or not.
31372 void gc_heap::should_check_bgc_mark (heap_segment* seg, 
31373                                      BOOL* consider_bgc_mark_p, 
31374                                      BOOL* check_current_sweep_p,
31375                                      BOOL* check_saved_sweep_p)
31376 {
31377     // the logic for this function must be kept in sync with the analogous function
31378     // in ToolBox\SOS\Strike\gc.cpp
31379     *consider_bgc_mark_p = FALSE;
31380     *check_current_sweep_p = FALSE;
31381     *check_saved_sweep_p = FALSE;
31382
31383     if (current_c_gc_state == c_gc_state_planning)
31384     {
31385         // We are doing the current_sweep_pos comparison here because we have yet to 
31386         // turn on the swept flag for the segment but in_range_for_segment will return
31387         // FALSE if the address is the same as reserved.
31388         if ((seg->flags & heap_segment_flags_swept) || (current_sweep_pos == heap_segment_reserved (seg)))
31389         {
31390             dprintf (3, ("seg %Ix is already swept by bgc", seg));
31391         }
31392         else
31393         {
31394             *consider_bgc_mark_p = TRUE;
31395
31396             dprintf (3, ("seg %Ix hasn't been swept by bgc", seg));
31397
31398             if (seg == saved_sweep_ephemeral_seg)
31399             {
31400                 dprintf (3, ("seg %Ix is the saved ephemeral seg", seg));
31401                 *check_saved_sweep_p = TRUE;
31402             }
31403
31404             if (in_range_for_segment (current_sweep_pos, seg))
31405             {
31406                 dprintf (3, ("current sweep pos is %Ix and within seg %Ix", 
31407                               current_sweep_pos, seg));
31408                 *check_current_sweep_p = TRUE;
31409             }
31410         }
31411     }
31412 }
31413
31414 void gc_heap::background_ephemeral_sweep()
31415 {
31416     dprintf (3, ("bgc ephemeral sweep"));
31417
31418     int align_const = get_alignment_constant (TRUE);
31419
31420     saved_sweep_ephemeral_seg = ephemeral_heap_segment;
31421     saved_sweep_ephemeral_start = generation_allocation_start (generation_of (max_generation - 1));
31422
31423     // Since we don't want to interfere with gen0 allocation while we are threading gen0 free list,
31424     // we thread onto a list first then publish it when we are done.
31425     allocator youngest_free_list;
31426     size_t youngest_free_list_space = 0;
31427     size_t youngest_free_obj_space = 0;
31428
31429     youngest_free_list.clear();
31430
31431     for (int i = 0; i <= (max_generation - 1); i++)
31432     {
31433         generation* gen_to_reset = generation_of (i);
31434         assert (generation_free_list_space (gen_to_reset) == 0);
31435         // Can only assert free_list_space is 0, not free_obj_space as the allocator could have added 
31436         // something there.
31437     }
31438
31439     for (int i = (max_generation - 1); i >= 0; i--)
31440     {
31441         generation* current_gen = generation_of (i);
31442         uint8_t* o = generation_allocation_start (current_gen);
31443         //Skip the generation gap object
31444         o = o + Align(size (o), align_const);
31445         uint8_t* end = ((i > 0) ?
31446                      generation_allocation_start (generation_of (i - 1)) : 
31447                      heap_segment_allocated (ephemeral_heap_segment));
31448
31449         uint8_t* plug_end = o;
31450         uint8_t* plug_start = o;
31451         BOOL marked_p = FALSE;
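        // Walk the objects in this generation: runs of marked objects form
        // plugs, and each gap since the previous plug is either threaded onto
        // the generation's free list (gen1 and up) or, for gen0, recorded as
        // free list or unused-array space depending on its size.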
31452
31453         while (o < end)
31454         {
31455             marked_p = background_object_marked (o, TRUE);
31456             if (marked_p)
31457             {
31458                 plug_start = o;
31459                 size_t plug_size = plug_start - plug_end;
31460
31461                 if (i >= 1)
31462                 {
31463                     thread_gap (plug_end, plug_size, current_gen);
31464                 }
31465                 else
31466                 {
31467                     if (plug_size > 0)
31468                     {
31469                         make_unused_array (plug_end, plug_size);
31470                         if (plug_size >= min_free_list)
31471                         {
31472                             youngest_free_list_space += plug_size;
31473                             youngest_free_list.thread_item (plug_end, plug_size);
31474                         }
31475                         else
31476                         {
31477                             youngest_free_obj_space += plug_size;
31478                         }
31479                     }
31480                 }
31481
31482                 fix_brick_to_highest (plug_end, plug_start);
31483                 fix_brick_to_highest (plug_start, plug_start);
31484
31485                 BOOL m = TRUE;
31486                 while (m)
31487                 {
31488                     o = o + Align (size (o), align_const);
31489                     if (o >= end)
31490                     {
31491                         break;
31492                     }
31493
31494                     m = background_object_marked (o, TRUE);
31495                 }
31496                 plug_end = o;
31497                 dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
31498             }
31499             else
31500             {
31501                 while ((o < end) && !background_object_marked (o, FALSE))
31502                 {
31503                     o = o + Align (size (o), align_const);
31504                 }
31505             }
31506         }
31507
31508         if (plug_end != end)
31509         {
31510             if (i >= 1)
31511             {
31512                 thread_gap (plug_end, end - plug_end, current_gen);
31513                 fix_brick_to_highest (plug_end, end);
31514             }
31515             else
31516             {
31517                 heap_segment_allocated (ephemeral_heap_segment) = plug_end;
31518                 // the following line is temporary.
31519                 heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end;
31520 #ifdef VERIFY_HEAP
31521                 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
31522                 {
31523                     make_unused_array (plug_end, (end - plug_end));
31524                 }
31525 #endif //VERIFY_HEAP
31526             }
31527         }
31528
31529         dd_fragmentation (dynamic_data_of (i)) = 
31530             generation_free_list_space (current_gen) + generation_free_obj_space (current_gen);
31531     }
31532
31533     generation* youngest_gen = generation_of (0);
31534     generation_free_list_space (youngest_gen) = youngest_free_list_space;
31535     generation_free_obj_space (youngest_gen) = youngest_free_obj_space;
31536     dd_fragmentation (dynamic_data_of (0)) = youngest_free_list_space + youngest_free_obj_space;
31537     generation_allocator (youngest_gen)->copy_with_no_repair (&youngest_free_list);
31538 }
31539
31540 void gc_heap::background_sweep()
31541 {
31542     generation* gen         = generation_of (max_generation);
31543     dynamic_data* dd        = dynamic_data_of (max_generation);
31544     // For SOH segments we go backwards.
31545     heap_segment* start_seg = ephemeral_heap_segment;
31546     PREFIX_ASSUME(start_seg != NULL);
31547     heap_segment* fseg      = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
31548     heap_segment* seg       = start_seg;
31549     uint8_t* o                 = heap_segment_mem (seg);
31550
31551     heap_segment* prev_seg = heap_segment_next (seg);
31552     int align_const        = get_alignment_constant (TRUE);
31553     if (seg == fseg)
31554     {
31555         assert (o == generation_allocation_start (generation_of (max_generation)));
31556         o = o + Align(size (o), align_const);
31557     }
31558
31559     uint8_t* plug_end      = o;
31560     uint8_t* plug_start    = o;
31561     next_sweep_obj         = o;
31562     current_sweep_pos      = o;
31563
31564     //uint8_t* end              = background_next_end (seg, (gen == large_object_generation));
31565     uint8_t* end              = heap_segment_background_allocated (seg);
31566     BOOL delete_p          = FALSE;
31567
31568     //concurrent_print_time_delta ("finished with mark and start with sweep");
31569     concurrent_print_time_delta ("Sw");
31570     dprintf (2, ("---- (GC%d)Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));
31571
31572     //block concurrent allocation for large objects
31573     dprintf (3, ("lh state: planning"));
31574     if (gc_lh_block_event.IsValid())
31575     {
31576         gc_lh_block_event.Reset();
31577     }
31578
31579     for (int i = 0; i <= (max_generation + 1); i++)
31580     {
31581         generation* gen_to_reset = generation_of (i);
31582         generation_allocator (gen_to_reset)->clear();
31583         generation_free_list_space (gen_to_reset) = 0;
31584         generation_free_obj_space (gen_to_reset) = 0;
31585         generation_free_list_allocated (gen_to_reset) = 0;
31586         generation_end_seg_allocated (gen_to_reset) = 0;
31587         generation_condemned_allocated (gen_to_reset) = 0; 
31588         // Reset the allocation context so the foreground GC can allocate into the older generations.
31589         generation_allocation_pointer (gen_to_reset) = 0;
31590         generation_allocation_limit (gen_to_reset) = 0;
31591         generation_allocation_segment (gen_to_reset) = heap_segment_rw (generation_start_segment (gen_to_reset));
31592     }
31593
31594     FIRE_EVENT(BGC2ndNonConEnd);
31595
31596     loh_alloc_thread_count = 0;
31597     current_bgc_state = bgc_sweep_soh;
31598     verify_soh_segment_list();
31599
31600 #ifdef FEATURE_BASICFREEZE
31601     if ((generation_start_segment (gen) != ephemeral_heap_segment) &&
31602         ro_segments_in_range)
31603     {
31604         sweep_ro_segments (generation_start_segment (gen));
31605     }
31606 #endif // FEATURE_BASICFREEZE
31607
31608     //TODO BACKGROUND_GC: can we move this to where we switch to the LOH?
31609     if (current_c_gc_state != c_gc_state_planning)
31610     {
31611         current_c_gc_state = c_gc_state_planning;
31612     }
31613
31614     concurrent_print_time_delta ("Swe");
31615
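    // Mark every LOH segment as not yet swept and snapshot its allocated size;
    // the sweep below must not go past that point.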
31616     heap_segment* loh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation + 1)));
31617     PREFIX_ASSUME(loh_seg != NULL);
31618     while (loh_seg)
31619     {
31620         loh_seg->flags &= ~heap_segment_flags_swept;
31621         heap_segment_background_allocated (loh_seg) = heap_segment_allocated (loh_seg);
31622         loh_seg = heap_segment_next_rw (loh_seg);
31623     }
31624
31625 #ifdef MULTIPLE_HEAPS
31626     bgc_t_join.join(this, gc_join_restart_ee);
31627     if (bgc_t_join.joined())
31628 #endif //MULTIPLE_HEAPS 
31629     {
31630 #ifdef MULTIPLE_HEAPS
31631         dprintf(2, ("Starting BGC threads for resuming EE"));
31632         bgc_t_join.restart();
31633 #endif //MULTIPLE_HEAPS
31634     }
31635
31636     if (heap_number == 0)
31637     {
31638         restart_EE ();
31639     }
31640
31641     FIRE_EVENT(BGC2ndConBegin);
31642
31643     background_ephemeral_sweep();
31644
31645     concurrent_print_time_delta ("Swe eph");
31646
31647 #ifdef MULTIPLE_HEAPS
31648     bgc_t_join.join(this, gc_join_after_ephemeral_sweep);
31649     if (bgc_t_join.joined())
31650 #endif //MULTIPLE_HEAPS
31651     {
31652 #ifdef FEATURE_EVENT_TRACE
31653         bgc_heap_walk_for_etw_p = GCEventStatus::IsEnabled(GCEventProvider_Default, 
31654                                                            GCEventKeyword_GCHeapSurvivalAndMovement, 
31655                                                            GCEventLevel_Information);
31656 #endif //FEATURE_EVENT_TRACE
31657
31658         leave_spin_lock (&gc_lock);
31659
31660 #ifdef MULTIPLE_HEAPS
31661         dprintf(2, ("Starting BGC threads for BGC sweeping"));
31662         bgc_t_join.restart();
31663 #endif //MULTIPLE_HEAPS
31664     }
31665
31666     disable_preemptive (true);
31667
31668     dprintf (2, ("bgs: sweeping gen2 objects"));
31669     dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31670                     (size_t)heap_segment_mem (seg),
31671                     (size_t)heap_segment_allocated (seg),
31672                     (size_t)heap_segment_background_allocated (seg)));
31673
31674     int num_objs = 256;
31675     int current_num_objs = 0;
31676     heap_segment* next_seg = 0;
31677
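    // Main sweep loop: walk the objects on the current segment; on reaching the
    // end of a segment, finish it (possibly deleting it), advance to the next
    // one (backwards for SOH, forwards for LOH), and switch from SOH to LOH
    // once the SOH segments are exhausted.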
31678     while (1)
31679     {
31680         if (o >= end)
31681         {
31682             if (gen == large_object_generation)
31683             {
31684                 next_seg = heap_segment_next (seg);
31685             }
31686             else
31687             {
31688                 next_seg = heap_segment_prev (fseg, seg);
31689             }
31690
31691             delete_p = FALSE;
31692
31693             if (!heap_segment_read_only_p (seg))
31694             {
31695                 if (gen == large_object_generation)
31696                 {
31697                     // We can treat all LOH segments as being in the bgc domain,
31698                     // regardless of whether we saw them during bgc mark, because
31699                     // we don't allow LOH allocations during bgc sweep anyway -
31700                     // the LOH segments can't change.
31701                     process_background_segment_end (seg, gen, plug_end, 
31702                                                     start_seg, &delete_p);
31703                 }
31704                 else
31705                 {
31706                     assert (heap_segment_background_allocated (seg) != 0);
31707                     process_background_segment_end (seg, gen, plug_end, 
31708                                                     start_seg, &delete_p);
31709
31710                     assert (next_seg || !delete_p);
31711                 }
31712             }
31713
31714             if (delete_p)
31715             {
31716                 generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
31717             }
31718             else
31719             {
31720                 prev_seg = seg;
31721                 dprintf (2, ("seg %Ix has been swept", seg));
31722                 seg->flags |= heap_segment_flags_swept;
31723             }
31724
31725             verify_soh_segment_list();
31726
31727             seg = next_seg;
31728
31729             dprintf (GTC_LOG, ("seg: %Ix, next_seg: %Ix, prev_seg: %Ix", seg, next_seg, prev_seg));
31730             
31731             if (seg == 0)
31732             {
31733                 generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
31734
31735                 PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
31736
31737                 if (gen != large_object_generation)
31738                 {
31739                     dprintf (2, ("bgs: sweeping gen3 objects"));
31740                     concurrent_print_time_delta ("Swe SOH");
31741                     FIRE_EVENT(BGC1stSweepEnd, 0);
31742
31743                     enter_spin_lock (&more_space_lock_loh);
31744                     add_saved_spinlock_info (true, me_acquire, mt_bgc_loh_sweep);
31745
31746                     concurrent_print_time_delta ("Swe LOH took msl");
31747
31748                     // We wait till all allocating threads are completely done.
31749                     int spin_count = yp_spin_count_unit;
31750                     while (loh_alloc_thread_count)
31751                     {
31752                         spin_and_switch (spin_count, (loh_alloc_thread_count == 0));
31753                     }
31754
31755                     current_bgc_state = bgc_sweep_loh;
31756                     gen = generation_of (max_generation+1);
31757                     start_seg = heap_segment_rw (generation_start_segment (gen));
31758
31759                     PREFIX_ASSUME(start_seg != NULL);
31760
31761                     seg = start_seg;
31762                     prev_seg = 0;
31763                     o = generation_allocation_start (gen);
31764                     assert (method_table (o) == g_gc_pFreeObjectMethodTable);
31765                     align_const = get_alignment_constant (FALSE);
31766                     o = o + Align(size (o), align_const);
31767                     plug_end = o;
31768                     end = heap_segment_allocated (seg);
31769                     dprintf (2, ("sweeping gen3 objects"));
31770                     generation_free_obj_space (gen) = 0;
31771                     generation_allocator (gen)->clear();
31772                     generation_free_list_space (gen) = 0;
31773
31774                     dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31775                                     (size_t)heap_segment_mem (seg),
31776                                     (size_t)heap_segment_allocated (seg),
31777                                     (size_t)heap_segment_background_allocated (seg)));
31778                 }
31779                 else
31780                     break;
31781             }
31782             else
31783             {
31784                 o = heap_segment_mem (seg);
31785                 if (seg == fseg)
31786                 {
31787                     assert (gen != large_object_generation);
31788                     assert (o == generation_allocation_start (generation_of (max_generation)));
31789                     align_const = get_alignment_constant (TRUE);
31790                     o = o + Align(size (o), align_const);
31791                 }
31792
31793                 plug_end = o;
31794                 current_sweep_pos = o;
31795                 next_sweep_obj = o;
31796                 
31797                 allow_fgc();
31798                 end = background_next_end (seg, (gen == large_object_generation));
31799                 dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31800                                 (size_t)heap_segment_mem (seg),
31801                                 (size_t)heap_segment_allocated (seg),
31802                                 (size_t)heap_segment_background_allocated (seg)));
31803             }
31804         }
31805
31806         if ((o < end) && background_object_marked (o, TRUE))
31807         {
31808             plug_start = o;
31809             if (gen == large_object_generation)
31810             {
31811                 dprintf (2, ("loh fr: [%Ix-%Ix[(%Id)", plug_end, plug_start, plug_start-plug_end));
31812             }
31813
31814             thread_gap (plug_end, plug_start-plug_end, gen);
31815             if (gen != large_object_generation)
31816             {
31817                 add_gen_free (max_generation, plug_start-plug_end);
31818                 fix_brick_to_highest (plug_end, plug_start);
31819                 // We need to fix the brick for the next plug here because an FGC
31820                 // can happen and must not read a stale brick.
31821                 fix_brick_to_highest (plug_start, plug_start);
31822             }
31823
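            // Walk the run of consecutive marked objects that forms this plug,
            // periodically publishing current_sweep_pos and allowing a
            // foreground GC to run.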
31824             BOOL m = TRUE;
31825
31826             while (m)
31827             {
31828                 next_sweep_obj = o + Align(size (o), align_const);
31829                 current_num_objs++;
31830                 if (current_num_objs >= num_objs)
31831                 {
31832                     current_sweep_pos = next_sweep_obj;
31833
31834                     allow_fgc();
31835                     current_num_objs = 0;
31836                 }
31837
31838                 o = next_sweep_obj;
31839                 if (o >= end)
31840                 {
31841                     break;
31842                 }
31843
31844                 m = background_object_marked (o, TRUE);
31845             }
31846             plug_end = o;
31847             if (gen != large_object_generation)
31848             {
31849                 add_gen_plug (max_generation, plug_end-plug_start);
31850                 dd_survived_size (dd) += (plug_end - plug_start);
31851             }
31852             dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
31853         }
31854         else
31855         {
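            // Skip the run of unmarked (dead) objects; the gap they form is
            // threaded onto the free list when the next plug is found.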
31856             while ((o < end) && !background_object_marked (o, FALSE))
31857             {
31858                 next_sweep_obj = o + Align(size (o), align_const);
31859                 current_num_objs++;
31860                 if (current_num_objs >= num_objs)
31861                 {
31862                     current_sweep_pos = plug_end;
31863                     dprintf (1234, ("f: swept till %Ix", current_sweep_pos));
31864                     allow_fgc();
31865                     current_num_objs = 0;
31866                 }
31867
31868                 o = next_sweep_obj;
31869             }
31870         }
31871     }
31872
31873     size_t total_loh_size = generation_size (max_generation + 1);
31874     size_t total_soh_size = generation_sizes (generation_of (max_generation));
31875
31876     dprintf (GTC_LOG, ("h%d: S: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
31877
31878     dprintf (GTC_LOG, ("end of bgc sweep: gen2 FL: %Id, FO: %Id", 
31879         generation_free_list_space (generation_of (max_generation)),
31880         generation_free_obj_space (generation_of (max_generation))));
31881     dprintf (GTC_LOG, ("h%d: end of bgc sweep: gen3 FL: %Id, FO: %Id", 
31882         heap_number,
31883         generation_free_list_space (generation_of (max_generation + 1)),
31884         generation_free_obj_space (generation_of (max_generation + 1))));
31885
31886     FIRE_EVENT(BGC2ndConEnd);
31887     concurrent_print_time_delta ("background sweep");
31888     
31889     heap_segment* reset_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
31890     PREFIX_ASSUME(reset_seg != NULL);
31891
31892     while (reset_seg)
31893     {
31894         heap_segment_saved_bg_allocated (reset_seg) = heap_segment_background_allocated (reset_seg);
31895         heap_segment_background_allocated (reset_seg) = 0;
31896         reset_seg = heap_segment_next_rw (reset_seg);
31897     }
31898
31899     generation* loh_gen = generation_of (max_generation + 1);
31900     generation_allocation_segment (loh_gen) = heap_segment_rw (generation_start_segment (loh_gen));
31901
31902     // We calculate dynamic data here because if we wait till we signal the lh event, 
31903     // the allocation thread can change the fragmentation and we may read an intermediate
31904     // value (which can be greater than the generation size). Plus by that time it won't 
31905     // be accurate.
31906     compute_new_dynamic_data (max_generation);
31907
31908     enable_preemptive ();
31909
31910 #ifdef MULTIPLE_HEAPS
31911     bgc_t_join.join(this, gc_join_set_state_free);
31912     if (bgc_t_join.joined())
31913 #endif //MULTIPLE_HEAPS
31914     {
31915         // TODO: We are using this join just to set the state. Should
31916         // look into eliminating it - check to make sure things that use 
31917         // this state can live with per heap state like should_check_bgc_mark.
31918         current_c_gc_state = c_gc_state_free;
31919
31920 #ifdef MULTIPLE_HEAPS
31921         dprintf(2, ("Starting BGC threads after background sweep phase"));
31922         bgc_t_join.restart();
31923 #endif //MULTIPLE_HEAPS
31924     }
31925
31926     disable_preemptive (true);
31927
31928     if (gc_lh_block_event.IsValid())
31929     {
31930         gc_lh_block_event.Set();
31931     }
31932
31933     add_saved_spinlock_info (true, me_release, mt_bgc_loh_sweep);
31934     leave_spin_lock (&more_space_lock_loh);
31935
31936     //dprintf (GTC_LOG, ("---- (GC%d)End Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));
31937     dprintf (GTC_LOG, ("---- (GC%d)ESw ----", VolatileLoad(&settings.gc_index)));
31938 }
31939 #endif //BACKGROUND_GC
31940
31941 void gc_heap::sweep_large_objects ()
31942 {
31943     // This min value is for the sake of the dynamic tuning,
31944     // so we know that we are not starting even if we have no
31945     // survivors.
31946     generation* gen        = large_object_generation;
31947     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
31948
31949     PREFIX_ASSUME(start_seg != NULL);
31950
31951     heap_segment* seg      = start_seg;
31952     heap_segment* prev_seg = 0;
31953     uint8_t* o             = generation_allocation_start (gen);
31954     int align_const        = get_alignment_constant (FALSE);
31955
31956     //Skip the generation gap object
31957     o = o + Align(size (o), align_const);
31958
31959     uint8_t* plug_end         = o;
31960     uint8_t* plug_start       = o;
31961
31962     generation_allocator (gen)->clear();
31963     generation_free_list_space (gen) = 0;
31964     generation_free_obj_space (gen) = 0;
31965
31966
31967     dprintf (3, ("sweeping large objects"));
31968     dprintf (3, ("seg: %Ix, [%Ix, %Ix[, starting from %Ix", 
31969                  (size_t)seg,
31970                  (size_t)heap_segment_mem (seg),
31971                  (size_t)heap_segment_allocated (seg),
31972                  o));
31973
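    // Walk the LOH: runs of marked objects become plugs, runs of unmarked
    // objects become free-list gaps, and empty segments (other than the first)
    // are queued for deletion.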
31974     while (1)
31975     {
31976         if (o >= heap_segment_allocated (seg))
31977         {
31978             heap_segment* next_seg = heap_segment_next (seg);
31979             //delete the empty segment if not the only one
31980             if ((plug_end == heap_segment_mem (seg)) &&
31981                 (seg != start_seg) && !heap_segment_read_only_p (seg))
31982             {
31983                 //prepare for deletion
31984                 dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
31985                 assert (prev_seg);
31986                 heap_segment_next (prev_seg) = next_seg;
31987                 heap_segment_next (seg) = freeable_large_heap_segment;
31988                 freeable_large_heap_segment = seg;
31989             }
31990             else
31991             {
31992                 if (!heap_segment_read_only_p (seg))
31993                 {
31994                     dprintf (3, ("Trimming seg to %Ix[", (size_t)plug_end));
31995                     heap_segment_allocated (seg) = plug_end;
31996                     decommit_heap_segment_pages (seg, 0);
31997                 }
31998                 prev_seg = seg;
31999             }
32000             seg = next_seg;
32001             if (seg == 0)
32002                 break;
32003             else
32004             {
32005                 o = heap_segment_mem (seg);
32006                 plug_end = o;
32007                 dprintf (3, ("seg: %Ix, [%Ix, %Ix[", (size_t)seg,
32008                              (size_t)heap_segment_mem (seg),
32009                              (size_t)heap_segment_allocated (seg)));
32010             }
32011         }
32012         if (large_object_marked(o, TRUE))
32013         {
32014             plug_start = o;
32015             //everything between plug_end and plug_start is free
32016             thread_gap (plug_end, plug_start-plug_end, gen);
32017
32018             BOOL m = TRUE;
32019             while (m)
32020             {
32021                 o = o + AlignQword (size (o));
32022                 if (o >= heap_segment_allocated (seg))
32023                 {
32024                     break;
32025                 }
32026                 m = large_object_marked (o, TRUE);
32027             }
32028             plug_end = o;
32029             dprintf (3, ("plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
32030         }
32031         else
32032         {
32033             while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
32034             {
32035                 o = o + AlignQword (size (o));
32036             }
32037         }
32038     }
32039
32040     generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
32041
32042     PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
32043 }
32044
32045 void gc_heap::relocate_in_large_objects ()
32046 {
32047     relocate_args args;
32048     args.low = gc_low;
32049     args.high = gc_high;
32050     args.last_plug = 0;
32051
32052     generation* gen = large_object_generation;
32053
32054     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32055
32056     PREFIX_ASSUME(seg != NULL);
32057
32058     uint8_t* o = generation_allocation_start (gen);
32059
32060     while (1)
32061     {
32062         if (o >= heap_segment_allocated (seg))
32063         {
32064             seg = heap_segment_next_rw (seg);
32065             if (seg == 0)
32066                 break;
32067             else
32068             {
32069                 o = heap_segment_mem (seg);
32070             }
32071         }
32072         while (o < heap_segment_allocated (seg))
32073         {
32074             check_class_object_demotion (o);
32075             if (contain_pointers (o))
32076             {
32077                 dprintf(3, ("Relocating through large object %Ix", (size_t)o));
32078                 go_through_object_nostart (method_table (o), o, size(o), pval,
32079                         {
32080                             reloc_survivor_helper (pval);
32081                         });
32082             }
32083             o = o + AlignQword (size (o));
32084         }
32085     }
32086 }
32087
32088 void gc_heap::mark_through_cards_for_large_objects (card_fn fn,
32089                                                     BOOL relocating)
32090 {
32091     uint8_t*      low               = gc_low;
32092     size_t        end_card          = 0;
32093     generation*   oldest_gen        = generation_of (max_generation+1);
32094     heap_segment* seg               = heap_segment_rw (generation_start_segment (oldest_gen));
32095
32096     PREFIX_ASSUME(seg != NULL);
32097
32098     uint8_t*      beg               = generation_allocation_start (oldest_gen);
32099     uint8_t*      end               = heap_segment_allocated (seg);
32100
32101     size_t  cg_pointers_found = 0;
32102
32103     size_t  card_word_end = (card_of (align_on_card_word (end)) /
32104                              card_word_width);
32105
32106     size_t      n_eph             = 0;
32107     size_t      n_gen             = 0;
32108     size_t      n_card_set        = 0;
32109     uint8_t*    next_boundary = (relocating ?
32110                               generation_plan_allocation_start (generation_of (max_generation -1)) :
32111                               ephemeral_low);
32112
32113     uint8_t*    nhigh         = (relocating ?
32114                               heap_segment_plan_allocated (ephemeral_heap_segment) :
32115                               ephemeral_high);
32116
32117     BOOL          foundp            = FALSE;
32118     uint8_t*      start_address     = 0;
32119     uint8_t*      limit             = 0;
32120     size_t        card              = card_of (beg);
32121     uint8_t*      o                 = beg;
32122 #ifdef BACKGROUND_GC
32123     BOOL consider_bgc_mark_p        = FALSE;
32124     BOOL check_current_sweep_p      = FALSE;
32125     BOOL check_saved_sweep_p        = FALSE;
32126     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
32127 #endif //BACKGROUND_GC
32128
32129     size_t total_cards_cleared = 0;
32130
32131     //dprintf(3,( "scanning large objects from %Ix to %Ix", (size_t)beg, (size_t)end));
32132     dprintf(3, ("CMl: %Ix->%Ix", (size_t)beg, (size_t)end));
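    // Card-walking loop: find runs of set cards, scan the large objects they
    // cover, and call fn on each interesting pointer; cards under which no
    // interesting pointers were found get cleared.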
32133     while (1)
32134     {
32135         if ((o < end) && (card_of(o) > card))
32136         {
32137             dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
32138             if (cg_pointers_found == 0)
32139             {
32140                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)o));
32141                 clear_cards (card, card_of((uint8_t*)o));
32142                 total_cards_cleared += (card_of((uint8_t*)o) - card);
32143             }
32144             n_eph += cg_pointers_found;
32145             cg_pointers_found = 0;
32146             card = card_of ((uint8_t*)o);
32147         }
32148         if ((o < end) && (card >= end_card))
32149         {
32150             foundp = find_card (card_table, card, card_word_end, end_card);
32151             if (foundp)
32152             {
32153                 n_card_set += end_card - card;
32154                 start_address = max (beg, card_address (card));
32155             }
32156             limit = min (end, card_address (end_card));
32157         }
32158         if ((!foundp) || (o >= end) || (card_address (card) >= end))
32159         {
32160             if ((foundp) && (cg_pointers_found == 0))
32161             {
32162                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
32163                            (size_t)card_address(card+1)));
32164                 clear_cards (card, card+1);
32165                 total_cards_cleared += 1;
32166             }
32167             n_eph += cg_pointers_found;
32168             cg_pointers_found = 0;
32169             if ((seg = heap_segment_next_rw (seg)) != 0)
32170             {
32171 #ifdef BACKGROUND_GC
32172                 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
32173 #endif //BACKGROUND_GC
32174                 beg = heap_segment_mem (seg);
32175                 end = compute_next_end (seg, low);
32176                 card_word_end = card_of (align_on_card_word (end)) / card_word_width;
32177                 card = card_of (beg);
32178                 o = beg;
32179                 end_card = 0;
32180                 continue;
32181             }
32182             else
32183             {
32184                 break;
32185             }
32186         }
32187
32188         assert (card_set_p (card));
32189         {
32190             dprintf(3,("card %Ix: o: %Ix, l: %Ix[ ",
32191                        card, (size_t)o, (size_t)limit));
32192
32193             assert (Align (size (o)) >= Align (min_obj_size));
32194             size_t s = size (o);
32195             uint8_t* next_o =  o + AlignQword (s);
32196             Prefetch (next_o);
32197
32198             while (o < limit)
32199             {
32200                 s = size (o);
32201                 assert (Align (s) >= Align (min_obj_size));
32202                 next_o =  o + AlignQword (s);
32203                 Prefetch (next_o);
32204
32205                 dprintf (4, ("|%Ix|", (size_t)o));
32206                 if (next_o < start_address)
32207                 {
32208                     goto end_object;
32209                 }
32210
32211 #ifdef BACKGROUND_GC
32212                 if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
32213                 {
32214                     goto end_object;
32215                 }
32216 #endif //BACKGROUND_GC
32217
32218 #ifdef COLLECTIBLE_CLASS
32219                 if (is_collectible(o))
32220                 {
32221                     BOOL passed_end_card_p = FALSE;
32222
32223                     if (card_of (o) > card)
32224                     {
32225                         passed_end_card_p = card_transition (o, end, card_word_end,
32226                             cg_pointers_found, 
32227                             n_eph, n_card_set,
32228                             card, end_card,
32229                             foundp, start_address,
32230                             limit, total_cards_cleared);
32231                     }
32232
32233                     if ((!passed_end_card_p || foundp) && (card_of (o) == card))
32234                     {
32235                         // card is valid and it covers the head of the object
32236                         if (fn == &gc_heap::relocate_address)
32237                         {
32238                             keep_card_live (o, n_gen, cg_pointers_found);
32239                         }
32240                         else
32241                         {
32242                             uint8_t* class_obj = get_class_object (o);
32243                             mark_through_cards_helper (&class_obj, n_gen,
32244                                                     cg_pointers_found, fn,
32245                                                     nhigh, next_boundary);
32246                         }
32247                     }
32248
32249                     if (passed_end_card_p)
32250                     {
32251                         if (foundp && (card_address (card) < next_o))
32252                         {
32253                             goto go_through_refs;
32254                         }
32255                         else 
32256                         {
32257                             goto end_object;
32258                         }
32259                     }
32260                 }
32261
32262 go_through_refs:
32263 #endif //COLLECTIBLE_CLASS
32264
32265                 if (contain_pointers (o))
32266                 {
32267                     dprintf(3,("Going through %Ix", (size_t)o));
32268
32269                     go_through_object (method_table(o), o, s, poo,
32270                                        start_address, use_start, (o + s),
32271                        {
32272                            if (card_of ((uint8_t*)poo) > card)
32273                            {
32274                                 BOOL passed_end_card_p  = card_transition ((uint8_t*)poo, end,
32275                                         card_word_end,
32276                                         cg_pointers_found, 
32277                                         n_eph, n_card_set,
32278                                         card, end_card,
32279                                         foundp, start_address,
32280                                         limit, total_cards_cleared);
32281
32282                                 if (passed_end_card_p)
32283                                 {
32284                                     if (foundp && (card_address (card) < next_o))
32285                                     {
32286                                         //new_start();
32287                                         {
32288                                             if (ppstop <= (uint8_t**)start_address)
32289                                             {break;}
32290                                             else if (poo < (uint8_t**)start_address)
32291                                             {poo = (uint8_t**)start_address;}
32292                                         }
32293                                     }
32294                                     else
32295                                     {
32296                                         goto end_object;
32297                                     }
32298                                 }
32299                             }
32300
32301                            mark_through_cards_helper (poo, n_gen,
32302                                                       cg_pointers_found, fn,
32303                                                       nhigh, next_boundary);
32304                        }
32305                         );
32306                 }
32307
32308             end_object:
32309                 o = next_o;
32310             }
32311
32312         }
32313     }
32314
32315     // compute the efficiency ratio of the card table
32316     if (!relocating)
32317     {
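        // n_gen/n_eph measures how many of the pointers we visited were useful;
        // require a reasonable sample (> 800) before letting it lower the skip
        // ratio.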
32318         generation_skip_ratio = min (((n_eph > 800) ?
32319                                       (int)(((float)n_gen / (float)n_eph) * 100) : 100),
32320                                      generation_skip_ratio);
32321
32322         dprintf (3, ("Mloh: cross: %Id, useful: %Id, cards cleared: %Id, cards set: %Id, ratio: %d", 
32323              n_eph, n_gen, total_cards_cleared, n_card_set, generation_skip_ratio));
32324     }
32325     else
32326     {
32327         dprintf (3, ("R: Mloh: cross: %Id, useful: %Id, cards set: %Id, ratio: %d", 
32328              n_eph, n_gen, n_card_set, generation_skip_ratio));
32329     }
32330 }
32331
32332 void gc_heap::descr_segment (heap_segment* seg )
32333 {
32334 #ifdef TRACE_GC
32335     uint8_t*  x = heap_segment_mem (seg);
32336     while (x < heap_segment_allocated (seg))
32337     {
32338         dprintf(2, ( "%Ix: %d ", (size_t)x, size (x)));
32339         x = x + Align(size (x));
32340     }
32341 #else // TRACE_GC
32342     UNREFERENCED_PARAMETER(seg);
32343 #endif // TRACE_GC
32344 }
32345
32346 void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
32347 {
32348 #ifdef MULTIPLE_HEAPS
32349     int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
32350     for (int i = 0; i < n_heaps; i++)
32351     {
32352         gc_heap* hp = GCHeap::GetHeap(i)->pGenGCHeap;
32353 #else //MULTIPLE_HEAPS
32354     {
32355         gc_heap* hp = NULL;
32356 #ifdef _PREFAST_
32357         // Prefix complains about us dereferencing hp in the wks build even though we only access static members
32358         // this way. Not sure how to shut it up except for this ugly workaround:
32359         PREFIX_ASSUME(hp != NULL);
32360 #endif // _PREFAST_
32361 #endif //MULTIPLE_HEAPS
32362
32363         int curr_gen_number0 = max_generation+1;
32364         while (curr_gen_number0 >= 0)
32365         {
32366             generation* gen = hp->generation_of (curr_gen_number0);
32367             heap_segment* seg = generation_start_segment (gen);
32368             while (seg && (seg != hp->ephemeral_heap_segment))
32369             {
32370                 assert (curr_gen_number0 > 0);
32371
32372                 // report bounds from heap_segment_mem (seg) to
32373                 // heap_segment_allocated (seg);
32374                 // for generation # curr_gen_number0
32375                 // for heap # heap_no
32376
32377                 fn(context, curr_gen_number0, heap_segment_mem (seg),
32378                                               heap_segment_allocated (seg),
32379                                               curr_gen_number0 == max_generation+1 ? heap_segment_reserved (seg) : heap_segment_allocated (seg));
32380
32381                 seg = heap_segment_next (seg);
32382             }
32383             if (seg)
32384             {
32385                 assert (seg == hp->ephemeral_heap_segment);
32386                 assert (curr_gen_number0 <= max_generation);
32387                 //
32388                 if (curr_gen_number0 == max_generation)
32389                 {
32390                     if (heap_segment_mem (seg) < generation_allocation_start (hp->generation_of (max_generation-1)))
32391                     {
32392                         // report bounds from heap_segment_mem (seg) to
32393                         // generation_allocation_start (generation_of (max_generation-1))
32394                         // for heap # heap_number
32395
32396                         fn(context, curr_gen_number0, heap_segment_mem (seg),
32397                                                       generation_allocation_start (hp->generation_of (max_generation-1)),
32398                                                       generation_allocation_start (hp->generation_of (max_generation-1)) );
32399                     }
32400                 }
32401                 else if (curr_gen_number0 != 0)
32402                 {
32403                     //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
32404                     // to generation_allocation_start (generation_of (curr_gen_number0-1))
32405                     // for heap # heap_number
32406
32407                     fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
32408                                                   generation_allocation_start (hp->generation_of (curr_gen_number0-1)),
32409                                                   generation_allocation_start (hp->generation_of (curr_gen_number0-1)));
32410                 }
32411                 else
32412                 {
32413                     //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
32414                     // to heap_segment_allocated (ephemeral_heap_segment);
32415                     // for heap # heap_number
32416
32417                     fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
32418                                                   heap_segment_allocated (hp->ephemeral_heap_segment),
32419                                                   heap_segment_reserved (hp->ephemeral_heap_segment) );
32420                 }
32421             }
32422             curr_gen_number0--;
32423         }
32424     }
32425 }
32426
32427 #ifdef TRACE_GC
32428 // Note that when logging is on it can take a long time to go through the free items.
32429 void gc_heap::print_free_list (int gen, heap_segment* seg)
32430 {
32431     UNREFERENCED_PARAMETER(gen);
32432     UNREFERENCED_PARAMETER(seg);
32433 /*
32434     if (settings.concurrent == FALSE)
32435     {
32436         uint8_t* seg_start = heap_segment_mem (seg);
32437         uint8_t* seg_end = heap_segment_allocated (seg);
32438
32439         dprintf (3, ("Free list in seg %Ix:", seg_start));
32440
32441         size_t total_free_item = 0;
32442
32443         allocator* gen_allocator = generation_allocator (generation_of (gen));
32444         for (unsigned int b = 0; b < gen_allocator->number_of_buckets(); b++)
32445         {
32446             uint8_t* fo = gen_allocator->alloc_list_head_of (b);
32447             while (fo)
32448             {
32449                 if (fo >= seg_start && fo < seg_end)
32450                 {
32451                     total_free_item++;
32452
32453                     size_t free_item_len = size(fo);
32454
32455                     dprintf (3, ("[%Ix, %Ix[:%Id",
32456                                  (size_t)fo,
32457                                  (size_t)(fo + free_item_len),
32458                                  free_item_len));
32459                 }
32460
32461                 fo = free_list_slot (fo);
32462             }
32463         }
32464
32465         dprintf (3, ("total %Id free items", total_free_item));
32466     }
32467 */
32468 }
32469 #endif //TRACE_GC
32470
32471 void gc_heap::descr_generations (BOOL begin_gc_p)
32472 {
32473     UNREFERENCED_PARAMETER(begin_gc_p);
32474 #ifdef STRESS_LOG
32475     if (StressLog::StressLogOn(LF_GC, LL_INFO10))
32476     {
32477         gc_heap* hp = 0;
32478 #ifdef MULTIPLE_HEAPS
32479         hp = this;
32480 #endif //MULTIPLE_HEAPS
32481
32482         STRESS_LOG1(LF_GC, LL_INFO10, "GC Heap %p\n", hp);
32483         for (int n = max_generation; n >= 0; --n)
32484         {
32485             STRESS_LOG4(LF_GC, LL_INFO10, "    Generation %d [%p, %p] cur = %p\n",
32486                     n,
32487                     generation_allocation_start(generation_of(n)),
32488                     generation_allocation_limit(generation_of(n)),
32489                     generation_allocation_pointer(generation_of(n)));
32490
32491             heap_segment* seg = generation_start_segment(generation_of(n));
32492             while (seg)
32493             {
32494                 STRESS_LOG4(LF_GC, LL_INFO10, "        Segment mem %p alloc = %p used %p committed %p\n",
32495                         heap_segment_mem(seg),
32496                         heap_segment_allocated(seg),
32497                         heap_segment_used(seg),
32498                         heap_segment_committed(seg));
32499                 seg = heap_segment_next(seg);
32500             }
32501         }
32502     }
32503 #endif  // STRESS_LOG
32504
32505 #ifdef TRACE_GC
32506     dprintf (2, ("lowest_address: %Ix highest_address: %Ix",
32507              (size_t) lowest_address, (size_t) highest_address));
32508 #ifdef BACKGROUND_GC
32509     dprintf (2, ("bgc lowest_address: %Ix bgc highest_address: %Ix",
32510              (size_t) background_saved_lowest_address, (size_t) background_saved_highest_address));
32511 #endif //BACKGROUND_GC
32512
32513     if (heap_number == 0)
32514     {
32515         dprintf (1, ("total heap size: %Id, commit size: %Id", get_total_heap_size(), get_total_committed_size()));
32516     }
32517
32518     int curr_gen_number = max_generation+1;
32519     while (curr_gen_number >= 0)
32520     {
32521         size_t total_gen_size = generation_size (curr_gen_number);
32522 #ifdef SIMPLE_DPRINTF
32523         dprintf (GTC_LOG, ("[%s][g%d]gen %d:, size: %Id, frag: %Id(L: %Id, O: %Id), f: %d%% %s %s %s",
32524                       (begin_gc_p ? "BEG" : "END"),
32525                       settings.condemned_generation,
32526                       curr_gen_number,
32527                       total_gen_size,
32528                       dd_fragmentation (dynamic_data_of (curr_gen_number)),
32529                       generation_free_list_space (generation_of (curr_gen_number)),
32530                       generation_free_obj_space (generation_of (curr_gen_number)),
32531                       (total_gen_size ? 
32532                         (int)(((double)dd_fragmentation (dynamic_data_of (curr_gen_number)) / (double)total_gen_size) * 100) :
32533                         0),
32534                       (begin_gc_p ? ("") : (settings.compaction ? "(compact)" : "(sweep)")),
32535                       (settings.heap_expansion ? "(EX)" : " "),
32536                       (settings.promotion ? "Promotion" : "NoPromotion")));
32537 #else
32538         dprintf (2, ( "Generation %d: gap size: %d, generation size: %Id, fragmentation: %Id",
32539                       curr_gen_number,
32540                       size (generation_allocation_start (generation_of (curr_gen_number))),
32541                       total_gen_size,
32542                       dd_fragmentation (dynamic_data_of (curr_gen_number))));
32543 #endif //SIMPLE_DPRINTF
32544
32545         generation* gen = generation_of (curr_gen_number);
32546         heap_segment* seg = generation_start_segment (gen);
32547         while (seg && (seg != ephemeral_heap_segment))
32548         {
32549             dprintf (GTC_LOG, ("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)",
32550                         curr_gen_number,
32551                         (size_t)heap_segment_mem (seg),
32552                         (size_t)heap_segment_allocated (seg),
32553                         (size_t)heap_segment_committed (seg),
32554                         (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)),
32555                         (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg))));
32556             print_free_list (curr_gen_number, seg);
32557             seg = heap_segment_next (seg);
32558         }
32559         if (seg && (seg != generation_start_segment (gen)))
32560         {
32561             dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
32562                          curr_gen_number,
32563                          (size_t)heap_segment_mem (seg),
32564                          (size_t)generation_allocation_start (generation_of (curr_gen_number-1))));
32565             print_free_list (curr_gen_number, seg);
32566
32567         }
32568         else if (seg)
32569         {
32570             dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
32571                          curr_gen_number,
32572                          (size_t)generation_allocation_start (generation_of (curr_gen_number)),
32573                          (size_t)(((curr_gen_number == 0)) ?
32574                                   (heap_segment_allocated
32575                                    (generation_start_segment
32576                                     (generation_of (curr_gen_number)))) :
32577                                   (generation_allocation_start
32578                                    (generation_of (curr_gen_number - 1))))
32579                          ));
32580             print_free_list (curr_gen_number, seg);
32581         }
32582         curr_gen_number--;
32583     }
32584
32585 #endif //TRACE_GC
32586 }
32587
32588 #undef TRACE_GC
32589
32590 //#define TRACE_GC
32591
32592 //-----------------------------------------------------------------------------
32593 //
32594 //                                  VM Specific support
32595 //
32596 //-----------------------------------------------------------------------------
32597
32598
32599 #ifdef TRACE_GC
32600
32601  unsigned int PromotedObjectCount   = 0;
32602  unsigned int CreatedObjectCount    = 0;
32603  unsigned int AllocDuration         = 0;
32604  unsigned int AllocCount            = 0;
32605  unsigned int AllocBigCount         = 0;
32606  unsigned int AllocSmallCount       = 0;
32607  unsigned int AllocStart            = 0;
32608 #endif //TRACE_GC
32609
32610 //Static member variables.
32611 VOLATILE(BOOL)    GCHeap::GcInProgress            = FALSE;
32612 //GCTODO
32613 //CMCSafeLock*      GCHeap::fGcLock;
32614 GCEvent            *GCHeap::WaitForGCEvent         = NULL;
32615 //GCTODO
32616 #ifdef TRACE_GC
32617 unsigned int       GCHeap::GcDuration;
32618 #endif //TRACE_GC
32619 unsigned            GCHeap::GcCondemnedGeneration   = 0;
32620 size_t              GCHeap::totalSurvivedSize       = 0;
32621 #ifdef FEATURE_PREMORTEM_FINALIZATION
32622 CFinalize*          GCHeap::m_Finalize              = 0;
32623 BOOL                GCHeap::GcCollectClasses        = FALSE;
32624 VOLATILE(int32_t)      GCHeap::m_GCFLock               = 0;
32625
32626 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
32627 #ifdef STRESS_HEAP
32628 #ifdef BACKGROUND_GC
32629 int                 GCHeap::gc_stress_fgcs_in_bgc   = 0;
32630 #endif // BACKGROUND_GC
32631 #ifndef MULTIPLE_HEAPS
32632 OBJECTHANDLE        GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
32633 int                 GCHeap::m_CurStressObj          = 0;
32634 #endif // !MULTIPLE_HEAPS
32635 #endif // STRESS_HEAP
32636 #endif // FEATURE_REDHAWK
32637
32638 #endif //FEATURE_PREMORTEM_FINALIZATION
32639
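// RAII holder for g_no_gc_lock: acquires the lock on construction and releases
// it on destruction, bracketing no-GC-region operations.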
32640 class NoGCRegionLockHolder
32641 {
32642 public:
32643     NoGCRegionLockHolder()
32644     {
32645         enter_spin_lock_noinstru(&g_no_gc_lock);
32646     }
32647
32648     ~NoGCRegionLockHolder()
32649     {
32650         leave_spin_lock_noinstru(&g_no_gc_lock);
32651     }
32652 };
32653
32654 // An explanation of locking for finalization:
32655 //
32656 // Multiple threads allocate objects.  During the allocation, they are serialized by
32657 // the AllocLock above.  But they release that lock before they register the object
32658 // for finalization.  That's because there is much contention for the alloc lock, but
32659 // finalization is presumed to be a rare case.
32660 //
32661 // So registering an object for finalization must be protected by the FinalizeLock.
32662 //
32663 // There is another logical queue that involves finalization.  When objects registered
32664 // for finalization become unreachable, they are moved from the "registered" queue to
32665 // the "unreachable" queue.  Note that this only happens inside a GC, so no other
32666 // threads can be manipulating either queue at that time.  Once the GC is over and
32667 // threads are resumed, the Finalizer thread will dequeue objects from the "unreachable"
32668 // queue and call their finalizers.  This dequeue operation is also protected with
32669 // the finalize lock.
32670 //
32671 // At first, this seems unnecessary.  Only one thread is ever enqueuing or dequeuing
32672 // on the unreachable queue (either the GC thread during a GC or the finalizer thread
32673 // when a GC is not in progress).  The reason we share a lock with threads enqueuing
32674 // on the "registered" queue is that the "registered" and "unreachable" queues are
32675 // interrelated.
32676 //
32677 // They are actually two regions of a longer list, which can only grow at one end.
32678 // So to enqueue an object to the "registered" list, you actually rotate an unreachable
32679 // object at the boundary between the logical queues, out to the other end of the
32680 // unreachable queue -- where all growing takes place.  Then you move the boundary
32681 // pointer so that the gap we created at the boundary is now on the "registered"
32682 // side rather than the "unreachable" side.  Now the object can be placed into the
32683 // "registered" side at that point.  This is much more efficient than doing moves
32684 // of arbitrarily long regions, but it causes the two queues to require a shared lock.
32685 //
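// As a minimal sketch of that rotation (hypothetical names - the real queue is
// managed by the CFinalize class later in this file, which tracks the segment
// boundaries with fill pointers):
//
//     // list[0 .. boundary[ holds "registered"; list[boundary .. end[ holds "unreachable".
//     void enqueue_registered (Object** list, size_t& boundary, size_t& end, Object* obj)
//     {
//         list[end++] = list[boundary];  // rotate the boundary object out to the growing end
//         list[boundary++] = obj;        // the freed boundary slot joins the "registered" side
//     }
//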
32686 // Notice that Enter/LeaveFinalizeLock is not a GC-aware spin lock.  Instead, it relies
32687 // on the fact that the lock will only be taken for a brief period and that it will
32688 // never provoke or allow a GC while the lock is held.  This is critical.  If the
32689 // FinalizeLock used enter_spin_lock (and thus sometimes enters preemptive mode to
32690 // allow a GC), then the Alloc client would have to GC protect a finalizable object
32691 // to protect against that eventuality.  That is too slow!
32692
32693
32694
32695 BOOL IsValidObject99(uint8_t *pObject)
32696 {
32697 #ifdef VERIFY_HEAP
32698     if (!((CObjectHeader*)pObject)->IsFree())
32699         ((CObjectHeader *) pObject)->Validate();
32700 #endif //VERIFY_HEAP
32701     return(TRUE);
32702 }
32703
32704 #ifdef BACKGROUND_GC 
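// Computes the overlap of the segment (up to its reserved or allocated end,
// per whole_seg_p) with the address range saved for this BGC; returns FALSE
// if the two ranges don't intersect.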
32705 BOOL gc_heap::bgc_mark_array_range (heap_segment* seg, 
32706                                     BOOL whole_seg_p,
32707                                     uint8_t** range_beg,
32708                                     uint8_t** range_end)
32709 {
32710     uint8_t* seg_start = heap_segment_mem (seg);
32711     uint8_t* seg_end = (whole_seg_p ? heap_segment_reserved (seg) : align_on_mark_word (heap_segment_allocated (seg)));
32712
32713     if ((seg_start < background_saved_highest_address) &&
32714         (seg_end > background_saved_lowest_address))
32715     {
32716         *range_beg = max (seg_start, background_saved_lowest_address);
32717         *range_end = min (seg_end, background_saved_highest_address);
32718         return TRUE;
32719     }
32720     else
32721     {
32722         return FALSE;
32723     }
32724 }
32725
32726 void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
32727 {
32728 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32729     if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32730     {
32731         uint8_t* range_beg = 0;
32732         uint8_t* range_end = 0;
32733
32734         if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end))
32735         {
32736             size_t  markw = mark_word_of (range_beg);
32737             size_t  markw_end = mark_word_of (range_end);
32738             while (markw < markw_end)
32739             {
32740                 if (mark_array [markw])
32741                 {
32742                     dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
32743                                     markw, mark_array [markw], mark_word_address (markw)));
32744                     FATAL_GC_ERROR();
32745                 }
32746                 markw++;
32747             }
32748             uint8_t* p = mark_word_address (markw_end);
32749             while (p < range_end)
32750             {
32751                 assert (!(mark_array_marked (p)));
32752                 p++;
32753             }
32754         }
32755     }
32756 #endif //VERIFY_HEAP && MARK_ARRAY
32757 }
32758
32759 void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
32760 {
32761 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32762     size_t start_mark_bit = mark_bit_of (obj) + 1;
32763     size_t end_mark_bit = mark_bit_of (obj + s);
32764     unsigned int startbit = mark_bit_bit (start_mark_bit);
32765     unsigned int endbit = mark_bit_bit (end_mark_bit);
32766     size_t startwrd = mark_bit_word (start_mark_bit);
32767     size_t endwrd = mark_bit_word (end_mark_bit);
32768     unsigned int result = 0;
32769
32770     unsigned int firstwrd = ~(lowbits (~0, startbit));
32771     unsigned int lastwrd = ~(highbits (~0, endbit));
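    // firstwrd keeps only the bits at and above startbit in the first word;
    // lastwrd keeps only the bits below endbit in the last word, so together
    // they select exactly the mark bits covering the object.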
32772
32773     if (startwrd == endwrd)
32774     {
32775         unsigned int wrd = firstwrd & lastwrd;
32776         result = mark_array[startwrd] & wrd;
32777         if (result)
32778         {
32779             FATAL_GC_ERROR();
32780         }
32781         return;
32782     }
32783
32784     // verify the first mark word is cleared.
32785     if (startbit)
32786     {
32787         result = mark_array[startwrd] & firstwrd;
32788         if (result)
32789         {
32790             FATAL_GC_ERROR();
32791         }
32792         startwrd++;
32793     }
32794
32795     for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
32796     {
32797         result = mark_array[wrdtmp];
32798         if (result)
32799         {
32800             FATAL_GC_ERROR();
32801         }
32802     }
32803
32804     // verify the last mark word is cleared.
32805     if (endbit)
32806     {
32807         result = mark_array[endwrd] & lastwrd;
32808         if (result)
32809         {
32810             FATAL_GC_ERROR();
32811         }
32812     }
32813 #endif //VERIFY_HEAP && MARK_ARRAY
32814 }
32815
32816 void gc_heap::clear_all_mark_array()
32817 {
32818 #ifdef MARK_ARRAY
32819     //size_t num_dwords_written = 0;
32820     //size_t begin_time = GetHighPrecisionTimeStamp();
32821
32822     generation* gen = generation_of (max_generation);
32823     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32824     
32825     while (1)
32826     {
32827         if (seg == 0)
32828         {
32829             if (gen != large_object_generation)
32830             {
32831                 gen = generation_of (max_generation+1);
32832                 seg = heap_segment_rw (generation_start_segment (gen));
32833             }
32834             else
32835             {
32836                 break;
32837             }
32838         }
32839
32840         uint8_t* range_beg = 0;
32841         uint8_t* range_end = 0;
32842
32843         if (bgc_mark_array_range (seg, (seg == ephemeral_heap_segment), &range_beg, &range_end))
32844         { 
32845             size_t markw = mark_word_of (range_beg);
32846             size_t markw_end = mark_word_of (range_end);
32847             size_t size_total = (markw_end - markw) * sizeof (uint32_t);
32848             //num_dwords_written = markw_end - markw;
32849             size_t size = 0;
32850             size_t size_left = 0;
32851
32852             assert (((size_t)&mark_array[markw] & (sizeof(PTR_PTR)-1)) == 0);
32853
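            // Split the range into a pointer-size-aligned prefix that memclr
            // clears in bulk and a small uint32_t-sized remainder that is
            // cleared one mark word at a time below.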
32854             if ((size_total & (sizeof(PTR_PTR) - 1)) != 0)
32855             {
32856                 size = (size_total & ~(sizeof(PTR_PTR) - 1));
32857                 size_left = size_total - size;
32858                 assert ((size_left & (sizeof (uint32_t) - 1)) == 0);
32859             }
32860             else
32861             {
32862                 size = size_total;
32863             }
32864
32865             memclr ((uint8_t*)&mark_array[markw], size);
32866
32867             if (size_left != 0)
32868             {
32869                 uint32_t* markw_to_clear = &mark_array[markw + size / sizeof (uint32_t)];
32870                 for (size_t i = 0; i < (size_left / sizeof (uint32_t)); i++)
32871                 {
32872                     *markw_to_clear = 0;
32873                     markw_to_clear++;
32874                 }
32875             }
32876         }
32877
32878         seg = heap_segment_next_rw (seg);
32879     }
32880
32881     //size_t end_time = GetHighPrecisionTimeStamp() - begin_time; 
32882
32883     //printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));
32884
32885 #endif //MARK_ARRAY
32886 }
32887
32888 #endif //BACKGROUND_GC 
32889
32890 void gc_heap::verify_mark_array_cleared (heap_segment* seg)
32891 {
32892 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32893     assert (card_table == g_gc_card_table);
32894     size_t  markw = mark_word_of (heap_segment_mem (seg));
32895     size_t  markw_end = mark_word_of (heap_segment_reserved (seg));
32896
32897     while (markw < markw_end)
32898     {
32899         if (mark_array [markw])
32900         {
32901             dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
32902                             markw, mark_array [markw], mark_word_address (markw)));
32903             FATAL_GC_ERROR();
32904         }
32905         markw++;
32906     }
32907 #endif //VERIFY_HEAP && MARK_ARRAY
32908 }
32909
32910 void gc_heap::verify_mark_array_cleared ()
32911 {
32912 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32913     if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32914     {
32915         generation* gen = generation_of (max_generation);
32916         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32917         
32918         while (1)
32919         {
32920             if (seg == 0)
32921             {
32922                 if (gen != large_object_generation)
32923                 {
32924                     gen = generation_of (max_generation+1);
32925                     seg = heap_segment_rw (generation_start_segment (gen));
32926                 }
32927                 else
32928                 {
32929                     break;
32930                 }
32931             }
32932
32933             bgc_verify_mark_array_cleared (seg);
32934             seg = heap_segment_next_rw (seg);
32935         }
32936     }
32937 #endif //VERIFY_HEAP && MARK_ARRAY
32938 }
32939
32940 void gc_heap::verify_seg_end_mark_array_cleared()
32941 {
32942 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32943     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32944     {
32945         generation* gen = generation_of (max_generation);
32946         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32947         
32948         while (1)
32949         {
32950             if (seg == 0)
32951             {
32952                 if (gen != large_object_generation)
32953                 {
32954                     gen = generation_of (max_generation+1);
32955                     seg = heap_segment_rw (generation_start_segment (gen));
32956                 }
32957                 else
32958                 {
32959                     break;
32960                 }
32961             }
32962
32963             // We already cleared all mark array bits for ephemeral generations
32964             // at the beginning of bgc sweep
32965             uint8_t* from = ((seg == ephemeral_heap_segment) ?
32966                           generation_allocation_start (generation_of (max_generation - 1)) :
32967                           heap_segment_allocated (seg));
32968             size_t  markw = mark_word_of (align_on_mark_word (from));
32969             size_t  markw_end = mark_word_of (heap_segment_reserved (seg));
32970
32971             while (from < mark_word_address (markw))
32972             {
32973                 if (is_mark_bit_set (from))
32974                 {
32975                     dprintf (3, ("mark bit for %Ix was not cleared", from));
32976                     FATAL_GC_ERROR();
32977                 }
32978
32979                 from += mark_bit_pitch;
32980             }
32981
32982             while (markw < markw_end)
32983             {
32984                 if (mark_array [markw])
32985                 {
32986                     dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
32987                                     markw, mark_array [markw], mark_word_address (markw)));
32988                     FATAL_GC_ERROR();
32989                 }
32990                 markw++;
32991             }
32992             seg = heap_segment_next_rw (seg);
32993         }
32994     }
32995 #endif //VERIFY_HEAP && MARK_ARRAY
32996 }
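// Illustrative sketch (not part of the original source): the seg-end check above
// runs in two phases because 'from' is generally not aligned on a mark word
// boundary. Phase 1 walks individual mark bits (stride mark_bit_pitch) from
// 'from' up to mark_word_address (markw), the first aligned mark word; phase 2
// then checks whole mark-array words from markw up to the segment's reserved end.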
32997
32998 // This function is called to make sure we don't mess up the segment list
32999 // in SOH. It's called by:
33000 // 1) begin and end of ephemeral GCs
33001 // 2) during bgc sweep when we switch segments.
33002 void gc_heap::verify_soh_segment_list()
33003 {
33004 #ifdef VERIFY_HEAP
33005     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
33006     {
33007         generation* gen = generation_of (max_generation);
33008         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
33009         heap_segment* last_seg = 0;
33010         while (seg)
33011         {
33012             last_seg = seg;
33013             seg = heap_segment_next_rw (seg);
33014         }
33015         if (last_seg != ephemeral_heap_segment)
33016         {
33017             FATAL_GC_ERROR();
33018         }
33019     }
33020 #endif //VERIFY_HEAP
33021 }
33022
33023 // This function can be called during any foreground GC or blocking GC. For background GCs,
33024 // it can be called at the end of the final marking, and at any point during background
33025 // sweep.
33026 // NOTE - to be able to call this function during background sweep, we need to temporarily 
33027 // NOT clear the mark array bits as we go.
33028 void gc_heap::verify_partial ()
33029 {
33030 #ifdef BACKGROUND_GC
33031     //printf ("GC#%d: Verifying loh during sweep\n", settings.gc_index);
33032     //generation* gen = large_object_generation;
33033     generation* gen = generation_of (max_generation);
33034     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
33035     int align_const = get_alignment_constant (gen != large_object_generation);
33036
33037     uint8_t* o = 0;
33038     uint8_t* end = 0;
33039     size_t s = 0;
33040
33041     // Different ways to fail.
33042     BOOL mark_missed_p = FALSE;
33043     BOOL bad_ref_p = FALSE;
33044     BOOL free_ref_p = FALSE;
33045
33046     while (1)
33047     {
33048         if (seg == 0)
33049         {
33050             if (gen != large_object_generation)
33051             {
33052                 //switch to LOH
33053                 gen = large_object_generation;
33054                 align_const = get_alignment_constant (gen != large_object_generation);
33055                 seg = heap_segment_rw (generation_start_segment (gen));
33056                 continue;
33057             }
33058             else
33059             {
33060                 break;
33061             }
33062         }
33063
33064         o = heap_segment_mem (seg);
33065         end  = heap_segment_allocated (seg);
33066         //printf ("validating [%Ix-[%Ix\n", o, end);
33067         while (o < end)
33068         {
33069             s = size (o);
33070
33071             BOOL marked_p = background_object_marked (o, FALSE);
33072
33073             if (marked_p)
33074             {
33075                 go_through_object_cl (method_table (o), o, s, oo,
33076                     {
33077                         if (*oo)
33078                         {
33079                             //dprintf (3, ("VOM: verifying member %Ix in obj %Ix", (size_t)*oo, o));
33080                             MethodTable *pMT = method_table (*oo);
33081
33082                             if (pMT == g_gc_pFreeObjectMethodTable)
33083                             {
33084                                 free_ref_p = TRUE;
33085                                 FATAL_GC_ERROR();
33086                             }
33087
33088                             if (!pMT->SanityCheck()) 
33089                             {
33090                                 bad_ref_p = TRUE;
33091                                 dprintf (3, ("Bad member of %Ix %Ix",
33092                                             (size_t)oo, (size_t)*oo));
33093                                 FATAL_GC_ERROR();
33094                             }
33095
33096                             if (current_bgc_state == bgc_final_marking)
33097                             {
33098                                 if (marked_p && !background_object_marked (*oo, FALSE))
33099                                 {
33100                                     mark_missed_p = TRUE;
33101                                     FATAL_GC_ERROR();
33102                                 }
33103                             }
33104                         }
33105                     }
33106                                     );
33107             }
33108
33109             o = o + Align(s, align_const);
33110         }
33111         seg = heap_segment_next_rw (seg);
33112     }
33113
33114     //printf ("didn't find any large object large enough...\n");
33115     //printf ("finished verifying loh\n");
33116 #endif //BACKGROUND_GC 
33117 }
33118
33119 #ifdef VERIFY_HEAP
33120
33121 void 
33122 gc_heap::verify_free_lists ()
33123 {
33124     for (int gen_num = 0; gen_num <= max_generation+1; gen_num++)
33125     {
33126         dprintf (3, ("Verifying free list for gen:%d", gen_num));
33127         allocator* gen_alloc = generation_allocator (generation_of (gen_num));
33128         size_t sz = gen_alloc->first_bucket_size();
33129         bool verify_undo_slot = (gen_num != 0) && (gen_num != max_generation+1) && !gen_alloc->discard_if_no_fit_p();
33130
33131         for (unsigned int a_l_number = 0; a_l_number < gen_alloc->number_of_buckets(); a_l_number++)
33132         {
33133             uint8_t* free_list = gen_alloc->alloc_list_head_of (a_l_number);
33134             uint8_t* prev = 0;
33135             while (free_list)
33136             {
33137                 if (!((CObjectHeader*)free_list)->IsFree())
33138                 {
33139                     dprintf (3, ("Verifying Heap: curr free list item %Ix isn't a free object",
33140                                  (size_t)free_list));
33141                     FATAL_GC_ERROR();
33142                 }
33143                 if (((a_l_number < (gen_alloc->number_of_buckets()-1))&& (unused_array_size (free_list) >= sz))
33144                     || ((a_l_number != 0) && (unused_array_size (free_list) < sz/2)))
33145                 {
33146                 dprintf (3, ("Verifying Heap: curr free list item %Ix isn't in the right bucket",
33147                                  (size_t)free_list));
33148                     FATAL_GC_ERROR();
33149                 }
33150                 if (verify_undo_slot && (free_list_undo (free_list) != UNDO_EMPTY))
33151                 {
33152                 dprintf (3, ("Verifying Heap: curr free list item %Ix has non-empty undo slot",
33153                                  (size_t)free_list));
33154                     FATAL_GC_ERROR();
33155                 }
33156                 if ((gen_num != max_generation+1)&&(object_gennum (free_list)!= gen_num))
33157                 {
33158                 dprintf (3, ("Verifying Heap: curr free list item %Ix is in the wrong generation free list",
33159                                  (size_t)free_list));
33160                     FATAL_GC_ERROR();
33161                 }
33162                     
33163                 prev = free_list;
33164                 free_list = free_list_slot (free_list);
33165             }
33166             //verify the sanity of the tail 
33167             uint8_t* tail = gen_alloc->alloc_list_tail_of (a_l_number);
33168             if (!((tail == 0) || (tail == prev)))
33169             {
33170                 dprintf (3, ("Verifying Heap: tail of free list is not correct"));
33171                 FATAL_GC_ERROR();
33172             }
33173             if (tail == 0)
33174             {
33175                 uint8_t* head = gen_alloc->alloc_list_head_of (a_l_number);
33176                 if ((head != 0) && (free_list_slot (head) != 0))
33177                 {
33178                     dprintf (3, ("Verifying Heap: tail of free list is not correct"));
33179                     FATAL_GC_ERROR();
33180                 }
33181             }
33182
33183             sz *= 2;
33184         }
33185     }
33186 }
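// Illustrative sketch (not part of the original source): bucket sizes double on
// each iteration (sz *= 2 above), so with a hypothetical first_bucket_size of
// 256 bytes the buckets validated above cover:
//   bucket 0: size <  256
//   bucket 1: 256 <= size <  512  (sz == 512 during its check; < sz/2 or >= sz is flagged)
//   bucket 2: 512 <= size < 1024
//   last bucket: no upper bound, which is why the ">= sz" check is skipped for it.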
33187
33188 void
33189 gc_heap::verify_heap (BOOL begin_gc_p)
33190 {
33191     int             heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel());
33192     size_t          last_valid_brick = 0;
33193     BOOL            bCurrentBrickInvalid = FALSE;
33194     BOOL            large_brick_p = TRUE;
33195     size_t          curr_brick = 0;
33196     size_t          prev_brick = (size_t)-1;
33197     int             curr_gen_num = max_generation+1;    
33198     heap_segment*   seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num ) ));
33199
33200     PREFIX_ASSUME(seg != NULL);
33201
33202     uint8_t*        curr_object = heap_segment_mem (seg);
33203     uint8_t*        prev_object = 0;
33204     uint8_t*        begin_youngest = generation_allocation_start(generation_of(0));
33205     uint8_t*        end_youngest = heap_segment_allocated (ephemeral_heap_segment);
33206     uint8_t*        next_boundary = generation_allocation_start (generation_of (max_generation - 1));
33207     int             align_const = get_alignment_constant (FALSE);
33208     size_t          total_objects_verified = 0;
33209     size_t          total_objects_verified_deep = 0;
33210
33211 #ifdef BACKGROUND_GC
33212     BOOL consider_bgc_mark_p    = FALSE;
33213     BOOL check_current_sweep_p  = FALSE;
33214     BOOL check_saved_sweep_p    = FALSE;
33215     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
33216 #endif //BACKGROUND_GC
33217
33218 #ifdef MULTIPLE_HEAPS
33219     t_join* current_join = &gc_t_join;
33220 #ifdef BACKGROUND_GC
33221     if (settings.concurrent && (bgc_thread_id.IsCurrentThread()))
33222     {
33223         // We always call verify_heap on entry of GC on the SVR GC threads.
33224         current_join = &bgc_t_join;
33225     }
33226 #endif //BACKGROUND_GC
33227 #endif //MULTIPLE_HEAPS
33228
33229     UNREFERENCED_PARAMETER(begin_gc_p);
33230 #ifdef BACKGROUND_GC 
33231     dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin", 
33232         (begin_gc_p ? "BEG" : "END"),
33233         VolatileLoad(&settings.gc_index), 
33234         (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
33235 #else
33236     dprintf (2,("[%s]GC#%d: Verifying heap - begin", 
33237                 (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index)));
33238 #endif //BACKGROUND_GC 
33239
33240 #ifndef MULTIPLE_HEAPS
33241     if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
33242         (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
33243     {
33244         FATAL_GC_ERROR();
33245     }
33246 #endif //MULTIPLE_HEAPS
33247
33248 #ifdef BACKGROUND_GC
33249     //don't touch the memory because the program is allocating from it.
33250     if (!settings.concurrent)
33251 #endif //BACKGROUND_GC
33252     {
33253         if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL))
33254         {
33255             //uninit the unused portions of segments.
33256             generation* gen1 = large_object_generation;
33257             heap_segment* seg1 = heap_segment_rw (generation_start_segment (gen1));
33258             PREFIX_ASSUME(seg1 != NULL);
33259
33260             while (1)
33261             {
33262                 if (seg1)
33263                 {
33264                     uint8_t* clear_start = heap_segment_allocated (seg1) - plug_skew;
33265                     if (heap_segment_used (seg1) > clear_start)
33266                     {
33267                         dprintf (3, ("setting end of seg %Ix: [%Ix-[%Ix to 0xaa", 
33268                                     heap_segment_mem (seg1),
33269                                     clear_start ,
33270                                     heap_segment_used (seg1)));
33271                         memset (heap_segment_allocated (seg1) - plug_skew, 0xaa,
33272                             (heap_segment_used (seg1) - clear_start));
33273                     }
33274                     seg1 = heap_segment_next_rw (seg1);
33275                 }
33276                 else
33277                 {
33278                     if (gen1 == large_object_generation)
33279                     {
33280                         gen1 = generation_of (max_generation);
33281                         seg1 = heap_segment_rw (generation_start_segment (gen1));
33282                         PREFIX_ASSUME(seg1 != NULL);
33283                     }
33284                     else
33285                     {
33286                         break;
33287                     }
33288                 }
33289             }
33290         }
33291     }
33292
33293 #ifdef MULTIPLE_HEAPS
33294     current_join->join(this, gc_join_verify_copy_table);
33295     if (current_join->joined())
33296     {
33297         // in concurrent GC, a new segment could be allocated while the GC is working, so the card/brick table might not be updated at this point
33298         for (int i = 0; i < n_heaps; i++)
33299         {
33300             //copy the card and brick tables
33301             if (g_gc_card_table != g_heaps[i]->card_table)
33302             {
33303                 g_heaps[i]->copy_brick_card_table();
33304             }
33305         }
33306
33307         current_join->restart();
33308     }
33309 #else
33310         if (g_gc_card_table != card_table)
33311             copy_brick_card_table();
33312 #endif //MULTIPLE_HEAPS
33313
33314     //verify that the generation structures make sense
33315     {
33316         generation* gen = generation_of (max_generation);
33317
33318         assert (generation_allocation_start (gen) ==
33319                 heap_segment_mem (heap_segment_rw (generation_start_segment (gen))));
33320         int gen_num = max_generation-1;
33321         generation* prev_gen = gen;
33322         while (gen_num >= 0)
33323         {
33324             gen = generation_of (gen_num);
33325             assert (generation_allocation_segment (gen) == ephemeral_heap_segment);
33326             assert (generation_allocation_start (gen) >= heap_segment_mem (ephemeral_heap_segment));
33327             assert (generation_allocation_start (gen) < heap_segment_allocated (ephemeral_heap_segment));
33328
33329             if (generation_start_segment (prev_gen ) ==
33330                 generation_start_segment (gen))
33331             {
33332                 assert (generation_allocation_start (prev_gen) <
33333                         generation_allocation_start (gen));
33334             }
33335             prev_gen = gen;
33336             gen_num--;
33337         }
33338     }
33339
33340     while (1)
33341     {
33342         // Handle segment transitions
33343         if (curr_object >= heap_segment_allocated (seg))
33344         {
33345             if (curr_object > heap_segment_allocated(seg))
33346             {
33347                 dprintf (3, ("Verifying Heap: curr_object: %Ix > heap_segment_allocated (seg: %Ix)",
33348                         (size_t)curr_object, (size_t)seg));
33349                 FATAL_GC_ERROR();
33350             }
33351             seg = heap_segment_next_in_range (seg);
33352             if (seg)
33353             {
33354 #ifdef BACKGROUND_GC
33355                 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
33356 #endif //BACKGROUND_GC
33357                 curr_object = heap_segment_mem(seg);
33358                 prev_object = 0;
33359                 continue;
33360             }
33361             else
33362             {
33363                 if (curr_gen_num == (max_generation+1))
33364                 {
33365                     curr_gen_num--;
33366                     seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num)));
33367
33368                     PREFIX_ASSUME(seg != NULL);
33369
33370 #ifdef BACKGROUND_GC
33371                     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
33372 #endif //BACKGROUND_GC
33373                     curr_object = heap_segment_mem (seg);
33374                     prev_object = 0;
33375                     large_brick_p = FALSE;
33376                     align_const = get_alignment_constant (TRUE);
33377                 }
33378                 else
33379                     break;  // Done Verifying Heap -- no more segments
33380             }
33381         }
33382
33383         // Are we at the end of the youngest_generation?
33384         if (seg == ephemeral_heap_segment)
33385         {
33386             if (curr_object >= end_youngest)
33387             {
33388                 // prev_object length is too long if we hit this int3
33389                 if (curr_object > end_youngest)
33390                 {
33391                     dprintf (3, ("Verifying Heap: curr_object: %Ix > end_youngest: %Ix",
33392                             (size_t)curr_object, (size_t)end_youngest));
33393                     FATAL_GC_ERROR();
33394                 }
33395                 break;
33396             }
33397             
33398             if ((curr_object >= next_boundary) && (curr_gen_num > 0))
33399             {
33400                 curr_gen_num--;
33401                 if (curr_gen_num > 0)
33402                 {
33403                     next_boundary = generation_allocation_start (generation_of (curr_gen_num - 1));
33404                 }
33405             }
33406         }
33407
33408          //if (is_mark_set (curr_object))
33409          //{
33410          //        printf ("curr_object: %Ix is marked!",(size_t)curr_object);
33411          //        FATAL_GC_ERROR();
33412          //}
33413
33414         size_t s = size (curr_object);
33415         dprintf (3, ("o: %Ix, s: %d", (size_t)curr_object, s));
33416         if (s == 0)
33417         {
33418             dprintf (3, ("Verifying Heap: size of current object %Ix == 0", curr_object));
33419             FATAL_GC_ERROR();
33420         }
33421
33422         // If the object is not in the youngest generation, then let's
33423         // verify that the brick table is correct.
33424         if (((seg != ephemeral_heap_segment) ||
33425              (brick_of(curr_object) < brick_of(begin_youngest))))
33426         {
33427             curr_brick = brick_of(curr_object);
33428
33429             // Brick Table Verification...
33430             //
33431             // On brick transition
33432             //     if brick is negative
33433             //          verify that brick indirects to previous valid brick
33434             //     else
33435             //          set current brick invalid flag to be flipped if we
33436             //          encounter an object at the correct place
33437             //
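            // Illustrative sketch (not part of the original source): a negative
            // brick entry is a relative hop back toward the brick that holds the
            // actual plug-tree offset. For example:
            //   brick_table[102] == -2  -> step back to brick 100
            //   brick_table[100] == -1  -> step back to brick 99
            //   brick_table[99]  ==  5  -> tree starts at brick_address (99) + 5 - 1
            // which is exactly the "brick_address (curr_brick) + brick_table [curr_brick] - 1"
            // form validated further below for a positive brick.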
33438             if (curr_brick != prev_brick)
33439             {
33440                 // If the last brick we were examining had a positive
33441                 // entry but we never found the matching object, then
33442                 // we have a problem.
33443                 // If prev_brick was the last one of the segment,
33444                 // it's ok for it to be invalid because it is never looked at.
33445                 if (bCurrentBrickInvalid &&
33446                     (curr_brick != brick_of (heap_segment_mem (seg))) &&
33447                     !heap_segment_read_only_p (seg))
33448                 {
33449                     dprintf (3, ("curr brick %Ix invalid", curr_brick));
33450                     FATAL_GC_ERROR();
33451                 }
33452
33453                 if (large_brick_p)
33454                 {
33455                     //large objects verify the table only if they are in
33456                     //range.
33457                     if ((heap_segment_reserved (seg) <= highest_address) &&
33458                         (heap_segment_mem (seg) >= lowest_address) &&
33459                         brick_table [curr_brick] != 0)
33460                     {
33461                         dprintf (3, ("curr_brick %Ix for large object %Ix not set to -32768",
33462                                 curr_brick, (size_t)curr_object));
33463                         FATAL_GC_ERROR();
33464                     }
33465                     else
33466                     {
33467                         bCurrentBrickInvalid = FALSE;
33468                     }
33469                 }
33470                 else
33471                 {
33472                     // If the current brick contains a negative value make sure
33473                     // that the indirection terminates at the last valid brick
33474                     if (brick_table [curr_brick] <= 0)
33475                     {
33476                         if (brick_table [curr_brick] == 0)
33477                         {
33478                             dprintf(3, ("curr_brick %Ix for object %Ix set to 0",
33479                                     curr_brick, (size_t)curr_object));
33480                             FATAL_GC_ERROR();
33481                         }
33482                         ptrdiff_t i = curr_brick;
33483                         while ((i >= ((ptrdiff_t) brick_of (heap_segment_mem (seg)))) &&
33484                                (brick_table[i] < 0))
33485                         {
33486                             i = i + brick_table[i];
33487                         }
33488                         if (i <  ((ptrdiff_t)(brick_of (heap_segment_mem (seg))) - 1))
33489                         {
33490                             dprintf (3, ("ptrdiff i: %Ix < brick_of (heap_segment_mem (seg)):%Ix - 1. curr_brick: %Ix",
33491                                     i, brick_of (heap_segment_mem (seg)),
33492                                     curr_brick));
33493                             FATAL_GC_ERROR();
33494                         }
33495                         // if (i != last_valid_brick)
33496                         //  FATAL_GC_ERROR();
33497                         bCurrentBrickInvalid = FALSE;
33498                     }
33499                     else if (!heap_segment_read_only_p (seg))
33500                     {
33501                         bCurrentBrickInvalid = TRUE;
33502                     }
33503                 }
33504             }
33505
33506             if (bCurrentBrickInvalid)
33507             {
33508                 if (curr_object == (brick_address(curr_brick) + brick_table[curr_brick] - 1))
33509                 {
33510                     bCurrentBrickInvalid = FALSE;
33511                     last_valid_brick = curr_brick;
33512                 }
33513             }
33514         }
33515
33516         if (*((uint8_t**)curr_object) != (uint8_t *) g_gc_pFreeObjectMethodTable)
33517         {
33518 #ifdef FEATURE_LOH_COMPACTION
33519             if ((curr_gen_num == (max_generation+1)) && (prev_object != 0))
33520             {
33521                 assert (method_table (prev_object) == g_gc_pFreeObjectMethodTable);
33522             }
33523 #endif //FEATURE_LOH_COMPACTION
33524
33525             total_objects_verified++;
33526
33527             BOOL can_verify_deep = TRUE;
33528 #ifdef BACKGROUND_GC
33529             can_verify_deep = fgc_should_consider_object (curr_object, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p);
33530 #endif //BACKGROUND_GC
33531
33532             BOOL deep_verify_obj = can_verify_deep;
33533             if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
33534                 deep_verify_obj = FALSE;
33535
33536             ((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj);
33537
33538             if (can_verify_deep)
33539             {
33540                 if (curr_gen_num > 0)
33541                 {
33542                     BOOL need_card_p = FALSE;
33543                     if (contain_pointers_or_collectible (curr_object))
33544                     {
33545                         dprintf (4, ("curr_object: %Ix", (size_t)curr_object));
33546                         size_t crd = card_of (curr_object);
33547                         BOOL found_card_p = card_set_p (crd);
33548
33549 #ifdef COLLECTIBLE_CLASS
33550                         if (is_collectible(curr_object))
33551                         {
33552                             uint8_t* class_obj = get_class_object (curr_object);
33553                             if ((class_obj < ephemeral_high) && (class_obj >= next_boundary))
33554                             {
33555                                 if (!found_card_p)
33556                                 {
33557                                     dprintf (3, ("Card not set, curr_object = [%Ix:%Ix pointing to class object %Ix",
33558                                                 card_of (curr_object), (size_t)curr_object, class_obj));
33559
33560                                     FATAL_GC_ERROR();
33561                                 }
33562                             }
33563                         }
33564 #endif //COLLECTIBLE_CLASS
33565
33566                         if (contain_pointers(curr_object))
33567                         {
33568                             go_through_object_nostart
33569                                 (method_table(curr_object), curr_object, s, oo,
33570                                 {
33571                                     if ((crd != card_of ((uint8_t*)oo)) && !found_card_p)
33572                                     {
33573                                         crd = card_of ((uint8_t*)oo);
33574                                         found_card_p = card_set_p (crd);
33575                                         need_card_p = FALSE;
33576                                     }
33577                                     if ((*oo < ephemeral_high) && (*oo >= next_boundary))
33578                                     {
33579                                         need_card_p = TRUE;
33580                                     }
33581
33582                                 if (need_card_p && !found_card_p)
33583                                 {
33584
33585                                         dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
33586                                                     card_of (curr_object), (size_t)curr_object,
33587                                                     card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
33588                                         FATAL_GC_ERROR();
33589                                     }
33590                                 }
33591                                     );
33592                         }
33593                         if (need_card_p && !found_card_p)
33594                         {
33595                             dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
33596                                     card_of (curr_object), (size_t)curr_object,
33597                                     card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
33598                             FATAL_GC_ERROR();
33599                         }
33600                     }
33601                 }
33602                 total_objects_verified_deep++;
33603             }
33604         }
33605
33606         prev_object = curr_object;
33607         prev_brick = curr_brick;
33608         curr_object = curr_object + Align(s, align_const);
33609         if (curr_object < prev_object)
33610         {
33611             dprintf (3, ("overflow because of a bad object size: %Ix size %Ix", prev_object, s));
33612             FATAL_GC_ERROR();
33613         }
33614     }
33615
33616 #ifdef BACKGROUND_GC
33617     dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id", 
33618                  (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p () ? "FGC" : "NGC")),
33619                  (begin_gc_p ? "BEG" : "END"),
33620                  ((current_c_gc_state == c_gc_state_planning) ? "in plan" : "not in plan"),
33621                  total_objects_verified, total_objects_verified_deep));
33622     if (current_c_gc_state != c_gc_state_planning)
33623     {
33624         assert (total_objects_verified == total_objects_verified_deep);
33625     }
33626 #endif //BACKGROUND_GC
33627     
33628     verify_free_lists();
33629
33630 #ifdef FEATURE_PREMORTEM_FINALIZATION
33631     finalize_queue->CheckFinalizerObjects();
33632 #endif // FEATURE_PREMORTEM_FINALIZATION
33633
33634     {
33635         // to be consistent with handle table APIs pass a ScanContext*
33636         // to provide the heap number.  the SC isn't complete though so
33637         // limit its scope to handle table verification.
33638         ScanContext sc;
33639         sc.thread_number = heap_number;
33640         GCScan::VerifyHandleTable(max_generation, max_generation, &sc);
33641     }
33642
33643 #ifdef MULTIPLE_HEAPS
33644     current_join->join(this, gc_join_verify_objects_done);
33645     if (current_join->joined())
33646 #endif //MULTIPLE_HEAPS
33647     {
33648         GCToEEInterface::VerifySyncTableEntry();
33649 #ifdef MULTIPLE_HEAPS
33650         current_join->restart();
33651 #endif //MULTIPLE_HEAPS
33652     }
33653
33654 #ifdef BACKGROUND_GC 
33655     if (!settings.concurrent)
33656     {
33657         if (current_c_gc_state == c_gc_state_planning)
33658         {
33659             // temporarily commenting this out 'cause an FGC
33660             // could be triggered before we sweep ephemeral.
33661             //verify_seg_end_mark_array_cleared();
33662         }
33663     }
33664
33665     if (settings.concurrent)
33666     {
33667         verify_mark_array_cleared();
33668     }
33669     dprintf (2,("GC#%d(%s): Verifying heap - end", 
33670         VolatileLoad(&settings.gc_index), 
33671         (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
33672 #else
33673     dprintf (2,("GC#%d: Verifying heap - end", VolatileLoad(&settings.gc_index)));
33674 #endif //BACKGROUND_GC 
33675 }
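// Illustrative sketch (not part of the original source): the card checks in
// verify_heap above enforce the write-barrier invariant. If a gen2 object stores
// a reference to an ephemeral object (i.e. *oo falls in [next_boundary,
// ephemeral_high)), the card covering that field must be set; an unset card
// there would let an ephemeral GC miss the cross-generation reference.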
33676
33677 #endif  //VERIFY_HEAP
33678
33679
33680 void GCHeap::ValidateObjectMember (Object* obj)
33681 {
33682 #ifdef VERIFY_HEAP
33683     size_t s = size (obj);
33684     uint8_t* o = (uint8_t*)obj;
33685
33686     go_through_object_cl (method_table (obj), o, s, oo,
33687                                 {
33688                                     uint8_t* child_o = *oo;
33689                                     if (child_o)
33690                                     {
33691                                         dprintf (3, ("VOM: m: %Ix obj %Ix", (size_t)child_o, o));
33692                                         MethodTable *pMT = method_table (child_o);
33693                                         assert(pMT);
33694                                         if (!pMT->SanityCheck()) {
33695                                             dprintf (3, ("Bad member of %Ix %Ix",
33696                                                         (size_t)oo, (size_t)child_o));
33697                                             FATAL_GC_ERROR();
33698                                         }
33699                                     }
33700                                 } );
33701 #endif // VERIFY_HEAP
33702 }
33703
33704 void DestructObject (CObjectHeader* hdr)
33705 {
33706     UNREFERENCED_PARAMETER(hdr); // compiler bug? -- this *is*, indeed, referenced
33707     hdr->~CObjectHeader();
33708 }
33709
33710 HRESULT GCHeap::Shutdown ()
33711 {
33712     deleteGCShadow();
33713
33714     GCScan::GcRuntimeStructuresValid (FALSE);
33715
33716     // Cannot assert this, since we use SuspendEE as the mechanism to quiesce all
33717     // threads except the one performing the shutdown.
33718     // ASSERT( !GcInProgress );
33719
33720     // Guard against any more GC occurring and against any threads blocking
33721     // for GC to complete when the GC heap is gone.  This fixes a race condition
33722     // where a thread in GC is destroyed as part of process destruction and
33723     // the remaining threads block for GC complete.
33724
33725     //GCTODO
33726     //EnterAllocLock();
33727     //Enter();
33728     //EnterFinalizeLock();
33729     //SetGCDone();
33730
33731     // during shutdown a lot of threads are suspended
33732     // on this event; we don't want to wake them up just yet
33733     //CloseHandle (WaitForGCEvent);
33734
33735     //find out if the global card table hasn't been used yet
33736     uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
33737     if (card_table_refcount (ct) == 0)
33738     {
33739         destroy_card_table (ct);
33740         g_gc_card_table = nullptr;
33741
33742 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
33743         g_gc_card_bundle_table = nullptr;
33744 #endif
33745 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
33746         SoftwareWriteWatch::StaticClose();
33747 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
33748     }
33749
33750     //destroy all segments on the standby list
33751     while(gc_heap::segment_standby_list != 0)
33752     {
33753         heap_segment* next_seg = heap_segment_next (gc_heap::segment_standby_list);
33754 #ifdef MULTIPLE_HEAPS
33755         (gc_heap::g_heaps[0])->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
33756 #else //MULTIPLE_HEAPS
33757         pGenGCHeap->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
33758 #endif //MULTIPLE_HEAPS
33759         gc_heap::segment_standby_list = next_seg;
33760     }
33761
33762
33763 #ifdef MULTIPLE_HEAPS
33764
33765     for (int i = 0; i < gc_heap::n_heaps; i ++)
33766     {
33767         delete gc_heap::g_heaps[i]->vm_heap;
33768         //destroy pure GC stuff
33769         gc_heap::destroy_gc_heap (gc_heap::g_heaps[i]);
33770     }
33771 #else
33772     gc_heap::destroy_gc_heap (pGenGCHeap);
33773
33774 #endif //MULTIPLE_HEAPS
33775     gc_heap::shutdown_gc();
33776
33777     return S_OK;
33778 }
33779
33780 // Wait until a garbage collection is complete
33781 // returns NOERROR if wait was OK, other error code if failure.
33782 // WARNING: This will not undo the must complete state. If you are
33783 // in a must complete when you call this, you'd better know what you're
33784 // doing.
33785
33786 #ifdef FEATURE_PREMORTEM_FINALIZATION
33787 static
33788 HRESULT AllocateCFinalize(CFinalize **pCFinalize)
33789 {
33790     *pCFinalize = new (nothrow) CFinalize();
33791     if (*pCFinalize == NULL || !(*pCFinalize)->Initialize())
33792         return E_OUTOFMEMORY;
33793
33794     return S_OK;
33795 }
33796 #endif // FEATURE_PREMORTEM_FINALIZATION
33797
33798 // init the instance heap
33799 HRESULT GCHeap::Init(size_t hn)
33800 {
33801     HRESULT hres = S_OK;
33802
33803 #ifdef MULTIPLE_HEAPS
33804     if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0)
33805         hres = E_OUTOFMEMORY;
33806 #else
33807     UNREFERENCED_PARAMETER(hn);
33808     if (!gc_heap::make_gc_heap())
33809         hres = E_OUTOFMEMORY;
33810 #endif //MULTIPLE_HEAPS
33811
33812     // Failed.
33813     return hres;
33814 }
33815
33816 //System wide initialization
33817 HRESULT GCHeap::Initialize ()
33818 {
33819     HRESULT hr = S_OK;
33820
33821     g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable();
33822     g_num_processors = GCToOSInterface::GetTotalProcessorCount();
33823     assert(g_num_processors != 0);
33824
33825 //Initialize the static members.
33826 #ifdef TRACE_GC
33827     GcDuration = 0;
33828     CreatedObjectCount = 0;
33829 #endif //TRACE_GC
33830
33831     size_t seg_size = get_valid_segment_size();
33832     gc_heap::soh_segment_size = seg_size;
33833     size_t large_seg_size = get_valid_segment_size(TRUE);
33834     gc_heap::min_loh_segment_size = large_seg_size;
33835     gc_heap::min_segment_size = min (seg_size, large_seg_size);
33836 #ifdef SEG_MAPPING_TABLE
33837     gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_heap::min_segment_size);
33838 #endif //SEG_MAPPING_TABLE
33839
33840 #ifdef MULTIPLE_HEAPS
33841     uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
33842
33843     // GetGCProcessCpuCount only returns up to 64 procs.
33844     unsigned int nhp_from_process = GCToOSInterface::CanEnableGCCPUGroups() ?
33845                                     GCToOSInterface::GetTotalProcessorCount():
33846                                     GCToOSInterface::GetCurrentProcessCpuCount();
33847
33848     unsigned int nhp = ((nhp_from_config == 0) ? nhp_from_process :
33849                                                  (min (nhp_from_config, nhp_from_process)));
33850
33851
33852     nhp = min (nhp, MAX_SUPPORTED_CPUS);
33853
33854     if (GCConfig::GetNoAffinitize())
33855         gc_heap::gc_thread_no_affinitize_p = true;
33856
33857 #if !defined(FEATURE_REDHAWK) && !defined(FEATURE_CORECLR)
33858     if (!(gc_heap::gc_thread_no_affinitize_p))
33859     {
33860         if (!(GCToOSInterface::CanEnableGCCPUGroups()))
33861         {
33862             size_t gc_thread_affinity_mask = static_cast<size_t>(GCConfig::GetGCHeapAffinitizeMask());
33863
33864             uintptr_t pmask, smask;
33865             if (GCToOSInterface::GetCurrentProcessAffinityMask(&pmask, &smask))
33866             {
33867                 pmask &= smask;
33868
33869                 if (gc_thread_affinity_mask)
33870                 {
33871                     pmask &= gc_thread_affinity_mask;
33872                 }
33873
33874                 process_mask = pmask;
33875
33876                 unsigned int set_bits_in_pmask = 0;
33877                 while (pmask)
33878                 {
33879                     if (pmask & 1)
33880                         set_bits_in_pmask++;
33881                     pmask >>= 1;
33882                 }
33883
33884                 nhp = min (nhp, set_bits_in_pmask);
33885             }
33886             else
33887             {
33888                 gc_heap::gc_thread_no_affinitize_p = true;
33889             }
33890         }
33891     }
33892 #endif //!FEATURE_REDHAWK && !FEATURE_CORECLR
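// Illustrative sketch (not part of the original source): the loop above counts
// the set bits of the effective affinity mask to cap the heap count. For
// instance, a pmask of 0b00101010 (procs 1, 3 and 5 usable) gives
// set_bits_in_pmask == 3, so at most 3 GC heaps/threads are created.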
33893
33894     hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/, nhp);
33895 #else
33896     hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/);
33897 #endif //MULTIPLE_HEAPS
33898
33899     if (hr != S_OK)
33900         return hr;
33901
33902     gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit();
33903
33904     gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
33905 #ifndef MULTIPLE_HEAPS
33906     gc_heap::mem_one_percent /= g_num_processors;
33907 #endif //!MULTIPLE_HEAPS
33908
33909     uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent();
33910     if (highmem_th_from_config)
33911     {
33912         gc_heap::high_memory_load_th = min (99, highmem_th_from_config);
33913         gc_heap::v_high_memory_load_th = min (99, (highmem_th_from_config + 7));
33914     }
33915     else
33916     {
33917         // We should only use this if we are in the "many process" mode which really is only applicable
33918         // to very powerful machines - before that's implemented, temporarily I am only enabling this for 80GB+ memory. 
33919         // For now I am using an estimate to calculate these numbers but this should really be obtained 
33920         // programmatically going forward.
33921         // I am assuming 47 processes using WKS GC and 3 using SVR GC.
33922         // I am assuming 3 in part due to the "very high memory load" is 97%.
33923         int available_mem_th = 10;
33924         if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
33925         {
33926             int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(GCToOSInterface::GetTotalProcessorCount()));
33927             available_mem_th = min (available_mem_th, adjusted_available_mem_th);
33928         }
33929
33930         gc_heap::high_memory_load_th = 100 - available_mem_th;
33931         gc_heap::v_high_memory_load_th = 97;
33932     }
33933
33934     gc_heap::m_high_memory_load_th = min ((gc_heap::high_memory_load_th + 5), gc_heap::v_high_memory_load_th);
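    // Illustrative sketch (not part of the original source): with a hypothetical
    // GCHighMemPercent of 85, the thresholds above come out as
    //   high_memory_load_th   = min (99, 85)     = 85
    //   v_high_memory_load_th = min (99, 85 + 7) = 92
    //   m_high_memory_load_th = min (85 + 5, 92) = 90
    // and on the default path (available_mem_th == 10) as 90 / 97 / 95.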
33935
33936     gc_heap::pm_stress_on = (GCConfig::GetGCProvModeStress() != 0);
33937
33938 #if defined(BIT64) 
33939     gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent;
33940 #endif // BIT64
33941
33942     WaitForGCEvent = new (nothrow) GCEvent;
33943
33944     if (!WaitForGCEvent)
33945     {
33946         return E_OUTOFMEMORY;
33947     }
33948
33949     if (!WaitForGCEvent->CreateManualEventNoThrow(TRUE))
33950     {
33951         return E_FAIL;
33952     }
33953
33954 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
33955 #if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS)
33956     if (GCStress<cfg_any>::IsEnabled())  {
33957         for(int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++)
33958         {
33959             m_StressObjs[i] = CreateGlobalHandle(0);
33960         }
33961         m_CurStressObj = 0;
33962     }
33963 #endif //STRESS_HEAP && !MULTIPLE_HEAPS
33964 #endif // FEATURE_REDHAWK
33965
33966     initGCShadow();         // If we are debugging write barriers, initialize heap shadow
33967
33968 #ifdef MULTIPLE_HEAPS
33969
33970     for (unsigned i = 0; i < nhp; i++)
33971     {
33972         GCHeap* Hp = new (nothrow) GCHeap();
33973         if (!Hp)
33974             return E_OUTOFMEMORY;
33975
33976         if ((hr = Hp->Init (i))!= S_OK)
33977         {
33978             return hr;
33979         }
33980     }
33981     // initialize numa node to heap map
33982     heap_select::init_numa_node_to_heap_map(nhp);
33983 #else
33984     hr = Init (0);
33985 #endif //MULTIPLE_HEAPS
33986
33987     if (hr == S_OK)
33988     {
33989         GCScan::GcRuntimeStructuresValid (TRUE);
33990
33991         GCToEEInterface::DiagUpdateGenerationBounds();
33992     }
33993
33994     return hr;
33995 };
33996
33997 ////
33998 // GC callback functions
33999 bool GCHeap::IsPromoted(Object* object)
34000 {
34001 #ifdef _DEBUG
34002     ((CObjectHeader*)object)->Validate();
34003 #endif //_DEBUG
34004
34005     uint8_t* o = (uint8_t*)object;
34006
34007     if (gc_heap::settings.condemned_generation == max_generation)
34008     {
34009 #ifdef MULTIPLE_HEAPS
34010         gc_heap* hp = gc_heap::g_heaps[0];
34011 #else
34012         gc_heap* hp = pGenGCHeap;
34013 #endif //MULTIPLE_HEAPS
34014
34015 #ifdef BACKGROUND_GC
34016         if (gc_heap::settings.concurrent)
34017         {
34018             bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
34019                             hp->background_marked (o));
34020             return is_marked;
34021         }
34022         else
34023 #endif //BACKGROUND_GC
34024         {
34025             return (!((o < hp->highest_address) && (o >= hp->lowest_address))
34026                     || hp->is_mark_set (o));
34027         }
34028     }
34029     else
34030     {
34031         gc_heap* hp = gc_heap::heap_of (o);
34032         return (!((o < hp->gc_high) && (o >= hp->gc_low))
34033                 || hp->is_mark_set (o));
34034     }
34035 }
34036
34037 size_t GCHeap::GetPromotedBytes(int heap_index)
34038 {
34039 #ifdef BACKGROUND_GC
34040     if (gc_heap::settings.concurrent)
34041     {
34042         return gc_heap::bpromoted_bytes (heap_index);
34043     }
34044     else
34045 #endif //BACKGROUND_GC
34046     {
34047         return gc_heap::promoted_bytes (heap_index);
34048     }
34049 }
34050
34051 void GCHeap::SetYieldProcessorScalingFactor (float scalingFactor)
34052 {
34053     assert (yp_spin_count_unit != 0);
34054     int saved_yp_spin_count_unit = yp_spin_count_unit;
34055     yp_spin_count_unit = (int)((float)yp_spin_count_unit * scalingFactor / (float)9);
34056
34057     // It's very suspicious if it becomes 0
34058     if (yp_spin_count_unit == 0)
34059     {
34060         yp_spin_count_unit = saved_yp_spin_count_unit;
34061     }
34062 }
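// Illustrative sketch (not part of the original source): the division by 9 above
// means a scalingFactor of 9 leaves yp_spin_count_unit unchanged. E.g. with
// yp_spin_count_unit == 32 and scalingFactor == 4.5, the new value is
// (int)(32 * 4.5 / 9) == 16; a result of 0 is treated as suspicious and the
// previous value is restored.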
34063
34064 unsigned int GCHeap::WhichGeneration (Object* object)
34065 {
34066     gc_heap* hp = gc_heap::heap_of ((uint8_t*)object);
34067     unsigned int g = hp->object_gennum ((uint8_t*)object);
34068     dprintf (3, ("%Ix is in gen %d", (size_t)object, g));
34069     return g;
34070 }
34071
34072 bool GCHeap::IsEphemeral (Object* object)
34073 {
34074     uint8_t* o = (uint8_t*)object;
34075     gc_heap* hp = gc_heap::heap_of (o);
34076     return !!hp->ephemeral_pointer_p (o);
34077 }
34078
34079 // Return NULL if can't find next object. When EE is not suspended,
34080 // the result is not accurate: if the input arg is in gen0, the function could 
34081 // return zeroed out memory as next object
34082 Object * GCHeap::NextObj (Object * object)
34083 {
34084 #ifdef VERIFY_HEAP
34085     uint8_t* o = (uint8_t*)object;
34086
34087 #ifndef FEATURE_BASICFREEZE
34088     if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address)))
34089     {
34090         return NULL;
34091     }
34092 #endif //!FEATURE_BASICFREEZE
34093
34094     heap_segment * hs = gc_heap::find_segment (o, FALSE);
34095     if (!hs)
34096     {
34097         return NULL;
34098     }
34099
34100     BOOL large_object_p = heap_segment_loh_p (hs);
34101     if (large_object_p)
34102         return NULL; //could be racing with another core allocating. 
34103 #ifdef MULTIPLE_HEAPS
34104     gc_heap* hp = heap_segment_heap (hs);
34105 #else //MULTIPLE_HEAPS
34106     gc_heap* hp = 0;
34107 #endif //MULTIPLE_HEAPS
34108     unsigned int g = hp->object_gennum ((uint8_t*)object);
34109     if ((g == 0) && hp->settings.demotion)
34110         return NULL;//could be racing with another core allocating. 
34111     int align_const = get_alignment_constant (!large_object_p);
34112     uint8_t* nextobj = o + Align (size (o), align_const);
34113     if (nextobj <= o) // either overflow or 0 sized object.
34114     {
34115         return NULL;
34116     }
34117
34118     if ((nextobj < heap_segment_mem(hs)) || 
34119         (nextobj >= heap_segment_allocated(hs) && hs != hp->ephemeral_heap_segment) || 
34120         (nextobj >= hp->alloc_allocated))
34121     {
34122         return NULL;
34123     }
34124
34125     return (Object *)nextobj;
34126 #else
34127     return nullptr;
34128 #endif // VERIFY_HEAP
34129 }
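// Illustrative usage sketch (not part of the original source): with the EE
// suspended, NextObj can drive a simple linear walk of small objects.
// 'heap' is an assumed GCHeap* and 'firstObj' a valid object pointer:
//
//   for (Object* o = firstObj; o != nullptr; o = heap->NextObj (o))
//   {
//       // inspect o; the walk stops at the segment/race/overflow checks above
//   }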
34130
34131 #ifdef VERIFY_HEAP
34132
34133 #ifdef FEATURE_BASICFREEZE
34134 BOOL GCHeap::IsInFrozenSegment (Object * object)
34135 {
34136     uint8_t* o = (uint8_t*)object;
34137     heap_segment * hs = gc_heap::find_segment (o, FALSE);
34138     //We create a frozen object for each frozen segment before the segment is inserted
34139     //to segment list; during ngen, we could also create frozen objects in segments which
34140     //don't belong to current GC heap.
34141     //So we return true if hs is NULL. It might create a hole in detecting invalid
34142     //objects. But given all the other checks present, the hole should be very small.
34143     return !hs || heap_segment_read_only_p (hs);
34144 }
34145 #endif //FEATURE_BASICFREEZE
34146
34147 #endif //VERIFY_HEAP
34148
34149 // returns TRUE if the pointer is in one of the GC heaps.
34150 bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only)
34151 {
34152     STATIC_CONTRACT_SO_TOLERANT;
34153
34154     // removed STATIC_CONTRACT_CAN_TAKE_LOCK here because find_segment 
34155     // no longer calls GCEvent::Wait which eventually takes a lock.
34156
34157     uint8_t* object = (uint8_t*) vpObject;
34158 #ifndef FEATURE_BASICFREEZE
34159     if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address)))
34160         return FALSE;
34161 #endif //!FEATURE_BASICFREEZE
34162
34163     heap_segment * hs = gc_heap::find_segment (object, small_heap_only);
34164     return !!hs;
34165 }
34166
34167 #ifdef STRESS_PINNING
34168 static int n_promote = 0;
34169 #endif //STRESS_PINNING
34170 // promote an object
34171 void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
34172 {
34173     THREAD_NUMBER_FROM_CONTEXT;
34174 #ifndef MULTIPLE_HEAPS
34175     const int thread = 0;
34176 #endif //!MULTIPLE_HEAPS
34177
34178     uint8_t* o = (uint8_t*)*ppObject;
34179
34180     if (o == 0)
34181         return;
34182
34183 #ifdef DEBUG_DestroyedHandleValue
34184     // we can race with destroy handle during concurrent scan
34185     if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
34186         return;
34187 #endif //DEBUG_DestroyedHandleValue
34188
34189     HEAP_FROM_THREAD;
34190
34191     gc_heap* hp = gc_heap::heap_of (o);
34192
34193     dprintf (3, ("Promote %Ix", (size_t)o));
34194
34195 #ifdef INTERIOR_POINTERS
34196     if (flags & GC_CALL_INTERIOR)
34197     {
34198         if ((o < hp->gc_low) || (o >= hp->gc_high))
34199         {
34200             return;
34201         }
34202         if ( (o = hp->find_object (o, hp->gc_low)) == 0)
34203         {
34204             return;
34205         }
34206
34207     }
34208 #endif //INTERIOR_POINTERS
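    // Illustrative sketch (not part of the original source): with GC_CALL_INTERIOR,
    // 'o' may point into the middle of an object (e.g. at an array element a few
    // words past the header); find_object above maps such an interior pointer back
    // to the start of the containing object before it is pinned/marked.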
34209
34210 #ifdef FEATURE_CONSERVATIVE_GC
34211     // For conservative GC, a value on stack may point to middle of a free object.
34212     // In this case, we don't need to promote the pointer.
34213     if (GCConfig::GetConservativeGC()
34214         && ((CObjectHeader*)o)->IsFree())
34215     {
34216         return;
34217     }
34218 #endif
34219
34220 #ifdef _DEBUG
34221     ((CObjectHeader*)o)->ValidatePromote(sc, flags);
34222 #else 
34223     UNREFERENCED_PARAMETER(sc);
34224 #endif //_DEBUG
34225
34226     if (flags & GC_CALL_PINNED)
34227         hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
34228
34229 #ifdef STRESS_PINNING
34230     if ((++n_promote % 20) == 1)
34231             hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
34232 #endif //STRESS_PINNING
34233
34234 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
34235     size_t promoted_size_begin = hp->promoted_bytes (thread);
34236 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
34237
34238     if ((o >= hp->gc_low) && (o < hp->gc_high))
34239     {
34240         hpt->mark_object_simple (&o THREAD_NUMBER_ARG);
34241     }
34242
34243 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
34244     size_t promoted_size_end = hp->promoted_bytes (thread);
34245     if (g_fEnableAppDomainMonitoring)
34246     {
34247         if (sc->pCurrentDomain)
34248         {
34249             GCToEEInterface::RecordSurvivedBytesForHeap((promoted_size_end - promoted_size_begin), thread, sc->pCurrentDomain);
34250         }
34251     }
34252 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
34253
34254     STRESS_LOG_ROOT_PROMOTE(ppObject, o, o ? header(o)->GetMethodTable() : NULL);
34255 }
34256
34257 void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
34258                        uint32_t flags)
34259 {
34260     UNREFERENCED_PARAMETER(sc);
34261
34262     uint8_t* object = (uint8_t*)(Object*)(*ppObject);
34263     
34264     THREAD_NUMBER_FROM_CONTEXT;
34265
34266     //dprintf (3, ("Relocate location %Ix\n", (size_t)ppObject));
34267     dprintf (3, ("R: %Ix", (size_t)ppObject));
34268     
34269     if (object == 0)
34270         return;
34271
34272     gc_heap* hp = gc_heap::heap_of (object);
34273
34274 #ifdef _DEBUG
34275     if (!(flags & GC_CALL_INTERIOR))
34276     {
34277         // We cannot validate this object if it's in the condemned gen because it could 
34278         // be one of the objects that were overwritten by an artificial gap due to a pinned plug.
34279         if (!((object >= hp->gc_low) && (object < hp->gc_high)))
34280         {
34281             ((CObjectHeader*)object)->Validate(FALSE);
34282         }
34283     }
34284 #endif //_DEBUG
34285
34286     dprintf (3, ("Relocate %Ix\n", (size_t)object));
34287
34288     uint8_t* pheader;
34289
34290     if ((flags & GC_CALL_INTERIOR) && gc_heap::settings.loh_compaction)
34291     {
34292         if (!((object >= hp->gc_low) && (object < hp->gc_high)))
34293         {
34294             return;
34295         }
34296
34297         if (gc_heap::loh_object_p (object))
34298         {
34299             pheader = hp->find_object (object, 0);
34300             if (pheader == 0)
34301             {
34302                 return;
34303             }
34304
34305             ptrdiff_t ref_offset = object - pheader;
34306             hp->relocate_address(&pheader THREAD_NUMBER_ARG);
34307             *ppObject = (Object*)(pheader + ref_offset);
34308             return;
34309         }
34310     }
34311
34312     {
34313         pheader = object;
34314         hp->relocate_address(&pheader THREAD_NUMBER_ARG);
34315         *ppObject = (Object*)pheader;
34316     }
34317
34318     STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0));
34319 }
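// Illustrative sketch (not part of the original source): for an interior pointer
// into a compacted LOH object, the code above preserves the intra-object offset.
// E.g. if pheader == 0x10000 and the interior ref is 0x10040, ref_offset == 0x40;
// after relocate_address moves pheader to 0x20000, *ppObject becomes 0x20040.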
34320
34321 /*static*/ bool GCHeap::IsObjectInFixedHeap(Object *pObj)
34322 {
34323     // For now we simply look at the size of the object to determine if it in the
34324     // fixed heap or not. If the bit indicating this gets set at some point
34325     // we should key off that instead.
34326     return size( pObj ) >= loh_size_threshold;
34327 }
34328
34329 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
34330 #ifdef STRESS_HEAP
34331
34332 void StressHeapDummy ();
34333
34334 static int32_t GCStressStartCount = -1;
34335 static int32_t GCStressCurCount = 0;
34336 static int32_t GCStressStartAtJit = -1;
34337
34338 // the maximum number of foreground GCs we'll induce during one BGC
34339 // (this number does not include "naturally" occurring GCs).
34340 static int32_t GCStressMaxFGCsPerBGC = -1;
34341
34342 // The CLRRandom implementation can produce FPU exceptions if
34343 // the test/application run by the CLR enables any FPU exceptions.
34344 // We want to avoid any unexpected exception coming from the stress
34345 // infrastructure, so CLRRandom is not an option.
34346 // The code below is a replica of the CRT rand() implementation.
34347 // Using CRT rand() directly is not an option because we would interfere with the
34348 // user application that may also use it.
34349 int StressRNG(int iMaxValue)
34350 {
34351     static BOOL bisRandInit = FALSE;
34352     static int lHoldrand = 1L;
34353
34354     if (!bisRandInit)
34355     {
34356         lHoldrand = (int)time(NULL);
34357         bisRandInit = TRUE;
34358     }
34359     int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff);
34360     return randValue % iMaxValue;
34361 }
34362 #endif // STRESS_HEAP
34363 #endif // !FEATURE_REDHAWK
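
// A self-contained sketch of the same CRT-style linear congruential generator,
// with an explicit seed so it is reproducible in isolation. Note that the final
// modulo in StressRNG introduces a small bias toward low values, which is fine
// for stress scheduling but would not be for anything statistical.
#include <cstdio>

static unsigned g_holdrand_sketch = 1;

static int stress_rng_sketch (int iMaxValue)
{
    g_holdrand_sketch = g_holdrand_sketch * 214013u + 2531011u;  // same LCG constants as above
    return (int)((g_holdrand_sketch >> 16) & 0x7fff) % iMaxValue;
}

// Example: five draws in [0, 10), e.g. for the stress generation pick below.
static void stress_rng_demo_sketch ()
{
    for (int i = 0; i < 5; i++)
        printf ("%d\n", stress_rng_sketch (10));
}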
34364
34365 // Free up an object so that things will move, then do a GC.
34366 // Returns TRUE if a GC actually happens, otherwise FALSE.
34367 bool GCHeap::StressHeap(gc_alloc_context * context)
34368 {
34369 #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
34370     assert(context != nullptr);
34371     alloc_context* acontext = static_cast<alloc_context*>(context);
34372
34373     // if GC stress was dynamically disabled during this run we return FALSE
34374     if (!GCStressPolicy::IsEnabled())
34375         return FALSE;
34376
34377 #ifdef _DEBUG
34378     if (g_pConfig->FastGCStressLevel() && !GCToEEInterface::GetThread()->StressHeapIsEnabled()) {
34379         return FALSE;
34380     }
34381
34382 #endif //_DEBUG
34383
34384     if ((g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_UNIQUE)
34385 #ifdef _DEBUG
34386         || g_pConfig->FastGCStressLevel() > 1
34387 #endif //_DEBUG
34388         ) {
34389         if (!Thread::UniqueStack(&acontext)) {
34390             return FALSE;
34391         }
34392     }
34393
34394 #ifdef BACKGROUND_GC
34395     // Don't trigger a GC from the GC threads but still trigger GCs from user threads.
34396     if (GCToEEInterface::WasCurrentThreadCreatedByGC())
34397     {
34398         return FALSE;
34399     }
34400 #endif //BACKGROUND_GC
34401
34402     if (GCStressStartAtJit == -1 || GCStressStartCount == -1)
34403     {
34404         GCStressStartCount = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStressStart);
34405         GCStressStartAtJit = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStartAtJit);
34406     }
34407
34408     if (GCStressMaxFGCsPerBGC == -1)
34409     {
34410         GCStressMaxFGCsPerBGC = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMaxFGCsPerBGC);
34411         if (g_pConfig->IsGCStressMix() && GCStressMaxFGCsPerBGC == -1)
34412             GCStressMaxFGCsPerBGC = 6;
34413     }
34414
34415 #ifdef _DEBUG
34416     if (g_JitCount < GCStressStartAtJit)
34417         return FALSE;
34418 #endif //_DEBUG
34419
34420     // Allow the programmer to skip the first N stress GCs so that they can
34421     // get to the interesting ones faster.
34422     Interlocked::Increment(&GCStressCurCount);
34423     if (GCStressCurCount < GCStressStartCount)
34424         return FALSE;
34425
34426     // Throttle the number of stress-induced GCs by a factor given by GCStressStep.
34427     if ((GCStressCurCount % g_pConfig->GetGCStressStep()) != 0)
34428     {
34429         return FALSE;
34430     }
34431
34432 #ifdef BACKGROUND_GC
34433     if (IsConcurrentGCEnabled() && IsConcurrentGCInProgress())
34434     {
34435         // Allow a maximum number of stress-induced FGCs during one BGC.
34436         if (gc_stress_fgcs_in_bgc >= GCStressMaxFGCsPerBGC)
34437             return FALSE;
34438         ++gc_stress_fgcs_in_bgc;
34439     }
34440 #endif // BACKGROUND_GC
34441
34442     if (g_pStringClass == 0)
34443     {
34444         // If the String class has not been loaded, don't do any stressing. This should
34445         // be kept to a minimum to get as complete coverage as possible.
34446         _ASSERTE(g_fEEInit);
34447         return FALSE;
34448     }
34449
34450 #ifndef MULTIPLE_HEAPS
34451     static int32_t OneAtATime = -1;
34452
34453     // Only bother with this if the stress level is big enough and if nobody else is
34454     // doing it right now.  Note that some callers are inside the AllocLock and are
34455     // guaranteed synchronized.  But others are using AllocationContexts and have no
34456     // particular synchronization.
34457     //
34458     // For this latter case, we want a very high-speed way of limiting this to one
34459     // at a time.  A secondary advantage is that we release part of our StressObjs
34460     // buffer sparingly but just as effectively.
34461
34462     if (Interlocked::Increment(&OneAtATime) == 0 &&
34463         !TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
34464     {
34465         StringObject* str;
34466
34467         // If the current string is used up
34468         if (HndFetchHandle(m_StressObjs[m_CurStressObj]) == 0)
34469         {
34470             // Populate handles with strings
34471             int i = m_CurStressObj;
34472             while(HndFetchHandle(m_StressObjs[i]) == 0)
34473             {
34474                 _ASSERTE(m_StressObjs[i] != 0);
34475                 unsigned strLen = ((unsigned)loh_size_threshold - 32) / sizeof(WCHAR);
34476                 unsigned strSize = PtrAlign(StringObject::GetSize(strLen));
34477                 
34478                 // update the cached type handle before allocating
34479                 SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
34480                 str = (StringObject*) pGenGCHeap->allocate (strSize, acontext);
34481                 if (str)
34482                 {
34483                     str->SetMethodTable (g_pStringClass);
34484                     str->SetStringLength (strLen);
34485                     HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str));
34486                 }
34487                 i = (i + 1) % NUM_HEAP_STRESS_OBJS;
34488                 if (i == m_CurStressObj) break;
34489             }
34490
34491             // advance the current handle to the next string
34492             m_CurStressObj = (m_CurStressObj + 1) % NUM_HEAP_STRESS_OBJS;
34493         }
34494
34495         // Get the current string
34496         str = (StringObject*) OBJECTREFToObject(HndFetchHandle(m_StressObjs[m_CurStressObj]));
34497         if (str)
34498         {
34499             // Chop off the end of the string and form a new object out of it.
34500             // This will 'free' an object at the beginning of the heap, which will
34501             // force data movement.  Note that we can only do this so many times
34502             // before we have to move on to the next string.
34503             unsigned sizeOfNewObj = (unsigned)Align(min_obj_size * 31);
34504             if (str->GetStringLength() > sizeOfNewObj / sizeof(WCHAR))
34505             {
34506                 unsigned sizeToNextObj = (unsigned)Align(size(str));
34507                 uint8_t* freeObj = ((uint8_t*) str) + sizeToNextObj - sizeOfNewObj;
34508                 pGenGCHeap->make_unused_array (freeObj, sizeOfNewObj);                    
34509                 str->SetStringLength(str->GetStringLength() - (sizeOfNewObj / sizeof(WCHAR)));
34510             }
34511             else
34512             {
34513                 // Let the string itself become garbage;
34514                 // it will be reallocated next time around.
34515                 HndAssignHandle(m_StressObjs[m_CurStressObj], 0);
34516             }
34517         }
34518     }
34519     Interlocked::Decrement(&OneAtATime);
34520 #endif // !MULTIPLE_HEAPS
34521     if (IsConcurrentGCEnabled())
34522     {
34523         int rgen = StressRNG(10);
34524
34525         // gen0:gen1:gen2 distribution: 40:40:20
34526         if (rgen >= 8)
34527             rgen = 2;
34528         else if (rgen >= 4)
34529             rgen = 1;
34530         else
34531             rgen = 0;
34532
34533         GarbageCollectTry (rgen, FALSE, collection_gcstress);
34534     }
34535     else
34536     {
34537         GarbageCollect(max_generation, FALSE, collection_gcstress);
34538     }
34539
34540     return TRUE;
34541 #else
34542     UNREFERENCED_PARAMETER(context);
34543     return FALSE;
34544 #endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
34545 }
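
// The generation selection at the end of StressHeap, restated as a standalone
// helper: a draw in [0, 10) is bucketed so gen0:gen1:gen2 stress collections
// occur in the 40:40:20 ratio noted above.
static int pick_stress_generation_sketch (int draw /* 0..9 */)
{
    if (draw >= 8)
        return 2;   // 8, 9        -> gen2 (20%)
    else if (draw >= 4)
        return 1;   // 4, 5, 6, 7  -> gen1 (40%)
    else
        return 0;   // 0, 1, 2, 3  -> gen0 (40%)
}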
34546
34547
34548 #ifdef FEATURE_PREMORTEM_FINALIZATION
34549 #define REGISTER_FOR_FINALIZATION(_object, _size) \
34550     hp->finalize_queue->RegisterForFinalization (0, (_object), (_size))
34551 #else // FEATURE_PREMORTEM_FINALIZATION
34552 #define REGISTER_FOR_FINALIZATION(_object, _size) true
34553 #endif // FEATURE_PREMORTEM_FINALIZATION
34554
34555 #define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do {  \
34556     if ((_object) == NULL || ((_register) && !REGISTER_FOR_FINALIZATION(_object, _size)))   \
34557     {                                                                                       \
34558         STRESS_LOG_OOM_STACK(_size);                                                        \
34559         return NULL;                                                                        \
34560     }                                                                                       \
34561 } while (false)
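
// The do { ... } while (false) wrapper above is the usual idiom for making a
// multi-statement macro behave like a single statement. A minimal sketch of
// the hazard it avoids (names here are illustrative only):
#define CHECK_SKETCH(_p) do { if ((_p) == nullptr) return; } while (false)

static void check_consumer_sketch (int* p, bool extra_check)
{
    // Because of the do/while wrapper, the macro can be used as the body of an
    // if that has an else; a bare { ... } block would break at the ';'.
    if (extra_check)
        CHECK_SKETCH(p);
    else
        return;
}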
34562
34563 //
34564 // Small Object Allocator
34565 //
34566 //
34567 // Allocate small object with an alignment requirement of 8-bytes.
34568 Object*
34569 GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
34570 {
34571 #ifdef FEATURE_64BIT_ALIGNMENT
34572     CONTRACTL {
34573         NOTHROW;
34574         GC_TRIGGERS;
34575     } CONTRACTL_END;
34576
34577     alloc_context* acontext = static_cast<alloc_context*>(ctx);
34578
34579 #ifdef MULTIPLE_HEAPS
34580     if (acontext->get_alloc_heap() == 0)
34581     {
34582         AssignHeap (acontext);
34583         assert (acontext->get_alloc_heap());
34584     }
34585
34586     gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
34587 #else
34588     gc_heap* hp = pGenGCHeap;
34589 #endif //MULTIPLE_HEAPS
34590
34591     return AllocAlign8Common(hp, acontext, size, flags);
34592 #else
34593     UNREFERENCED_PARAMETER(ctx);
34594     UNREFERENCED_PARAMETER(size);
34595     UNREFERENCED_PARAMETER(flags);
34596     assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
34597     return nullptr;
34598 #endif  //FEATURE_64BIT_ALIGNMENT
34599 }
34600
34601 // Common code used by both variants of AllocAlign8 above.
34602 Object*
34603 GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint32_t flags)
34604 {
34605 #ifdef FEATURE_64BIT_ALIGNMENT
34606     CONTRACTL {
34607         NOTHROW;
34608         GC_TRIGGERS;
34609     } CONTRACTL_END;
34610
34611     gc_heap* hp = (gc_heap*)_hp;
34612
34613     TRIGGERSGC();
34614
34615     Object* newAlloc = NULL;
34616
34617 #ifdef TRACE_GC
34618 #ifdef COUNT_CYCLES
34619     AllocStart = GetCycleCount32();
34620     unsigned finish;
34621 #elif defined(ENABLE_INSTRUMENTATION)
34622     unsigned AllocStart = GetInstLogTime();
34623     unsigned finish;
34624 #endif //COUNT_CYCLES
34625 #endif //TRACE_GC
34626
34627     if (size < loh_size_threshold)
34628     {
34629 #ifdef TRACE_GC
34630         AllocSmallCount++;
34631 #endif //TRACE_GC
34632
34633         // Depending on where in the object the payload requiring 8-byte alignment resides we might have to
34634         // align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned
34635         // case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag.
34636         size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0;
34637
34638         // Retrieve the address of the next allocation from the context (note that we're inside the alloc
34639         // lock at this point).
34640         uint8_t*  result = acontext->alloc_ptr;
34641
34642         // Will an allocation at this point yield the correct alignment and fit into the remainder of the
34643         // context?
34644         if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
34645         {
34646             // Yes, we can just go ahead and make the allocation.
34647             newAlloc = (Object*) hp->allocate (size, acontext);
34648             ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
34649         }
34650         else
34651         {
34652             // No, either the next available address is not aligned in the way we require it or there's
34653             // not enough space to allocate an object of the required size. In both cases we allocate a
34654             // padding object (marked as a free object). This object's size is such that it will reverse
34655             // the alignment of the next header (asserted below).
34656             //
34657             // We allocate both together then decide based on the result whether we'll format the space as
34658             // free object + real object or real object + free object.
34659             ASSERT((Align(min_obj_size) & 7) == 4);
34660             CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext);
34661             if (freeobj)
34662             {
34663                 if (((size_t)freeobj & 7) == desiredAlignment)
34664                 {
34665                     // New allocation has desired alignment, return this one and place the free object at the
34666                     // end of the allocated space.
34667                     newAlloc = (Object*)freeobj;
34668                     freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size));
34669                 }
34670                 else
34671                 {
34672                     // New allocation is still mis-aligned, format the initial space as a free object and the
34673                     // rest of the space should be correctly aligned for the real object.
34674                     newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
34675                     ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
34676                 }
34677                 freeobj->SetFree(min_obj_size);
34678             }
34679         }
34680     }
34681     else
34682     {
34683         // The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't
34684         // support mis-aligned object headers so we can't support biased headers as above. Luckily for us
34685         // we've managed to arrange things so the only case where we see a bias is for boxed value types and
34686         // these can never get large enough to be allocated on the LOH.
34687         ASSERT(65536 < loh_size_threshold);
34688         ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0);
34689
34690         alloc_context* loh_acontext = generation_alloc_context (hp->generation_of (max_generation+1));
34691
34692         newAlloc = (Object*) hp->allocate_large_object (size, loh_acontext->alloc_bytes_loh);
34693         ASSERT(((size_t)newAlloc & 7) == 0);
34694     }
34695
34696     CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
34697
34698 #ifdef TRACE_GC
34699 #ifdef COUNT_CYCLES
34700     finish = GetCycleCount32();
34701 #elif defined(ENABLE_INSTRUMENTATION)
34702     finish = GetInstLogTime();
34703 #endif //COUNT_CYCLES
34704     AllocDuration += finish - AllocStart;
34705     AllocCount++;
34706 #endif //TRACE_GC
34707     return newAlloc;
34708 #else
34709     UNREFERENCED_PARAMETER(_hp);
34710     UNREFERENCED_PARAMETER(acontext);
34711     UNREFERENCED_PARAMETER(size);
34712     UNREFERENCED_PARAMETER(flags);
34713     assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
34714     return nullptr;
34715 #endif // FEATURE_64BIT_ALIGNMENT
34716 }
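
// A standalone sketch of the small-object bias placement above: given a raw
// allocation big enough for the object plus one minimum-size free object,
// place the free object before or after the real object so the header lands at
// (address % 8) == desired_bias (0, or 4 when GC_ALLOC_ALIGN8_BIAS is set).
// align_up_sketch and k_min_obj_size_sketch are stand-ins for Align and
// min_obj_size; the sketch assumes raw is at least 4-aligned.
#include <cstddef>
#include <cstdint>

static const size_t k_min_obj_size_sketch = 12;  // chosen so align_up(12) % 8 == 4
static size_t align_up_sketch (size_t n) { return (n + 3) & ~(size_t)3; }

static uint8_t* place_biased_object_sketch (uint8_t* raw, size_t size, size_t desired_bias,
                                            uint8_t** free_obj_out)
{
    if (((size_t)raw & 7) == desired_bias)
    {
        *free_obj_out = raw + align_up_sketch (size);   // free object trails the real one
        return raw;
    }
    *free_obj_out = raw;                                // leading free object flips the phase
    return raw + align_up_sketch (k_min_obj_size_sketch);
}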
34717
34718 Object *
34719 GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
34720 {
34721     CONTRACTL {
34722         NOTHROW;
34723         GC_TRIGGERS;
34724     } CONTRACTL_END;
34725
34726     TRIGGERSGC();
34727
34728     Object* newAlloc = NULL;
34729
34730 #ifdef TRACE_GC
34731 #ifdef COUNT_CYCLES
34732     AllocStart = GetCycleCount32();
34733     unsigned finish;
34734 #elif defined(ENABLE_INSTRUMENTATION)
34735     unsigned AllocStart = GetInstLogTime();
34736     unsigned finish;
34737 #endif //COUNT_CYCLES
34738 #endif //TRACE_GC
34739
34740 #ifdef MULTIPLE_HEAPS
34741     // take the first heap...
34742     gc_heap* hp = gc_heap::g_heaps[0];
34743 #else
34744     gc_heap* hp = pGenGCHeap;
34745 #ifdef _PREFAST_
34746     // Prefix complains about us dereferencing hp in the wks build even though we only access
34747     // static members this way. Not sure how to shut it up except with this ugly workaround:
34748     PREFIX_ASSUME(hp != NULL);
34749 #endif //_PREFAST_
34750 #endif //MULTIPLE_HEAPS
34751
34752     alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
34753
34754     newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
34755 #ifdef FEATURE_STRUCTALIGN
34756     newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
34757 #endif // FEATURE_STRUCTALIGN
34758     CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
34759
34760 #ifdef TRACE_GC
34761 #ifdef COUNT_CYCLES
34762     finish = GetCycleCount32();
34763 #elif defined(ENABLE_INSTRUMENTATION)
34764     finish = GetInstLogTime();
34765 #endif //COUNT_CYCLES
34766     AllocDuration += finish - AllocStart;
34767     AllocCount++;
34768 #endif //TRACE_GC
34769     return newAlloc;
34770 }
34771
34772 Object*
34773 GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL)
34774 {
34775     CONTRACTL {
34776         NOTHROW;
34777         GC_TRIGGERS;
34778     } CONTRACTL_END;
34779
34780     TRIGGERSGC();
34781
34782     Object* newAlloc = NULL;
34783     alloc_context* acontext = static_cast<alloc_context*>(context);
34784
34785 #ifdef TRACE_GC
34786 #ifdef COUNT_CYCLES
34787     AllocStart = GetCycleCount32();
34788     unsigned finish;
34789 #elif defined(ENABLE_INSTRUMENTATION)
34790     unsigned AllocStart = GetInstLogTime();
34791     unsigned finish;
34792 #endif //COUNT_CYCLES
34793 #endif //TRACE_GC
34794
34795 #ifdef MULTIPLE_HEAPS
34796     if (acontext->get_alloc_heap() == 0)
34797     {
34798         AssignHeap (acontext);
34799         assert (acontext->get_alloc_heap());
34800     }
34801 #endif //MULTIPLE_HEAPS
34802
34803 #ifdef MULTIPLE_HEAPS
34804     gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
34805 #else
34806     gc_heap* hp = pGenGCHeap;
34807 #ifdef _PREFAST_
34808     // Prefix complains about us dereferencing hp in the wks build even though we only access
34809     // static members this way. Not sure how to shut it up except with this ugly workaround:
34810     PREFIX_ASSUME(hp != NULL);
34811 #endif //_PREFAST_
34812 #endif //MULTIPLE_HEAPS
34813
34814     if (size < loh_size_threshold)
34815     {
34816
34817 #ifdef TRACE_GC
34818         AllocSmallCount++;
34819 #endif //TRACE_GC
34820         newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext);
34821 #ifdef FEATURE_STRUCTALIGN
34822         newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
34823 #endif // FEATURE_STRUCTALIGN
34824 //        ASSERT (newAlloc);
34825     }
34826     else 
34827     {
34828         newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
34829 #ifdef FEATURE_STRUCTALIGN
34830         newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
34831 #endif // FEATURE_STRUCTALIGN
34832     }
34833
34834     CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
34835
34836 #ifdef TRACE_GC
34837 #ifdef COUNT_CYCLES
34838     finish = GetCycleCount32();
34839 #elif defined(ENABLE_INSTRUMENTATION)
34840     finish = GetInstLogTime();
34841 #endif //COUNT_CYCLES
34842     AllocDuration += finish - AllocStart;
34843     AllocCount++;
34844 #endif //TRACE_GC
34845     return newAlloc;
34846 }
34847
34848 void
34849 GCHeap::FixAllocContext (gc_alloc_context* context, void* arg, void *heap)
34850 {
34851     alloc_context* acontext = static_cast<alloc_context*>(context);
34852 #ifdef MULTIPLE_HEAPS
34853
34854     if (arg != 0)
34855         acontext->alloc_count = 0;
34856
34857     uint8_t * alloc_ptr = acontext->alloc_ptr;
34858
34859     if (!alloc_ptr)
34860         return;
34861
34862     // The acontext->alloc_heap can be out of sync with the ptrs because
34863     // of heap re-assignment in allocate
34864     gc_heap* hp = gc_heap::heap_of (alloc_ptr);
34865 #else
34866     gc_heap* hp = pGenGCHeap;
34867 #endif //MULTIPLE_HEAPS
34868
34869     if (heap == NULL || heap == hp)
34870     {
34871         hp->fix_allocation_context (acontext, ((arg != 0)? TRUE : FALSE),
34872                                     get_alignment_constant(TRUE));
34873     }
34874 }
34875
34876 Object*
34877 GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly)
34878 {
34879     uint8_t *o = (uint8_t*)pInteriorPtr;
34880
34881     gc_heap* hp = gc_heap::heap_of (o);
34882
34883     uint8_t* lowest = (fCollectedGenOnly ? hp->gc_low : hp->lowest_address);
34884     uint8_t* highest = (fCollectedGenOnly ? hp->gc_high : hp->highest_address);
34885
34886     if (o >= lowest && o < highest)
34887     {
34888         o = hp->find_object (o, lowest);
34889     }
34890     else
34891     {
34892         o = NULL;
34893     }
34894     
34895     return (Object *)o;
34896 }
34897
34898 BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p)
34899 {
34900     if (dd_new_allocation (dd) < 0)
34901     {
34902         return TRUE;
34903     }
34904
34905     if (((float)(dd_new_allocation (dd)) / (float)dd_desired_allocation (dd)) < (low_memory_p ? 0.7 : 0.3))
34906     {
34907         return TRUE;
34908     }
34909
34910     return FALSE;
34911 }
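
// The predicate above, restated as a standalone helper: collect when the
// budget has gone negative, or when the remaining fraction of the allocation
// budget drops below 30% (below 70% under memory pressure, so low-memory
// collections trigger earlier).
#include <cstddef>

static bool should_collect_optimized_sketch (ptrdiff_t new_allocation,
                                             ptrdiff_t desired_allocation,
                                             bool low_memory_p)
{
    if (new_allocation < 0)
        return true;
    float remaining = (float)new_allocation / (float)desired_allocation;
    return remaining < (low_memory_p ? 0.7f : 0.3f);
}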
34912
34913 //----------------------------------------------------------------------------
34914 // #GarbageCollector
34915 //
34916 //  API to ensure that a complete new garbage collection takes place
34917 //
34918 HRESULT
34919 GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode)
34920 {
34921 #if defined(BIT64) 
34922     if (low_memory_p)
34923     {
34924         size_t total_allocated = 0;
34925         size_t total_desired = 0;
34926 #ifdef MULTIPLE_HEAPS
34927         int hn = 0;
34928         for (hn = 0; hn < gc_heap::n_heaps; hn++)
34929         {
34930             gc_heap* hp = gc_heap::g_heaps [hn];
34931             total_desired += dd_desired_allocation (hp->dynamic_data_of (0));
34932             total_allocated += dd_desired_allocation (hp->dynamic_data_of (0))-
34933                 dd_new_allocation (hp->dynamic_data_of (0));
34934         }
34935 #else
34936         gc_heap* hp = pGenGCHeap;
34937         total_desired = dd_desired_allocation (hp->dynamic_data_of (0));
34938         total_allocated = dd_desired_allocation (hp->dynamic_data_of (0))-
34939             dd_new_allocation (hp->dynamic_data_of (0));
34940 #endif //MULTIPLE_HEAPS
34941
34942         if ((total_desired > gc_heap::mem_one_percent) && (total_allocated < gc_heap::mem_one_percent))
34943         {
34944             dprintf (2, ("Async low mem but we've only allocated %d (< 1%% of physical mem) out of %d, returning",
34945                          total_allocated, total_desired));
34946
34947             return S_OK;
34948         }
34949     }
34950 #endif // BIT64 
34951
34952 #ifdef MULTIPLE_HEAPS
34953     gc_heap* hpt = gc_heap::g_heaps[0];
34954 #else
34955     gc_heap* hpt = 0;
34956 #endif //MULTIPLE_HEAPS
34957
34958     generation = (generation < 0) ? max_generation : min (generation, max_generation);
34959     dynamic_data* dd = hpt->dynamic_data_of (generation);
34960
34961 #ifdef BACKGROUND_GC
34962     if (recursive_gc_sync::background_running_p())
34963     {
34964         if ((mode == collection_optimized) || (mode & collection_non_blocking))
34965         {
34966             return S_OK;
34967         }
34968         if (mode & collection_blocking)
34969         {
34970             pGenGCHeap->background_gc_wait();
34971             if (mode & collection_optimized)
34972             {
34973                 return S_OK;
34974             }
34975         }
34976     }
34977 #endif //BACKGROUND_GC
34978
34979     if (mode & collection_optimized)
34980     {
34981         if (pGenGCHeap->gc_started)
34982         {
34983             return S_OK;
34984         }
34985         else 
34986         {
34987             BOOL should_collect = FALSE;
34988             BOOL should_check_loh = (generation == max_generation);
34989 #ifdef MULTIPLE_HEAPS
34990             for (int i = 0; i < gc_heap::n_heaps; i++)
34991             {
34992                 dynamic_data* dd1 = gc_heap::g_heaps [i]->dynamic_data_of (generation);
34993                 dynamic_data* dd2 = (should_check_loh ? 
34994                                      (gc_heap::g_heaps [i]->dynamic_data_of (max_generation + 1)) :
34995                                      0);
34996
34997                 if (should_collect_optimized (dd1, low_memory_p))
34998                 {
34999                     should_collect = TRUE;
35000                     break;
35001                 }
35002                 if (dd2 && should_collect_optimized (dd2, low_memory_p))
35003                 {
35004                     should_collect = TRUE;
35005                     break;
35006                 }
35007             }
35008 #else
35009             should_collect = should_collect_optimized (dd, low_memory_p);
35010             if (!should_collect && should_check_loh)
35011             {
35012                 should_collect = 
35013                     should_collect_optimized (hpt->dynamic_data_of (max_generation + 1), low_memory_p);
35014             }
35015 #endif //MULTIPLE_HEAPS
35016             if (!should_collect)
35017             {
35018                 return S_OK;
35019             }
35020         }
35021     }
35022
35023     size_t CollectionCountAtEntry = dd_collection_count (dd);
35024     size_t BlockingCollectionCountAtEntry = gc_heap::full_gc_counts[gc_type_blocking];
35025     size_t CurrentCollectionCount = 0;
35026
35027 retry:
35028
35029     CurrentCollectionCount = GarbageCollectTry(generation, low_memory_p, mode);
35030     
35031     if ((mode & collection_blocking) && 
35032         (generation == max_generation) && 
35033         (gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry))
35034     {
35035 #ifdef BACKGROUND_GC
35036         if (recursive_gc_sync::background_running_p())
35037         {
35038             pGenGCHeap->background_gc_wait();
35039         }
35040 #endif //BACKGROUND_GC
35041
35042         goto retry;
35043     }
35044
35045     if (CollectionCountAtEntry == CurrentCollectionCount)
35046     {
35047         goto retry;
35048     }
35049
35050     return S_OK;
35051 }
35052
35053 size_t
35054 GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode)
35055 {
35056     int gen = (generation < 0) ? 
35057                max_generation : min (generation, max_generation);
35058
35059     gc_reason reason = reason_empty;
35060     
35061     if (low_memory_p) 
35062     {
35063         if (mode & collection_blocking)
35064         {
35065             reason = reason_lowmemory_blocking;
35066         }
35067         else
35068         {
35069             reason = reason_lowmemory;
35070         }
35071     }
35072     else
35073     {
35074         reason = reason_induced;
35075     }
35076
35077     if (reason == reason_induced)
35078     {
35079         if (mode & collection_compacting)
35080         {
35081             reason = reason_induced_compacting;
35082         }
35083         else if (mode & collection_non_blocking)
35084         {
35085             reason = reason_induced_noforce;
35086         }
35087 #ifdef STRESS_HEAP
35088         else if (mode & collection_gcstress)
35089         {
35090             reason = reason_gcstress;
35091         }
35092 #endif
35093     }
35094
35095     return GarbageCollectGeneration (gen, reason);
35096 }
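
// The reason selection above, restated as a pure function. The enum values
// here are illustrative; the real gc_reason values live in the GC headers.
enum gc_reason_sketch
{
    rs_induced, rs_induced_compacting, rs_induced_noforce, rs_gcstress,
    rs_lowmemory, rs_lowmemory_blocking
};

static gc_reason_sketch pick_reason_sketch (bool low_memory_p, bool blocking,
                                            bool compacting, bool non_blocking,
                                            bool gcstress)
{
    if (low_memory_p)
        return blocking ? rs_lowmemory_blocking : rs_lowmemory;
    if (compacting)
        return rs_induced_compacting;
    if (non_blocking)
        return rs_induced_noforce;
    return gcstress ? rs_gcstress : rs_induced;
}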
35097
35098 void gc_heap::do_pre_gc()
35099 {
35100     STRESS_LOG_GC_STACK;
35101
35102 #ifdef STRESS_LOG
35103     STRESS_LOG_GC_START(VolatileLoad(&settings.gc_index),
35104                         (uint32_t)settings.condemned_generation,
35105                         (uint32_t)settings.reason);
35106 #endif // STRESS_LOG
35107
35108 #ifdef MULTIPLE_HEAPS
35109     gc_heap* hp = g_heaps[0];
35110 #else
35111     gc_heap* hp = 0;
35112 #endif //MULTIPLE_HEAPS
35113
35114 #ifdef BACKGROUND_GC
35115     settings.b_state = hp->current_bgc_state;
35116 #endif //BACKGROUND_GC
35117
35118 #ifdef BACKGROUND_GC
35119     dprintf (1, ("*GC* %d(gen0:%d)(%d)(%s)(%d)", 
35120         VolatileLoad(&settings.gc_index), 
35121         dd_collection_count (hp->dynamic_data_of (0)),
35122         settings.condemned_generation,
35123         (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
35124         settings.b_state));
35125 #else
35126     dprintf (1, ("*GC* %d(gen0:%d)(%d)", 
35127         VolatileLoad(&settings.gc_index), 
35128         dd_collection_count(hp->dynamic_data_of(0)),
35129         settings.condemned_generation));
35130 #endif //BACKGROUND_GC
35131
35132     // TODO: this can happen... it's because of the way we are calling
35133     // do_pre_gc; will fix later.
35134     //if (last_gc_index > VolatileLoad(&settings.gc_index))
35135     //{
35136     //    FATAL_GC_ERROR();
35137     //}
35138
35139     last_gc_index = VolatileLoad(&settings.gc_index);
35140     GCHeap::UpdatePreGCCounters();
35141
35142     if (settings.concurrent)
35143     {
35144 #ifdef BACKGROUND_GC
35145         full_gc_counts[gc_type_background]++;
35146 #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
35147         GCHeap::gc_stress_fgcs_in_bgc = 0;
35148 #endif // STRESS_HEAP && !FEATURE_REDHAWK
35149 #endif // BACKGROUND_GC
35150     }
35151     else
35152     {
35153         if (settings.condemned_generation == max_generation)
35154         {
35155             full_gc_counts[gc_type_blocking]++;
35156         }
35157         else
35158         {
35159 #ifdef BACKGROUND_GC
35160             if (settings.background_p)
35161             {
35162                 ephemeral_fgc_counts[settings.condemned_generation]++;
35163             }
35164 #endif //BACKGROUND_GC
35165         }
35166     }
35167
35168 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
35169     if (g_fEnableAppDomainMonitoring)
35170     {
35171         GCToEEInterface::ResetTotalSurvivedBytes();
35172     }
35173 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
35174 }
35175
35176 #ifdef GC_CONFIG_DRIVEN
35177 void gc_heap::record_interesting_info_per_heap()
35178 {
35179     // datapoints are always from the last blocking GC so don't record again
35180     // for BGCs.
35181     if (!(settings.concurrent))
35182     {
35183         for (int i = 0; i < max_idp_count; i++)
35184         {
35185             interesting_data_per_heap[i] += interesting_data_per_gc[i];
35186         }
35187     }
35188
35189     int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
35190     if (compact_reason >= 0)
35191         (compact_reasons_per_heap[compact_reason])++;
35192     int expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand);
35193     if (expand_mechanism >= 0)
35194         (expand_mechanisms_per_heap[expand_mechanism])++;
35195
35196     for (int i = 0; i < max_gc_mechanism_bits_count; i++)
35197     {
35198         if (get_gc_data_per_heap()->is_mechanism_bit_set ((gc_mechanism_bit_per_heap)i))
35199             (interesting_mechanism_bits_per_heap[i])++;
35200     }
35201
35202     //         h#  | GC  | gen | C   | EX  | NF  | BF  | ML  | DM  || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
35203     cprintf (("%2d | %6d | %1d | %1s | %2s | %2s | %2s | %2s | %2s || %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id |",
35204             heap_number,
35205             (size_t)settings.gc_index,
35206             settings.condemned_generation,
35207             // TEMP - I am just doing this for wks GC 'cause I want to see the pattern of doing C/S GCs.
35208             (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction
35209             ((expand_mechanism >= 0)? "X" : ""), // EX
35210             ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF
35211             ((expand_mechanism == expand_reuse_bestfit) ? "X" : ""), // BF
35212             (get_gc_data_per_heap()->is_mechanism_bit_set (gc_mark_list_bit) ? "X" : ""), // ML
35213             (get_gc_data_per_heap()->is_mechanism_bit_set (gc_demotion_bit) ? "X" : ""), // DM
35214             interesting_data_per_gc[idp_pre_short],
35215             interesting_data_per_gc[idp_post_short],
35216             interesting_data_per_gc[idp_merged_pin],
35217             interesting_data_per_gc[idp_converted_pin],
35218             interesting_data_per_gc[idp_pre_pin],
35219             interesting_data_per_gc[idp_post_pin],
35220             interesting_data_per_gc[idp_pre_and_post_pin],
35221             interesting_data_per_gc[idp_pre_short_padded],
35222             interesting_data_per_gc[idp_post_short_padded]));
35223 }
35224
35225 void gc_heap::record_global_mechanisms()
35226 {
35227     for (int i = 0; i < max_global_mechanisms_count; i++)
35228     {
35229         if (gc_data_global.get_mechanism_p ((gc_global_mechanism_p)i))
35230         {
35231             ::record_global_mechanism (i);
35232         }
35233     }
35234 }
35235
35236 BOOL gc_heap::should_do_sweeping_gc (BOOL compact_p)
35237 {
35238     if (!compact_ratio)
35239         return (!compact_p);
35240
35241     size_t compact_count = compact_or_sweep_gcs[0];
35242     size_t sweep_count = compact_or_sweep_gcs[1];
35243
35244     size_t total_count = compact_count + sweep_count;
35245     BOOL should_compact = compact_p;
35246     if (total_count > 3)
35247     {
35248         if (compact_p)
35249         {
35250             int temp_ratio = (int)((compact_count + 1) * 100 / (total_count + 1));
35251             if (temp_ratio > compact_ratio)
35252             {
35253                 // cprintf (("compact would be: %d, total_count: %d, ratio would be %d%% > target\n",
35254                 //     (compact_count + 1), (total_count + 1), temp_ratio));
35255                 should_compact = FALSE;
35256             }
35257         }
35258         else
35259         {
35260             int temp_ratio = (int)((sweep_count + 1) * 100 / (total_count + 1));
35261             if (temp_ratio > (100 - compact_ratio))
35262             {
35263                 // cprintf (("sweep would be: %d, total_count: %d, ratio would be %d%% > target\n",
35264                 //     (sweep_count + 1), (total_count + 1), temp_ratio));
35265                 should_compact = TRUE;
35266             }
35267         }
35268     }
35269
35270     return !should_compact;
35271 }
35272 #endif //GC_CONFIG_DRIVEN
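
// A standalone restatement of the compact_ratio throttle above: once more than
// three GCs have been recorded, the natural compact/sweep decision is flipped
// whenever that kind of GC would exceed its configured share of the total.
// (The original returns "should sweep"; this sketch returns the compact
// decision directly.)
#include <cstddef>

static bool throttled_compact_decision_sketch (bool wants_compact, size_t compact_count,
                                               size_t sweep_count, int compact_ratio_pct)
{
    size_t total = compact_count + sweep_count;
    if (compact_ratio_pct == 0 || total <= 3)
        return wants_compact;

    if (wants_compact)
    {
        int ratio = (int)((compact_count + 1) * 100 / (total + 1));
        if (ratio > compact_ratio_pct)
            return false;                  // compacting too often; sweep instead
    }
    else
    {
        int ratio = (int)((sweep_count + 1) * 100 / (total + 1));
        if (ratio > (100 - compact_ratio_pct))
            return true;                   // sweeping too often; compact instead
    }
    return wants_compact;
}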
35273
35274 bool gc_heap::is_pm_ratio_exceeded()
35275 {
35276     size_t maxgen_frag = 0;
35277     size_t maxgen_size = 0;
35278     size_t total_heap_size = get_total_heap_size();
35279
35280 #ifdef MULTIPLE_HEAPS
35281     for (int i = 0; i < gc_heap::n_heaps; i++)
35282     {
35283         gc_heap* hp = gc_heap::g_heaps[i];
35284 #else //MULTIPLE_HEAPS
35285     {
35286         gc_heap* hp = pGenGCHeap;
35287 #endif //MULTIPLE_HEAPS
35288
35289         maxgen_frag += dd_fragmentation (hp->dynamic_data_of (max_generation));
35290         maxgen_size += hp->generation_size (max_generation);
35291     }
35292
35293     double maxgen_ratio = (double)maxgen_size / (double)total_heap_size;
35294     double maxgen_frag_ratio = (double)maxgen_frag / (double)maxgen_size;
35295     dprintf (GTC_LOG, ("maxgen %Id(%d%% total heap), frag: %Id (%d%% maxgen)",
35296         maxgen_size, (int)(maxgen_ratio * 100.0), 
35297         maxgen_frag, (int)(maxgen_frag_ratio * 100.0)));
35298
35299     bool maxgen_highfrag_p = ((maxgen_ratio > 0.5) && (maxgen_frag_ratio > 0.1));
35300
35301     // We need to adjust elevation here because if there's enough fragmentation,
35302     // a full GC is not unproductive.
35303     if (maxgen_highfrag_p)
35304     {
35305         settings.should_lock_elevation = FALSE;
35306         dprintf (GTC_LOG, ("high frag gen2, turn off elevation"));
35307     }
35308
35309     return maxgen_highfrag_p;
35310 }
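
// The provisional-mode ratio test above, restated standalone: gen2 must make
// up more than half the heap and itself be more than 10% fragmented before the
// ratio is considered exceeded.
#include <cstddef>

static bool pm_ratio_exceeded_sketch (size_t maxgen_size, size_t maxgen_frag,
                                      size_t total_heap_size)
{
    double maxgen_ratio      = (double)maxgen_size / (double)total_heap_size;
    double maxgen_frag_ratio = (double)maxgen_frag / (double)maxgen_size;
    return (maxgen_ratio > 0.5) && (maxgen_frag_ratio > 0.1);
}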
35311
35312 void gc_heap::do_post_gc()
35313 {
35314     if (!settings.concurrent)
35315     {
35316         initGCShadow();
35317     }
35318
35319 #ifdef TRACE_GC
35320 #ifdef COUNT_CYCLES
35321     AllocStart = GetCycleCount32();
35322 #else
35323     AllocStart = clock();
35324 #endif //COUNT_CYCLES
35325 #endif //TRACE_GC
35326
35327 #ifdef MULTIPLE_HEAPS
35328     gc_heap* hp = g_heaps[0];
35329 #else
35330     gc_heap* hp = 0;
35331 #endif //MULTIPLE_HEAPS
35332     
35333     GCToEEInterface::GcDone(settings.condemned_generation);
35334
35335     GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index),
35336                          (uint32_t)settings.condemned_generation,
35337                          (uint32_t)settings.reason,
35338                          !!settings.concurrent);
35339
35340     //dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)", 
35341     dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)", 
35342         VolatileLoad(&settings.gc_index), 
35343         dd_collection_count(hp->dynamic_data_of(0)),
35344         settings.condemned_generation,
35345         (settings.concurrent ? "BGC" : "GC")));
35346
35347     if (settings.exit_memory_load != 0)
35348         last_gc_memory_load = settings.exit_memory_load;
35349     else if (settings.entry_memory_load != 0)
35350         last_gc_memory_load = settings.entry_memory_load;
35351
35352     last_gc_heap_size = get_total_heap_size();
35353     last_gc_fragmentation = get_total_fragmentation();
35354
35355     // Note we only do this at the end of full blocking GCs because we do not want
35356     // to turn on this provisional mode during the middle of a BGC.
35357     if ((settings.condemned_generation == max_generation) && (!settings.concurrent))
35358     {
35359         if (pm_stress_on)
35360         {
35361             size_t full_compacting_gc_count = full_gc_counts[gc_type_compacting];
35362             if (provisional_mode_triggered)
35363             {
35364                 uint64_t r = gc_rand::get_rand(10);
35365                 if ((full_compacting_gc_count - provisional_triggered_gc_count) >= r)
35366                 {
35367                     provisional_mode_triggered = false;
35368                     provisional_off_gc_count = full_compacting_gc_count;
35369                     dprintf (GTC_LOG, ("%Id NGC2s when turned on, %Id NGCs since(%Id)",
35370                         provisional_triggered_gc_count, (full_compacting_gc_count - provisional_triggered_gc_count),
35371                         num_provisional_triggered));
35372                 }
35373             }
35374             else
35375             {
35376                 uint64_t r = gc_rand::get_rand(5);
35377                 if ((full_compacting_gc_count - provisional_off_gc_count) >= r)
35378                 {
35379                     provisional_mode_triggered = true;
35380                     provisional_triggered_gc_count = full_compacting_gc_count;
35381                     num_provisional_triggered++;
35382                     dprintf (GTC_LOG, ("%Id NGC2s when turned off, %Id NGCs since(%Id)",
35383                         provisional_off_gc_count, (full_compacting_gc_count - provisional_off_gc_count),
35384                         num_provisional_triggered));
35385                 }
35386             }
35387         }
35388         else
35389         {
35390             if (provisional_mode_triggered)
35391             {
35392                 if ((settings.entry_memory_load < high_memory_load_th) ||
35393                     !is_pm_ratio_exceeded())
35394                 {
35395                     dprintf (GTC_LOG, ("turning off PM"));
35396                     provisional_mode_triggered = false;
35397                 }
35398             }
35399             else if ((settings.entry_memory_load >= high_memory_load_th) && is_pm_ratio_exceeded())
35400             {
35401                 dprintf (GTC_LOG, ("highmem && highfrag - turning on PM"));
35402                 provisional_mode_triggered = true;
35403                 num_provisional_triggered++;
35404             }
35405         }
35406     }
35407
35408     GCHeap::UpdatePostGCCounters();
35409 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
35410     //if (g_fEnableARM)
35411     //{
35412     //    SystemDomain::GetADSurvivedBytes();
35413     //}
35414 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
35415
35416 #ifdef STRESS_LOG
35417     STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index),
35418                       (uint32_t)settings.condemned_generation,
35419                       (uint32_t)settings.reason);
35420 #endif // STRESS_LOG
35421
35422 #ifdef GC_CONFIG_DRIVEN
35423     if (!settings.concurrent)
35424     {
35425         if (settings.compaction)
35426             (compact_or_sweep_gcs[0])++;
35427         else
35428             (compact_or_sweep_gcs[1])++;
35429     }
35430
35431 #ifdef MULTIPLE_HEAPS
35432     for (int i = 0; i < n_heaps; i++)
35433         g_heaps[i]->record_interesting_info_per_heap();
35434 #else
35435     record_interesting_info_per_heap();
35436 #endif //MULTIPLE_HEAPS
35437     record_global_mechanisms();
35438 #endif //GC_CONFIG_DRIVEN
35439 }
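
// A sketch of the pm_stress_on toggling in do_post_gc above: after each full
// blocking compacting GC, provisional mode flips once the count of such GCs
// since the last flip reaches a random threshold (drawn from [0, 10) while on,
// [0, 5) while off). Names here are illustrative, not gc.cpp internals.
#include <cstdint>

static bool maybe_flip_pm_sketch (bool pm_on, uint64_t full_compacting_gcs,
                                  uint64_t count_at_last_flip, uint64_t random_draw)
{
    // The caller records full_compacting_gcs as the new flip point when this flips.
    if ((full_compacting_gcs - count_at_last_flip) >= random_draw)
        return !pm_on;
    return pm_on;
}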
35440
35441 unsigned GCHeap::GetGcCount()
35442 {
35443     return (unsigned int)VolatileLoad(&pGenGCHeap->settings.gc_index);
35444 }
35445
35446 size_t
35447 GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
35448 {
35449     dprintf (2, ("triggered a GC!"));
35450
35451 #ifdef MULTIPLE_HEAPS
35452     gc_heap* hpt = gc_heap::g_heaps[0];
35453 #else
35454     gc_heap* hpt = 0;
35455 #endif //MULTIPLE_HEAPS
35456     bool cooperative_mode = true;
35457     dynamic_data* dd = hpt->dynamic_data_of (gen);
35458     size_t localCount = dd_collection_count (dd);
35459
35460     enter_spin_lock (&gc_heap::gc_lock);
35461     dprintf (SPINLOCK_LOG, ("GC Egc"));
35462     ASSERT_HOLDING_SPIN_LOCK(&gc_heap::gc_lock);
35463
35464     //don't trigger another GC if one was already in progress
35465     //while waiting for the lock
35466     {
35467         size_t col_count = dd_collection_count (dd);
35468
35469         if (localCount != col_count)
35470         {
35471 #ifdef SYNCHRONIZATION_STATS
35472             gc_lock_contended++;
35473 #endif //SYNCHRONIZATION_STATS
35474             dprintf (SPINLOCK_LOG, ("no need GC Lgc"));
35475             leave_spin_lock (&gc_heap::gc_lock);
35476
35477             // We don't need to release the msl here 'cause this means a GC
35478             // has happened and would have released all msls.
35479             return col_count;
35480          }
35481     }
35482
35483 #ifdef COUNT_CYCLES
35484     int gc_start = GetCycleCount32();
35485 #endif //COUNT_CYCLES
35486
35487 #ifdef TRACE_GC
35488 #ifdef COUNT_CYCLES
35489     AllocDuration += GetCycleCount32() - AllocStart;
35490 #else
35491     AllocDuration += clock() - AllocStart;
35492 #endif //COUNT_CYCLES
35493 #endif //TRACE_GC
35494
35495     gc_heap::g_low_memory_status = (reason == reason_lowmemory) ||
35496                                    (reason == reason_lowmemory_blocking) ||
35497                                    (gc_heap::latency_level == latency_level_memory_footprint);
35498
35499     gc_trigger_reason = reason;
35500
35501 #ifdef MULTIPLE_HEAPS
35502     for (int i = 0; i < gc_heap::n_heaps; i++)
35503     {
35504         gc_heap::g_heaps[i]->reset_gc_done();
35505     }
35506 #else
35507     gc_heap::reset_gc_done();
35508 #endif //MULTIPLE_HEAPS
35509
35510     gc_heap::gc_started = TRUE;
35511
35512     {
35513         init_sync_log_stats();
35514
35515 #ifndef MULTIPLE_HEAPS
35516         cooperative_mode = gc_heap::enable_preemptive ();
35517
35518         dprintf (2, ("Suspending EE"));
35519         BEGIN_TIMING(suspend_ee_during_log);
35520         GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
35521         END_TIMING(suspend_ee_during_log);
35522         gc_heap::proceed_with_gc_p = gc_heap::should_proceed_with_gc();
35523         gc_heap::disable_preemptive (cooperative_mode);
35524         if (gc_heap::proceed_with_gc_p)
35525             pGenGCHeap->settings.init_mechanisms();
35526         else
35527             gc_heap::update_collection_counts_for_no_gc();
35528
35529 #endif //!MULTIPLE_HEAPS
35530     }
35531
35532 // MAP_EVENT_MONITORS(EE_MONITOR_GARBAGE_COLLECTIONS, NotifyEvent(EE_EVENT_TYPE_GC_STARTED, 0));
35533
35534 #ifdef TRACE_GC
35535 #ifdef COUNT_CYCLES
35536     unsigned start;
35537     unsigned finish;
35538     start = GetCycleCount32();
35539 #else
35540     clock_t start;
35541     clock_t finish;
35542     start = clock();
35543 #endif //COUNT_CYCLES
35544     PromotedObjectCount = 0;
35545 #endif //TRACE_GC
35546
35547     unsigned int condemned_generation_number = gen;
35548
35549     // We want to get a stack from the user thread that triggered the GC
35550     // instead of on the GC thread which is the case for Server GC.
35551     // But we are doing it for Workstation GC as well to be uniform.
35552     FIRE_EVENT(GCTriggered, static_cast<uint32_t>(reason));
35553
35554 #ifdef MULTIPLE_HEAPS
35555     GcCondemnedGeneration = condemned_generation_number;
35556
35557     cooperative_mode = gc_heap::enable_preemptive ();
35558
35559     BEGIN_TIMING(gc_during_log);
35560     gc_heap::ee_suspend_event.Set();
35561     gc_heap::wait_for_gc_done();
35562     END_TIMING(gc_during_log);
35563
35564     gc_heap::disable_preemptive (cooperative_mode);
35565
35566     condemned_generation_number = GcCondemnedGeneration;
35567 #else
35568     if (gc_heap::proceed_with_gc_p)
35569     {
35570         BEGIN_TIMING(gc_during_log);
35571         pGenGCHeap->garbage_collect (condemned_generation_number);
35572         if (gc_heap::pm_trigger_full_gc)
35573         {
35574             pGenGCHeap->garbage_collect_pm_full_gc();
35575         }
35576         END_TIMING(gc_during_log);
35577     }
35578 #endif //MULTIPLE_HEAPS
35579
35580 #ifdef TRACE_GC
35581 #ifdef COUNT_CYCLES
35582     finish = GetCycleCount32();
35583 #else
35584     finish = clock();
35585 #endif //COUNT_CYCLES
35586     GcDuration += finish - start;
35587     dprintf (3,
35588              ("<GC# %d> Condemned: %d, Duration: %d, total: %d Alloc Avg: %d, Small Objects:%d Large Objects:%d",
35589               VolatileLoad(&pGenGCHeap->settings.gc_index), condemned_generation_number,
35590               finish - start, GcDuration,
35591               AllocCount ? (AllocDuration / AllocCount) : 0,
35592               AllocSmallCount, AllocBigCount));
35593     AllocCount = 0;
35594     AllocDuration = 0;
35595 #endif // TRACE_GC
35596
35597 #ifdef BACKGROUND_GC
35598     // We are deciding whether we should fire the alloc wait end event here
35599     // because in begin_foreground we could be calling end_foreground 
35600     // if we need to retry.
35601     if (gc_heap::alloc_wait_event_p)
35602     {
35603         hpt->fire_alloc_wait_event_end (awr_fgc_wait_for_bgc);
35604         gc_heap::alloc_wait_event_p = FALSE;
35605     }
35606 #endif //BACKGROUND_GC
35607
35608 #ifndef MULTIPLE_HEAPS
35609 #ifdef BACKGROUND_GC
35610     if (!gc_heap::dont_restart_ee_p)
35611     {
35612 #endif //BACKGROUND_GC
35613         BEGIN_TIMING(restart_ee_during_log);
35614         GCToEEInterface::RestartEE(TRUE);
35615         END_TIMING(restart_ee_during_log);
35616 #ifdef BACKGROUND_GC
35617     }
35618 #endif //BACKGROUND_GC
35619 #endif //!MULTIPLE_HEAPS
35620
35621 #ifdef COUNT_CYCLES
35622     printf ("GC: %d Time: %d\n", GcCondemnedGeneration,
35623             GetCycleCount32() - gc_start);
35624 #endif //COUNT_CYCLES
35625
35626 #ifndef MULTIPLE_HEAPS
35627     process_sync_log_stats();
35628     gc_heap::gc_started = FALSE;
35629     gc_heap::set_gc_done();
35630     dprintf (SPINLOCK_LOG, ("GC Lgc"));
35631     leave_spin_lock (&gc_heap::gc_lock);    
35632 #endif //!MULTIPLE_HEAPS
35633
35634 #ifdef FEATURE_PREMORTEM_FINALIZATION
35635     GCToEEInterface::EnableFinalization(!pGenGCHeap->settings.concurrent && pGenGCHeap->settings.found_finalizers);
35636 #endif // FEATURE_PREMORTEM_FINALIZATION
35637
35638     return dd_collection_count (dd);
35639 }
35640
35641 size_t      GCHeap::GetTotalBytesInUse ()
35642 {
35643 #ifdef MULTIPLE_HEAPS
35644     // enumerate all the heaps and sum their sizes.
35645     size_t tot_size = 0;
35646     for (int i = 0; i < gc_heap::n_heaps; i++)
35647     {
35648         GCHeap* Hp = gc_heap::g_heaps [i]->vm_heap;
35649         tot_size += Hp->ApproxTotalBytesInUse (FALSE);
35650     }
35651     return tot_size;
35652 #else
35653     return ApproxTotalBytesInUse ();
35654 #endif //MULTIPLE_HEAPS
35655 }
35656
35657 int GCHeap::CollectionCount (int generation, int get_bgc_fgc_count)
35658 {
35659     if (get_bgc_fgc_count != 0)
35660     {
35661 #ifdef BACKGROUND_GC
35662         if (generation == max_generation)
35663         {
35664             return (int)(gc_heap::full_gc_counts[gc_type_background]);
35665         }
35666         else
35667         {
35668             return (int)(gc_heap::ephemeral_fgc_counts[generation]);
35669         }
35670 #else
35671         return 0;
35672 #endif //BACKGROUND_GC
35673     }
35674
35675 #ifdef MULTIPLE_HEAPS
35676     gc_heap* hp = gc_heap::g_heaps [0];
35677 #else  //MULTIPLE_HEAPS
35678     gc_heap* hp = pGenGCHeap;
35679 #endif //MULTIPLE_HEAPS
35680     if (generation > max_generation)
35681         return 0;
35682     else
35683         return (int)dd_collection_count (hp->dynamic_data_of (generation));
35684 }
35685
35686 size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
35687 {
35688     size_t totsize = 0;
35689     //GCTODO
35690     //ASSERT(InMustComplete());
35691     enter_spin_lock (&pGenGCHeap->gc_lock);
35692
35693     heap_segment* eph_seg = generation_allocation_segment (pGenGCHeap->generation_of (0));
35694     // Get small block heap size info
35695     totsize = (pGenGCHeap->alloc_allocated - heap_segment_mem (eph_seg));
35696     heap_segment* seg1 = generation_start_segment (pGenGCHeap->generation_of (max_generation));
35697     while (seg1 != eph_seg)
35698     {
35699         totsize += heap_segment_allocated (seg1) -
35700             heap_segment_mem (seg1);
35701         seg1 = heap_segment_next (seg1);
35702     }
35703
35704     //discount the fragmentation
35705     for (int i = 0; i <= max_generation; i++)
35706     {
35707         generation* gen = pGenGCHeap->generation_of (i);
35708         totsize -= (generation_free_list_space (gen) + generation_free_obj_space (gen));
35709     }
35710
35711     if (!small_heap_only)
35712     {
35713         heap_segment* seg2 = generation_start_segment (pGenGCHeap->generation_of (max_generation+1));
35714
35715         while (seg2 != 0)
35716         {
35717             totsize += heap_segment_allocated (seg2) -
35718                 heap_segment_mem (seg2);
35719             seg2 = heap_segment_next (seg2);
35720         }
35721
35722         //discount the fragmentation
35723         generation* loh_gen = pGenGCHeap->generation_of (max_generation+1);
35724         size_t frag = generation_free_list_space (loh_gen) + generation_free_obj_space (loh_gen);
35725         totsize -= frag;
35726     }
35727     leave_spin_lock (&pGenGCHeap->gc_lock);
35728     return totsize;
35729 }
35730
35731 #ifdef MULTIPLE_HEAPS
35732 void GCHeap::AssignHeap (alloc_context* acontext)
35733 {
35734     // Assign heap based on processor
35735     acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext, 0)));
35736     acontext->set_home_heap(acontext->get_alloc_heap());
35737 }
35738 GCHeap* GCHeap::GetHeap (int n)
35739 {
35740     assert (n < gc_heap::n_heaps);
35741     return gc_heap::g_heaps [n]->vm_heap;
35742 }
35743 #endif //MULTIPLE_HEAPS
35744
35745 bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number)
35746 {
35747     alloc_context* acontext = static_cast<alloc_context*>(context);
35748 #ifdef MULTIPLE_HEAPS
35749     return ((acontext->get_home_heap() == GetHeap(thread_number)) ||
35750             ((acontext->get_home_heap() == 0) && (thread_number == 0)));
35751 #else
35752     UNREFERENCED_PARAMETER(acontext);
35753     UNREFERENCED_PARAMETER(thread_number);
35754     return true;
35755 #endif //MULTIPLE_HEAPS
35756 }
35757
35758 // Returns the number of GC heaps currently in use.
35759 int GCHeap::GetNumberOfHeaps ()
35760 {
35761 #ifdef MULTIPLE_HEAPS
35762     return gc_heap::n_heaps;
35763 #else
35764     return 1;
35765 #endif //MULTIPLE_HEAPS
35766 }
35767
35768 /*
35769   This way we spend extra time cycling through all the heaps while creating the handle.
35770   It ought to be changed to keep alloc_context.home_heap as a number (equal to heap_number).
35771 */
35772 int GCHeap::GetHomeHeapNumber ()
35773 {
35774 #ifdef MULTIPLE_HEAPS
35775     gc_alloc_context* ctx = GCToEEInterface::GetAllocContext();
35776     if (!ctx)
35777     {
35778         return 0;
35779     }
35780
35781     GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap();
35782     return (hp ? hp->pGenGCHeap->heap_number : 0);
35783 #else
35784     return 0;
35785 #endif //MULTIPLE_HEAPS
35786 }
35787
35788 unsigned int GCHeap::GetCondemnedGeneration()
35789
35790     return gc_heap::settings.condemned_generation;
35791 }
35792
35793 void GCHeap::GetMemoryInfo(uint32_t* highMemLoadThreshold, 
35794                            uint64_t* totalPhysicalMem, 
35795                            uint32_t* lastRecordedMemLoad,
35796                            size_t* lastRecordedHeapSize,
35797                            size_t* lastRecordedFragmentation)
35798 {
35799     *highMemLoadThreshold = gc_heap::high_memory_load_th;
35800     *totalPhysicalMem = gc_heap::total_physical_mem;
35801     *lastRecordedMemLoad = gc_heap::last_gc_memory_load;
35802     *lastRecordedHeapSize = gc_heap::last_gc_heap_size;
35803     *lastRecordedFragmentation = gc_heap::last_gc_fragmentation;
35804 }
35805
35806 int GCHeap::GetGcLatencyMode()
35807 {
35808     return (int)(pGenGCHeap->settings.pause_mode);
35809 }
35810
35811 int GCHeap::SetGcLatencyMode (int newLatencyMode)
35812 {
35813     if (gc_heap::settings.pause_mode == pause_no_gc)
35814         return (int)set_pause_mode_no_gc;
35815
35816     gc_pause_mode new_mode = (gc_pause_mode)newLatencyMode;
35817
35818     if (new_mode == pause_low_latency)
35819     {
35820 #ifndef MULTIPLE_HEAPS
35821         pGenGCHeap->settings.pause_mode = new_mode;
35822 #endif //!MULTIPLE_HEAPS
35823     }
35824     else if (new_mode == pause_sustained_low_latency)
35825     {
35826 #ifdef BACKGROUND_GC
35827         if (gc_heap::gc_can_use_concurrent)
35828         {
35829             pGenGCHeap->settings.pause_mode = new_mode;
35830         }
35831 #endif //BACKGROUND_GC
35832     }
35833     else
35834     {
35835         pGenGCHeap->settings.pause_mode = new_mode;
35836     }
35837
35838 #ifdef BACKGROUND_GC
35839     if (recursive_gc_sync::background_running_p())
35840     {
35841         // If we get here, it means we are doing an FGC. If the pause
35842         // mode was altered we will need to save it in the BGC settings.
35843         if (gc_heap::saved_bgc_settings.pause_mode != new_mode)
35844         {
35845             gc_heap::saved_bgc_settings.pause_mode = new_mode;
35846         }
35847     }
35848 #endif //BACKGROUND_GC
35849
35850     return (int)set_pause_mode_success;
35851 }
35852
35853 int GCHeap::GetLOHCompactionMode()
35854 {
35855     return pGenGCHeap->loh_compaction_mode;
35856 }
35857
35858 void GCHeap::SetLOHCompactionMode (int newLOHCompactionMode)
35859 {
35860 #ifdef FEATURE_LOH_COMPACTION
35861     pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionMode;
35862 #endif //FEATURE_LOH_COMPACTION
35863 }
35864
35865 bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
35866                                            uint32_t lohPercentage)
35867 {
35868 #ifdef MULTIPLE_HEAPS
35869     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35870     {
35871         gc_heap* hp = gc_heap::g_heaps [hn];
35872         hp->fgn_last_alloc = dd_new_allocation (hp->dynamic_data_of (0));
35873     }
35874 #else //MULTIPLE_HEAPS
35875     pGenGCHeap->fgn_last_alloc = dd_new_allocation (pGenGCHeap->dynamic_data_of (0));
35876 #endif //MULTIPLE_HEAPS
35877
35878     pGenGCHeap->full_gc_approach_event.Reset();
35879     pGenGCHeap->full_gc_end_event.Reset();
35880     pGenGCHeap->full_gc_approach_event_set = false;
35881
35882     pGenGCHeap->fgn_maxgen_percent = gen2Percentage;
35883     pGenGCHeap->fgn_loh_percent = lohPercentage;
35884
35885     return TRUE;
35886 }
35887
35888 bool GCHeap::CancelFullGCNotification()
35889 {
35890     pGenGCHeap->fgn_maxgen_percent = 0;
35891     pGenGCHeap->fgn_loh_percent = 0;
35892
35893     pGenGCHeap->full_gc_approach_event.Set();
35894     pGenGCHeap->full_gc_end_event.Set();
35895     
35896     return TRUE;
35897 }
35898
35899 int GCHeap::WaitForFullGCApproach(int millisecondsTimeout)
35900 {
35901     dprintf (2, ("WFGA: Begin wait"));
35902     int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_approach_event), millisecondsTimeout);
35903     dprintf (2, ("WFGA: End wait"));
35904     return result;
35905 }
35906
35907 int GCHeap::WaitForFullGCComplete(int millisecondsTimeout)
35908 {
35909     dprintf (2, ("WFGE: Begin wait"));
35910     int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_end_event), millisecondsTimeout);
35911     dprintf (2, ("WFGE: End wait"));
35912     return result;
35913 }
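// Illustrative only (pHeap, timeoutMs and the percentages are hypothetical):
// a caller pairs the notification APIs above roughly like this:
//
//     if (pHeap->RegisterForFullGCNotification (10, 10))
//     {
//         int status = pHeap->WaitForFullGCApproach (timeoutMs);
//         // ... react to the approaching full GC based on status ...
//         status = pHeap->WaitForFullGCComplete (timeoutMs);
//         pHeap->CancelFullGCNotification ();
//     }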
35914
35915 int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC)
35916 {
35917     NoGCRegionLockHolder lh;
35918
35919     dprintf (1, ("begin no gc called"));
35920     start_no_gc_region_status status = gc_heap::prepare_for_no_gc_region (totalSize, lohSizeKnown, lohSize, disallowFullBlockingGC);
35921     if (status == start_no_gc_success)
35922     {
35923         GarbageCollect (max_generation);
35924         status = gc_heap::get_start_no_gc_region_status();
35925     }
35926
35927     if (status != start_no_gc_success)
35928         gc_heap::handle_failure_for_no_gc();
35929
35930     return (int)status;
35931 }
35932
35933 int GCHeap::EndNoGCRegion()
35934 {
35935     NoGCRegionLockHolder lh;
35936     return (int)gc_heap::end_no_gc_region();
35937 }
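// Illustrative only (the 64MB budget is hypothetical): StartNoGCRegion and
// EndNoGCRegion bracket an allocation-critical section during which no GC
// may be triggered:
//
//     if (pHeap->StartNoGCRegion (64*1024*1024, false, 0, false) == (int)start_no_gc_success)
//     {
//         // ... allocate within the requested budget, GC-free ...
//         pHeap->EndNoGCRegion ();
//     }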
35938
35939 void GCHeap::PublishObject (uint8_t* Obj)
35940 {
35941 #ifdef BACKGROUND_GC
35942     gc_heap* hp = gc_heap::heap_of (Obj);
35943     hp->bgc_alloc_lock->loh_alloc_done (Obj);
35944     hp->bgc_untrack_loh_alloc();
35945 #endif //BACKGROUND_GC
35946 }
35947
35948 // The spec for this one isn't clear. This function
35949 // returns the size that can be allocated without
35950 // triggering a GC of any kind.
35951 size_t GCHeap::ApproxFreeBytes()
35952 {
35953     //GCTODO
35954     //ASSERT(InMustComplete());
35955     enter_spin_lock (&pGenGCHeap->gc_lock);
35956
35957     generation* gen = pGenGCHeap->generation_of (0);
35958     size_t res = generation_allocation_limit (gen) - generation_allocation_pointer (gen);
35959
35960     leave_spin_lock (&pGenGCHeap->gc_lock);
35961
35962     return res;
35963 }
35964
35965 HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
35966 {
35967     if ((gen < 0) || (gen > max_generation))
35968         return E_FAIL;
35969 #ifdef MULTIPLE_HEAPS
35970     counters->current_size = 0;
35971     counters->promoted_size = 0;
35972     counters->collection_count = 0;
35973
35974     //enumerate all the heaps and get their counters.
35975     for (int i = 0; i < gc_heap::n_heaps; i++)
35976     {
35977         dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen);
35978
35979         counters->current_size += dd_current_size (dd);
35980         counters->promoted_size += dd_promoted_size (dd);
35981         if (i == 0)
35982             counters->collection_count += dd_collection_count (dd);
35983     }
35984 #else
35985     dynamic_data* dd = pGenGCHeap->dynamic_data_of (gen);
35986     counters->current_size = dd_current_size (dd);
35987     counters->promoted_size = dd_promoted_size (dd);
35988     counters->collection_count = dd_collection_count (dd);
35989 #endif //MULTIPLE_HEAPS
35990     return S_OK;
35991 }
35992
35993 // Get the segment size to use, making sure it conforms.
35994 size_t GCHeap::GetValidSegmentSize(bool large_seg)
35995 {
35996     return get_valid_segment_size (large_seg);
35997 }
35998
35999 // Get the max gen0 heap size, making sure it conforms.
36000 size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
36001 {
36002     size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size());
36003
36004     if ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size))
36005     {
36006 #ifdef SERVER_GC
36007         // performance data seems to indicate halving the size results
36008         // in optimal perf.  Ask for adjusted gen0 size.
36009         gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024));
36010
36011         // if gen0 size is too large given the available memory, reduce it.
36012         // Get true cache size, as we don't want to reduce below this.
36013         size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024));
36014         dprintf (2, ("cache: %Id-%Id",
36015             GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),
36016             GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)));
36017
36018         int n_heaps = gc_heap::n_heaps;
36019 #else //SERVER_GC
36020         size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE);
36021         gen0size = max((4*trueSize/5),(256*1024));
36022         trueSize = max(trueSize, (256*1024));
36023         int n_heaps = 1;
36024 #endif //SERVER_GC
36025
36026         // if the total min gen0 budget across heaps would exceed 1/6th of available memory,
36027         // then reduce the gen0 size until it either fits or has been reduced to the cache size.
36028         while ((gen0size * n_heaps) > GCToOSInterface::GetPhysicalMemoryLimit() / 6)
36029         {
36030             gen0size = gen0size / 2;
36031             if (gen0size <= trueSize)
36032             {
36033                 gen0size = trueSize;
36034                 break;
36035             }
36036         }
36037     }
36038
36039     // Generation 0 must never be more than 1/2 the segment size.
36040     if (gen0size >= (seg_size / 2))
36041         gen0size = seg_size / 2;
36042
36043     return (gen0size);
36044 }
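// Worked example of the clamping loop above (numbers hypothetical): with
// 8 heaps, a 32MB initial gen0size and 1GB of physical memory, the check is
// 8*32MB = 256MB > 1GB/6 (~170MB), so gen0size halves to 16MB; 8*16MB = 128MB
// fits, so the loop stops (unless 16MB <= trueSize, in which case gen0size is
// pinned to the cache size instead).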
36045
36046 void GCHeap::SetReservedVMLimit (size_t vmlimit)
36047 {
36048     gc_heap::reserved_memory_limit = vmlimit;
36049 }
36050
36051
36052 // Versions of the same method, one for each heap.
36053
36054 #ifdef FEATURE_PREMORTEM_FINALIZATION
36055
36056 Object* GCHeap::GetNextFinalizableObject()
36057 {
36058
36059 #ifdef MULTIPLE_HEAPS
36060
36061     //return the first non-critical object from the first heap that has one.
36062     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36063     {
36064         gc_heap* hp = gc_heap::g_heaps [hn];
36065         Object* O = hp->finalize_queue->GetNextFinalizableObject(TRUE);
36066         if (O)
36067             return O;
36068     }
36069     //otherwise return the first object, critical or not, from the first heap that has one.
36070     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36071     {
36072         gc_heap* hp = gc_heap::g_heaps [hn];
36073         Object* O = hp->finalize_queue->GetNextFinalizableObject(FALSE);
36074         if (O)
36075             return O;
36076     }
36077     return 0;
36078
36079
36080 #else //MULTIPLE_HEAPS
36081     return pGenGCHeap->finalize_queue->GetNextFinalizableObject();
36082 #endif //MULTIPLE_HEAPS
36083
36084 }
36085
36086 size_t GCHeap::GetNumberFinalizableObjects()
36087 {
36088 #ifdef MULTIPLE_HEAPS
36089     size_t cnt = 0;
36090     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36091     {
36092         gc_heap* hp = gc_heap::g_heaps [hn];
36093         cnt += hp->finalize_queue->GetNumberFinalizableObjects();
36094     }
36095     return cnt;
36096
36097
36098 #else //MULTIPLE_HEAPS
36099     return pGenGCHeap->finalize_queue->GetNumberFinalizableObjects();
36100 #endif //MULTIPLE_HEAPS
36101 }
36102
36103 size_t GCHeap::GetFinalizablePromotedCount()
36104 {
36105 #ifdef MULTIPLE_HEAPS
36106     size_t cnt = 0;
36107
36108     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36109     {
36110         gc_heap* hp = gc_heap::g_heaps [hn];
36111         cnt += hp->finalize_queue->GetPromotedCount();
36112     }
36113     return cnt;
36114
36115 #else //MULTIPLE_HEAPS
36116     return pGenGCHeap->finalize_queue->GetPromotedCount();
36117 #endif //MULTIPLE_HEAPS
36118 }
36119
36120 bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
36121 {
36122 #ifdef MULTIPLE_HEAPS
36123     bool foundp = false;
36124     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36125     {
36126         gc_heap* hp = gc_heap::g_heaps [hn];
36127         if (hp->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers))
36128             foundp = true;
36129     }
36130     return foundp;
36131
36132 #else //MULTIPLE_HEAPS
36133     return pGenGCHeap->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers);
36134 #endif //MULTIPLE_HEAPS
36135 }
36136
36137 bool GCHeap::ShouldRestartFinalizerWatchDog()
36138 {
36139     // This condition was historically used as part of the condition to detect finalizer thread timeouts
36140     return gc_heap::gc_lock.lock != -1;
36141 }
36142
36143 void GCHeap::SetFinalizeQueueForShutdown(bool fHasLock)
36144 {
36145 #ifdef MULTIPLE_HEAPS
36146     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36147     {
36148         gc_heap* hp = gc_heap::g_heaps [hn];
36149         hp->finalize_queue->SetSegForShutDown(fHasLock);
36150     }
36151
36152 #else //MULTIPLE_HEAPS
36153     pGenGCHeap->finalize_queue->SetSegForShutDown(fHasLock);
36154 #endif //MULTIPLE_HEAPS
36155 }
36156
36157 //---------------------------------------------------------------------------
36158 // Finalized class tracking
36159 //---------------------------------------------------------------------------
36160
36161 bool GCHeap::RegisterForFinalization (int gen, Object* obj)
36162 {
36163     if (gen == -1)
36164         gen = 0;
36165     if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
36166     {
36167         //just reset the bit
36168         ((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN);
36169         return true;
36170     }
36171     else
36172     {
36173         gc_heap* hp = gc_heap::heap_of ((uint8_t*)obj);
36174         return hp->finalize_queue->RegisterForFinalization (gen, obj);
36175     }
36176 }
36177
36178 void GCHeap::SetFinalizationRun (Object* obj)
36179 {
36180     ((CObjectHeader*)obj)->GetHeader()->SetBit(BIT_SBLK_FINALIZER_RUN);
36181 }
36182
36183
36184 //--------------------------------------------------------------------
36185 //
36186 //          Support for finalization
36187 //
36188 //--------------------------------------------------------------------
36189
36190 inline
36191 unsigned int gen_segment (int gen)
36192 {
36193     assert (((signed)NUMBERGENERATIONS - gen - 1)>=0);
36194     return (NUMBERGENERATIONS - gen - 1);
36195 }
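// Example of the mapping above: if NUMBERGENERATIONS is 4, then gen 2 maps to
// segment 1, gen 1 to segment 2 and gen 0 to segment 3, i.e. older generations
// occupy lower-numbered segments of the finalization array.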
36196
36197 bool CFinalize::Initialize()
36198 {
36199     CONTRACTL {
36200         NOTHROW;
36201         GC_NOTRIGGER;
36202     } CONTRACTL_END;
36203
36204     m_Array = new (nothrow)(Object*[100]);
36205
36206     if (!m_Array)
36207     {
36208         ASSERT (m_Array);
36209         STRESS_LOG_OOM_STACK(sizeof(Object*[100]));
36210         if (GCConfig::GetBreakOnOOM())
36211         {
36212             GCToOSInterface::DebugBreak();
36213         }
36214         return false;
36215     }
36216     m_EndArray = &m_Array[100];
36217
36218     for (int i = 0; i < FreeList; i++)
36219     {
36220         SegQueueLimit (i) = m_Array;
36221     }
36222     m_PromotedCount = 0;
36223     lock = -1;
36224 #ifdef _DEBUG
36225     lockowner_threadid.Clear();
36226 #endif // _DEBUG
36227
36228     return true;
36229 }
36230
36231 CFinalize::~CFinalize()
36232 {
36233     delete [] m_Array;  // allocated with array new, so array delete is required
36234 }
36235
36236 size_t CFinalize::GetPromotedCount ()
36237 {
36238     return m_PromotedCount;
36239 }
36240
36241 inline
36242 void CFinalize::EnterFinalizeLock()
36243 {
36244     _ASSERTE(dbgOnly_IsSpecialEEThread() ||
36245              GCToEEInterface::GetThread() == 0 ||
36246              GCToEEInterface::IsPreemptiveGCDisabled());
36247
36248 retry:
36249     if (Interlocked::CompareExchange(&lock, 0, -1) >= 0)
36250     {
36251         unsigned int i = 0;
36252         while (lock >= 0)
36253         {
36254             YieldProcessor();           // indicate to the processor that we are spinning
36255             if (++i & 7)
36256                 GCToOSInterface::YieldThread (0);
36257             else
36258                 GCToOSInterface::Sleep (5);
36259         }
36260         goto retry;
36261     }
36262
36263 #ifdef _DEBUG
36264     lockowner_threadid.SetToCurrentThread();
36265 #endif // _DEBUG
36266 }
36267
36268 inline
36269 void CFinalize::LeaveFinalizeLock()
36270 {
36271     _ASSERTE(dbgOnly_IsSpecialEEThread() ||
36272              GCToEEInterface::GetThread() == 0 ||
36273              GCToEEInterface::IsPreemptiveGCDisabled());
36274
36275 #ifdef _DEBUG
36276     lockowner_threadid.Clear();
36277 #endif // _DEBUG
36278     lock = -1;
36279 }
36280
36281 bool
36282 CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
36283 {
36284     CONTRACTL {
36285         NOTHROW;
36286         GC_NOTRIGGER;
36287     } CONTRACTL_END;
36288
36289     EnterFinalizeLock();
36290     // Adjust gen
36291     unsigned int dest = 0;
36292
36293     if (g_fFinalizerRunOnShutDown)
36294     {
36295         //no method table available yet,
36296         //put it in the finalizer queue and sort out when
36297         //dequeueing
36298         dest = FinalizerListSeg;
36299     }
36300
36301     else
36302         dest = gen_segment (gen);
36303
36304     // Adjust boundary for segments so that GC will keep objects alive.
36305     Object*** s_i = &SegQueue (FreeList);
36306     if ((*s_i) == m_EndArray)
36307     {
36308         if (!GrowArray())
36309         {
36310             LeaveFinalizeLock();
36311             if (method_table(obj) == NULL)
36312             {
36313                 // If the object is uninitialized, a valid size should have been passed.
36314                 assert (size >= Align (min_obj_size));
36315                 dprintf (3, ("Making unused array [%Ix, %Ix[", (size_t)obj, (size_t)(obj+size)));
36316                 ((CObjectHeader*)obj)->SetFree(size);
36317             }
36318             STRESS_LOG_OOM_STACK(0);
36319             if (GCConfig::GetBreakOnOOM())
36320             {
36321                 GCToOSInterface::DebugBreak();
36322             }
36323             return false;
36324         }
36325     }
36326     Object*** end_si = &SegQueueLimit (dest);
36327     do
36328     {
36329         //is the segment empty?
36330         if (!(*s_i == *(s_i-1)))
36331         {
36332             //no, swap the end elements.
36333             *(*s_i) = *(*(s_i-1));
36334         }
36335         //increment the fill pointer
36336         (*s_i)++;
36337         //go to the next segment.
36338         s_i--;
36339     } while (s_i > end_si);
36340
36341     // We have reached the destination segment
36342     // store the object
36343     **s_i = obj;
36344     // increment the fill pointer
36345     (*s_i)++;
36346
36347     LeaveFinalizeLock();
36348
36349     return true;
36350 }
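// Illustrative note on the insertion loop above: to make room in segment
// `dest`, every segment boundary between `dest` and the free area shifts right
// by one slot. For each such segment, the element at its start is copied into
// the vacated slot just past its end (a front-to-back rotation), so an insert
// costs O(number of segments) copies, independent of queue length.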
36351
36352 Object*
36353 CFinalize::GetNextFinalizableObject (BOOL only_non_critical)
36354 {
36355     Object* obj = 0;
36356     //serialize
36357     EnterFinalizeLock();
36358
36359 retry:
36360     if (!IsSegEmpty(FinalizerListSeg))
36361     {
36362         if (g_fFinalizerRunOnShutDown)
36363         {
36364             obj = *(SegQueueLimit (FinalizerListSeg)-1);
36365             if (method_table(obj)->HasCriticalFinalizer())
36366             {
36367                 MoveItem ((SegQueueLimit (FinalizerListSeg)-1),
36368                           FinalizerListSeg, CriticalFinalizerListSeg);
36369                 goto retry;
36370             }
36371             else
36372                 --SegQueueLimit (FinalizerListSeg);
36373         }
36374         else
36375             obj =  *(--SegQueueLimit (FinalizerListSeg));
36376
36377     }
36378     else if (!only_non_critical && !IsSegEmpty(CriticalFinalizerListSeg))
36379     {
36380         //the FinalizerList is empty, so we can adjust both
36381         //limits instead of moving the object to the free list
36382         obj =  *(--SegQueueLimit (CriticalFinalizerListSeg));
36383         --SegQueueLimit (FinalizerListSeg);
36384     }
36385     if (obj)
36386     {
36387         dprintf (3, ("running finalizer for %Ix (mt: %Ix)", obj, method_table (obj)));
36388     }
36389     LeaveFinalizeLock();
36390     return obj;
36391 }
36392
36393 void
36394 CFinalize::SetSegForShutDown(BOOL fHasLock)
36395 {
36396     int i;
36397
36398     if (!fHasLock)
36399         EnterFinalizeLock();
36400     for (i = 0; i <= max_generation; i++)
36401     {
36402         unsigned int seg = gen_segment (i);
36403         Object** startIndex = SegQueueLimit (seg)-1;
36404         Object** stopIndex  = SegQueue (seg);
36405         for (Object** po = startIndex; po >= stopIndex; po--)
36406         {
36407             Object* obj = *po;
36408             if (method_table(obj)->HasCriticalFinalizer())
36409             {
36410                 MoveItem (po, seg, CriticalFinalizerListSeg);
36411             }
36412             else
36413             {
36414                 MoveItem (po, seg, FinalizerListSeg);
36415             }
36416         }
36417     }
36418     if (!fHasLock)
36419         LeaveFinalizeLock();
36420 }
36421
36422 void
36423 CFinalize::DiscardNonCriticalObjects()
36424 {
36425     //empty the finalization queue
36426     Object** startIndex = SegQueueLimit (FinalizerListSeg)-1;
36427     Object** stopIndex  = SegQueue (FinalizerListSeg);
36428     for (Object** po = startIndex; po >= stopIndex; po--)
36429     {
36430         MoveItem (po, FinalizerListSeg, FreeList);
36431     }
36432 }
36433
36434 size_t
36435 CFinalize::GetNumberFinalizableObjects()
36436 {
36437     return SegQueueLimit (FinalizerListSeg) -
36438         (g_fFinalizerRunOnShutDown ? m_Array : SegQueue(FinalizerListSeg));
36439 }
36440
36441 BOOL
36442 CFinalize::FinalizeSegForAppDomain (void *pDomain, 
36443                                     BOOL fRunFinalizers, 
36444                                     unsigned int Seg)
36445 {
36446     BOOL finalizedFound = FALSE;
36447     Object** endIndex = SegQueue (Seg);
36448     for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
36449     {
36450         CObjectHeader* obj = (CObjectHeader*)*i;
36451
36452         // Objects are put into the finalization queue before they are complete (i.e. their method table
36453         // may be null), so we must check that the object we found has a method table before checking
36454         // if it has the index we are looking for. If the method table is null, it can't be from the
36455         // unloading domain, so skip it.
36456         if (method_table(obj) == NULL)
36457         {
36458             continue;
36459         }
36460
36461         // does the EE actually want us to finalize this object?
36462         if (!GCToEEInterface::ShouldFinalizeObjectForUnload(pDomain, obj))
36463         {
36464             continue;
36465         }
36466
36467         if (!fRunFinalizers || (obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
36468         {
36469             //remove the object because we don't want to
36470             //run the finalizer
36471             MoveItem (i, Seg, FreeList);
36472             //Reset the bit so it will be put back on the queue
36473             //if resurrected and re-registered.
36474             obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
36475         }
36476         else
36477         {
36478             if (method_table(obj)->HasCriticalFinalizer())
36479             {
36480                 finalizedFound = TRUE;
36481                 MoveItem (i, Seg, CriticalFinalizerListSeg);
36482             }
36483             else
36484             {
36485                 if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
36486                 {
36487                     MoveItem (i, Seg, FreeList);
36488                 }
36489                 else
36490                 {
36491                     finalizedFound = TRUE;
36492                     MoveItem (i, Seg, FinalizerListSeg);
36493                 }
36494             }
36495         }
36496     }
36497
36498     return finalizedFound;
36499 }
36500
36501 bool
36502 CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
36503 {
36504     bool finalizedFound = false;
36505
36506     unsigned int startSeg = gen_segment (max_generation);
36507
36508     EnterFinalizeLock();
36509
36510     for (unsigned int Seg = startSeg; Seg <= gen_segment (0); Seg++)
36511     {
36512         if (FinalizeSegForAppDomain (pDomain, fRunFinalizers, Seg))
36513         {
36514             finalizedFound = true;
36515         }
36516     }
36517
36518     LeaveFinalizeLock();
36519
36520     return finalizedFound;
36521 }
36522
36523 void
36524 CFinalize::MoveItem (Object** fromIndex,
36525                      unsigned int fromSeg,
36526                      unsigned int toSeg)
36527 {
36528
36529     int step;
36530     ASSERT (fromSeg != toSeg);
36531     if (fromSeg > toSeg)
36532         step = -1;
36533     else
36534         step = +1;
36535     // Place the element at the boundary closest to dest
36536     Object** srcIndex = fromIndex;
36537     for (unsigned int i = fromSeg; i != toSeg; i+= step)
36538     {
36539         Object**& destFill = m_FillPointers[i+(step - 1 )/2];
36540         Object** destIndex = destFill - (step + 1)/2;
36541         if (srcIndex != destIndex)
36542         {
36543             Object* tmp = *srcIndex;
36544             *srcIndex = *destIndex;
36545             *destIndex = tmp;
36546         }
36547         destFill -= step;
36548         srcIndex = destIndex;
36549     }
36550 }
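// Illustrative note: MoveItem relocates a single entry by swapping it with the
// element sitting at each intervening segment boundary and shifting that
// boundary toward the source, so a move costs one swap per segment crossed
// rather than shifting whole ranges.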
36551
36552 void
36553 CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
36554 {
36555     ScanContext sc;
36556     if (pSC == 0)
36557         pSC = &sc;
36558
36559     pSC->thread_number = hn;
36560
36561     //scan the finalization queue
36562     Object** startIndex  = SegQueue (CriticalFinalizerListSeg);
36563     Object** stopIndex  = SegQueueLimit (FinalizerListSeg);
36564
36565     for (Object** po = startIndex; po < stopIndex; po++)
36566     {
36567         Object* o = *po;
36568         //dprintf (3, ("scan freacheable %Ix", (size_t)o));
36569         dprintf (3, ("scan f %Ix", (size_t)o));
36570 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
36571         if (g_fEnableAppDomainMonitoring)
36572         {
36573             pSC->pCurrentDomain = GCToEEInterface::GetAppDomainForObject(o);
36574         }
36575 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
36576
36577         (*fn)(po, pSC, 0);
36578     }
36579 }
36580
36581 void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
36582 {
36583     Object** startIndex = SegQueue (CriticalFinalizerListSeg);
36584     Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg);
36585     Object** stopIndex  = SegQueueLimit (FinalizerListSeg);
36586     for (Object** po = startIndex; po < stopIndex; po++)
36587     {
36588         //report *po
36589         fn(po < stopCriticalIndex, *po);
36590     }
36591 }
36592
36593 BOOL
36594 CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
36595                                 gc_heap* hp)
36596 {
36597     ScanContext sc;
36598     sc.promotion = TRUE;
36599 #ifdef MULTIPLE_HEAPS
36600     sc.thread_number = hp->heap_number;
36601 #else
36602     UNREFERENCED_PARAMETER(hp);
36603 #endif //MULTIPLE_HEAPS
36604
36605     BOOL finalizedFound = FALSE;
36606
36607     //start with gen and explore all the younger generations.
36608     unsigned int startSeg = gen_segment (gen);
36609     {
36610         m_PromotedCount = 0;
36611         for (unsigned int Seg = startSeg; Seg <= gen_segment(0); Seg++)
36612         {
36613             Object** endIndex = SegQueue (Seg);
36614             for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
36615             {
36616                 CObjectHeader* obj = (CObjectHeader*)*i;
36617                 dprintf (3, ("scanning: %Ix", (size_t)obj));
36618                 if (!g_theGCHeap->IsPromoted (obj))
36619                 {
36620                     dprintf (3, ("f-reachable: %Ix", (size_t)obj));
36621
36622                     assert (method_table(obj)->HasFinalizer());
36623
36624                     if (GCToEEInterface::EagerFinalized(obj))
36625                     {
36626                         MoveItem (i, Seg, FreeList);
36627                     }
36628                     else if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
36629                     {
36630                         //remove the object because we don't want to
36631                         //run the finalizer
36632
36633                         MoveItem (i, Seg, FreeList);
36634
36635                         //Reset the bit so it will be put back on the queue
36636                         //if resurrected and re-registered.
36637                         obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
36638
36639                     }
36640                     else
36641                     {
36642                         m_PromotedCount++;
36643
36644                         if (method_table(obj)->HasCriticalFinalizer())
36645                         {
36646                             MoveItem (i, Seg, CriticalFinalizerListSeg);
36647                         }
36648                         else
36649                         {
36650                             MoveItem (i, Seg, FinalizerListSeg);
36651                         }
36652                     }
36653                 }
36654 #ifdef BACKGROUND_GC
36655                 else
36656                 {
36657                     if ((gen == max_generation) && (recursive_gc_sync::background_running_p()))
36658                     {
36659                         // TODO - fix the following line.
36660                         //assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE));
36661                         dprintf (3, ("%Ix is marked", (size_t)obj));
36662                     }
36663                 }
36664 #endif //BACKGROUND_GC
36665             }
36666         }
36667     }
36668     finalizedFound = !IsSegEmpty(FinalizerListSeg) ||
36669                      !IsSegEmpty(CriticalFinalizerListSeg);
36670                     
36671     if (finalizedFound)
36672     {
36673         //Promote the f-reachable objects
36674         GcScanRoots (pfn,
36675 #ifdef MULTIPLE_HEAPS
36676                      hp->heap_number
36677 #else
36678                      0
36679 #endif //MULTIPLE_HEAPS
36680                      , 0);
36681
36682         hp->settings.found_finalizers = TRUE;
36683
36684 #ifdef BACKGROUND_GC
36685         if (hp->settings.concurrent)
36686         {
36687             hp->settings.found_finalizers = !(IsSegEmpty(FinalizerListSeg) && IsSegEmpty(CriticalFinalizerListSeg));
36688         }
36689 #endif //BACKGROUND_GC
36690         if (hp->settings.concurrent && hp->settings.found_finalizers)
36691         {
36692             if (!mark_only_p)
36693                 GCToEEInterface::EnableFinalization(true);
36694         }
36695     }
36696
36697     return finalizedFound;
36698 }
36699
36700 //Relocates all of the objects in the finalization array
36701 void
36702 CFinalize::RelocateFinalizationData (int gen, gc_heap* hp)
36703 {
36704     ScanContext sc;
36705     sc.promotion = FALSE;
36706 #ifdef MULTIPLE_HEAPS
36707     sc.thread_number = hp->heap_number;
36708 #else
36709     UNREFERENCED_PARAMETER(hp);
36710 #endif //MULTIPLE_HEAPS
36711
36712     unsigned int Seg = gen_segment (gen);
36713
36714     Object** startIndex = SegQueue (Seg);
36715     for (Object** po = startIndex; po < SegQueue (FreeList);po++)
36716     {
36717         GCHeap::Relocate (po, &sc);
36718     }
36719 }
36720
36721 void
36722 CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
36723 {
36724     // update the generation fill pointers.
36725     // if gen_0_empty is FALSE, test each object to find out if
36726     // it was promoted or not
36727     if (gen_0_empty_p)
36728     {
36729         for (int i = min (gen+1, max_generation); i > 0; i--)
36730         {
36731             m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)];
36732         }
36733     }
36734     else
36735     {
36736         //Look for demoted or promoted plugs
36737
36738         for (int i = gen; i >= 0; i--)
36739         {
36740             unsigned int Seg = gen_segment (i);
36741             Object** startIndex = SegQueue (Seg);
36742
36743             for (Object** po = startIndex;
36744                  po < SegQueueLimit (gen_segment(i)); po++)
36745             {
36746                 int new_gen = g_theGCHeap->WhichGeneration (*po);
36747                 if (new_gen != i)
36748                 {
36749                     if (new_gen > i)
36750                     {
36751                         //promotion
36752                         MoveItem (po, gen_segment (i), gen_segment (new_gen));
36753                     }
36754                     else
36755                     {
36756                         //demotion
36757                         MoveItem (po, gen_segment (i), gen_segment (new_gen));
36758                         //step back so we re-examine the object that was swapped into this slot.
36759                         po--;
36760                     }
36761                 }
36762
36763             }
36764         }
36765     }
36766 }
36767
36768 BOOL
36769 CFinalize::GrowArray()
36770 {
36771     size_t oldArraySize = (m_EndArray - m_Array);
36772     size_t newArraySize =  (size_t)(((float)oldArraySize / 10) * 12);
36773
36774     Object** newArray = new (nothrow) Object*[newArraySize];
36775     if (!newArray)
36776     {
36777         // It's not safe to throw here, because of the FinalizeLock.  Tell our caller
36778         // to throw for us.
36779 //        ASSERT (newArray);
36780         return FALSE;
36781     }
36782     memcpy (newArray, m_Array, oldArraySize*sizeof(Object*));
36783
36784     //adjust the fill pointers
36785     for (int i = 0; i < FreeList; i++)
36786     {
36787         m_FillPointers [i] += (newArray - m_Array);
36788     }
36789     delete [] m_Array;
36790     m_Array = newArray;
36791     m_EndArray = &m_Array [newArraySize];
36792
36793     return TRUE;
36794 }
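// Example of the growth policy above: an array of 100 entries grows to
// (100 / 10) * 12 == 120 entries, i.e. roughly 1.2x per expansion, and the
// fill pointers are rebased by the distance between the old and new buffers.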
36795
36796 #ifdef VERIFY_HEAP
36797 void CFinalize::CheckFinalizerObjects()
36798 {
36799     for (int i = 0; i <= max_generation; i++)
36800     {
36801         Object **startIndex = SegQueue (gen_segment (i));
36802         Object **stopIndex  = SegQueueLimit (gen_segment (i));
36803
36804         for (Object **po = startIndex; po < stopIndex; po++)
36805         {
36806             if ((int)g_theGCHeap->WhichGeneration (*po) < i)
36807                 FATAL_GC_ERROR ();
36808             ((CObjectHeader*)*po)->Validate();
36809         }
36810     }
36811 }
36812 #endif //VERIFY_HEAP
36813
36814 #endif // FEATURE_PREMORTEM_FINALIZATION
36815
36816
36817 //------------------------------------------------------------------------------
36818 //
36819 //                      End of VM specific support
36820 //
36821 //------------------------------------------------------------------------------
36822 void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
36823 {
36824     generation* gen = gc_heap::generation_of (gen_number);
36825     heap_segment*    seg = generation_start_segment (gen);
36826     uint8_t*       x = ((gen_number == max_generation) ? heap_segment_mem (seg) :
36827                      generation_allocation_start (gen));
36828
36829     uint8_t*       end = heap_segment_allocated (seg);
36830     BOOL small_object_segments = TRUE;
36831     int align_const = get_alignment_constant (small_object_segments);
36832
36833     while (1)
36835     {
36836         if (x >= end)
36837         {
36838             if ((seg = heap_segment_next (seg)) != 0)
36839             {
36840                 x = heap_segment_mem (seg);
36841                 end = heap_segment_allocated (seg);
36842                 continue;
36843             }
36844             else
36845             {
36846                 if (small_object_segments && walk_large_object_heap_p)
36848                 {
36849                     small_object_segments = FALSE;
36850                     align_const = get_alignment_constant (small_object_segments);
36851                     seg = generation_start_segment (large_object_generation);
36852                     x = heap_segment_mem (seg);
36853                     end = heap_segment_allocated (seg);
36854                     continue;
36855                 }
36856                 else
36857                 {
36858                     break;
36859                 }
36860             }
36861         }
36862
36863         size_t s = size (x);
36864         CObjectHeader* o = (CObjectHeader*)x;
36865
36866         if (!o->IsFree())
36868         {
36869             _ASSERTE(((size_t)o & 0x3) == 0); // Last two bits should never be set at this point
36870
36871             if (!fn (o->GetObjectBase(), context))
36872                 return;
36873         }
36874         x = x + Align (s, align_const);
36875     }
36876 }
36877
36878 void gc_heap::walk_finalize_queue (fq_walk_fn fn)
36879 {
36880 #ifdef FEATURE_PREMORTEM_FINALIZATION
36881     finalize_queue->WalkFReachableObjects (fn);
36882 #endif //FEATURE_PREMORTEM_FINALIZATION
36883 }
36884
36885 void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
36886 {
36887 #ifdef MULTIPLE_HEAPS
36888     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36889     {
36890         gc_heap* hp = gc_heap::g_heaps [hn];
36891
36892         hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p);
36893     }
36894 #else
36895     walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p);
36896 #endif //MULTIPLE_HEAPS
36897 }
36898
36899 void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
36900 {
36901     uint8_t* o = (uint8_t*)obj;
36902     if (o)
36903     {
36904         go_through_object_cl (method_table (o), o, size(o), oo,
36905                                     {
36906                                         if (*oo)
36907                                         {
36908                                             Object *oh = (Object*)*oo;
36909                                             if (!fn (oh, context))
36910                                                 return;
36911                                         }
36912                                     }
36913             );
36914     }
36915 }
36916
36917 void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type)
36918 {
36919     gc_heap* hp = (gc_heap*)gc_context;
36920     hp->walk_survivors (fn, diag_context, type);
36921 }
36922
36923 void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p)
36924 {
36925     gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
36926 }
36927
36928 void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn)
36929 {
36930     gc_heap* hp = (gc_heap*)gc_context;
36931     hp->walk_finalize_queue (fn);
36932 }
36933
36934 void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
36935 {
36936 #ifdef MULTIPLE_HEAPS
36937     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36938     {
36939         gc_heap* hp = gc_heap::g_heaps [hn];
36940         hp->finalize_queue->GcScanRoots(fn, hn, sc);
36941     }
36942 #else
36943     pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc);
36944 #endif //MULTIPLE_HEAPS
36945 }
36946
36947 void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
36948 {
36949     UNREFERENCED_PARAMETER(gen_number);
36950     GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
36951 }
36952
36953 void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
36954 {
36955     UNREFERENCED_PARAMETER(gen_number);
36956     GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
36957 }
36958
36959 // Go through and touch (read) each page straddled by a memory block.
36960 void TouchPages(void * pStart, size_t cb)
36961 {
36962     const uint32_t pagesize = OS_PAGE_SIZE;
36963     _ASSERTE(0 == (pagesize & (pagesize-1))); // Must be a power of 2.
36964     if (cb)
36965     {
36966         VOLATILE(char)* pEnd = (VOLATILE(char)*)(cb + (char*)pStart);
36967         VOLATILE(char)* p = (VOLATILE(char)*)(((char*)pStart) -  (((size_t)pStart) & (pagesize-1)));
36968         while (p < pEnd)
36969         {
36970             char a;
36971             a = VolatileLoad(p);
36972             //printf("Touching page %lxh\n", (uint32_t)p);
36973             p += pagesize;
36974         }
36975     }
36976 }
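// Illustrative only: a caller that has just committed `cb` bytes at `p` can
// pre-fault the range so later accesses do not take soft page faults:
//
//     if (GCToOSInterface::VirtualCommit (p, cb))
//         TouchPages (p, cb);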
36977
36978 #if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
36979     // This code is designed to catch the failure to update the write barrier
36980     // The way it works is to copy the whole heap right after every GC.  The write
36981     // barrier code has been modified so that it updates the shadow as well as the
36982     // real GC heap.  Before doing the next GC, we walk the heap, looking for pointers
36983     // that were updated in the real heap, but not the shadow.  A mismatch indicates
36984     // an error.  The offending code can be found by breaking after the correct GC,
36985     // and then placing a data breakpoint on the Heap location that was updated without
36986     // going through the write barrier.
36987
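// A minimal sketch of the mirroring a checked write barrier performs
// (illustrative only; the real barrier lives in the runtime's write barrier
// helpers and also handles card marking):
//
//     void CheckedWriteBarrier (Object** dst, Object* ref)
//     {
//         *dst = ref;                                           // real heap store
//         uint8_t* shadow = g_GCShadow + ((uint8_t*)dst - g_gc_lowest_address);
//         if (shadow < g_GCShadowEnd)
//             *(Object**)shadow = ref;                          // mirrored store
//     }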
36988     // Called at process shutdown
36989 void deleteGCShadow()
36990 {
36991     if (g_GCShadow != 0)
36992         GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow);
36993     g_GCShadow = 0;
36994     g_GCShadowEnd = 0;
36995 }
36996
36997     // Called at startup and right after a GC, get a snapshot of the GC Heap
36998 void initGCShadow()
36999 {
37000     if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK))
37001         return;
37002
37003     size_t len = g_gc_highest_address - g_gc_lowest_address;
37004     if (len > (size_t)(g_GCShadowEnd - g_GCShadow)) 
37005     {
37006         deleteGCShadow();
37007         g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None);
37008         if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len))
37009         {
37010             _ASSERTE(!"Not enough memory to run HeapVerify level 2");
37011             // If after the assert we decide to allow the program to continue 
37012             // running we need to be in a state that will not trigger any 
37013             // additional AVs while we fail to allocate a shadow segment, i.e. 
37014             // ensure calls to updateGCShadow() checkGCWriteBarrier() don't AV
37015             deleteGCShadow();
37016             return;
37017         }
37018
37019         g_GCShadowEnd += len;
37020     }
37021
37022     // save the value of g_gc_lowest_address at this time.  If this value changes before
37023     // the next call to checkGCWriteBarrier() it means we extended the heap (with a
37024     // large object segment most probably), and the whole shadow segment is inconsistent.
37025     g_shadow_lowest_address = g_gc_lowest_address;
37026
37027         //****** Copy the whole GC heap ******
37028     //
37029     // NOTE: This is the one situation where the combination of heap_segment_rw(gen_start_segment())
37030     // can produce a NULL result.  This is because the initialization has not completed.
37031     //
37032     generation* gen = gc_heap::generation_of (max_generation);
37033     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
37034
37035     ptrdiff_t delta = g_GCShadow - g_gc_lowest_address;
37036     BOOL small_object_segments = TRUE;
37037     while(1)
37038     {
37039         if (!seg)
37040         {
37041             if (small_object_segments)
37042             {
37043                 small_object_segments = FALSE;
37044                 seg = heap_segment_rw (generation_start_segment (gc_heap::generation_of (max_generation+1)));
37045                 continue;
37046             }
37047             else
37048                 break;
37049         }
37050             // Copy the segment
37051         uint8_t* start = heap_segment_mem(seg);
37052         uint8_t* end = heap_segment_allocated (seg);
37053         memcpy(start + delta, start, end - start);
37054         seg = heap_segment_next_rw (seg);
37055     }
37056 }
37057
37058 #define INVALIDGCVALUE (void*)((size_t)0xcccccccd)
37059
37060     // test to see if 'ptr' was only updated via the write barrier.
37061 inline void testGCShadow(Object** ptr)
37062 {
37063     Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)];
37064     if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow)
37065     {
37066
37067         // If you get this assertion, someone updated a GC pointer in the heap without
37068         // using the write barrier.  To find out who, check the value of 
37069         // dd_collection_count (dynamic_data_of (0)). Also
37070         // note the value of 'ptr'.  Rerun the app up to the point where the previous GC just occurred.
37071         // Then put a data breakpoint on the value of 'ptr' and check every write
37072         // to that pointer between the two GCs.  The last one is not using the write barrier.
37073
37074         // If the memory of interest does not exist at system startup,
37075         // you need to set the data breakpoint right after the memory gets committed
37076         // Set a breakpoint at the end of grow_heap_segment, and put the value of 'ptr'
37077         // in the memory window.  run until the memory gets mapped. Then you can set
37078         // your breakpoint
37079
37080         // Note a recent change, we've identified race conditions when updating the gc shadow.
37081         // Throughout the runtime, code will update an address in the gc heap, then erect the
37082         // write barrier, which calls updateGCShadow. With an app that pounds one heap location
37083         // from multiple threads, you can hit this assert even though all involved are using the
37084         // write barrier properly. Thusly, we detect the race and set this location to INVALIDGCVALUE.
37085         // write barrier properly. Thus, we detect the race and set this location to INVALIDGCVALUE.
37086         // TODO: fixed to detect the race. We've only seen this race from VolatileWritePtr,
37087         // TODO: so elect not to fix jithelp.asm at this time. It should be done if we start hitting
37088         // TODO: erroneous asserts in here.
37089
37090         if(*shadow!=INVALIDGCVALUE)
37091         {
37092 #ifdef FEATURE_BASICFREEZE
37093             // Write barriers for stores of references to frozen objects may be optimized away.
37094             if (!gc_heap::frozen_object_p(*ptr))
37095 #endif // FEATURE_BASICFREEZE
37096             {
37097                 _ASSERTE(!"Pointer updated without using write barrier");
37098             }
37099         }
37100         /*
37101         else
37102         {
37103              printf("saw a INVALIDGCVALUE. (just to let you know)\n");
37104         }
37105         */
37106     }
37107 }
37108
37109 void testGCShadowHelper (uint8_t* x)
37110 {
37111     size_t s = size (x);
37112     if (contain_pointers (x))
37113     {
37114         go_through_object_nostart (method_table(x), x, s, oo,
37115                            { testGCShadow((Object**) oo); });
37116     }
37117 }
37118
37119     // Walk the whole heap, looking for pointers that were not updated with the write barrier.
37120 void checkGCWriteBarrier()
37121 {
37122     // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment
37123     // and the GC shadow segment did not track that change!
37124     if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address)
37125     {
37126         // No shadow heap, nothing to check.
37127         return;
37128     }
37129
37130     {
37131         generation* gen = gc_heap::generation_of (max_generation);
37132         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
37133
37134         PREFIX_ASSUME(seg != NULL);
37135
37136         while(seg)
37137         {
37138             uint8_t* x = heap_segment_mem(seg);
37139             while (x < heap_segment_allocated (seg))
37140             {
37141                 size_t s = size (x);
37142                 testGCShadowHelper (x);
37143                 x = x + Align (s);
37144             }
37145             seg = heap_segment_next_rw (seg);
37146         }
37147     }
37148
37149     {
37150         // go through large object heap
37151         int alignment = get_alignment_constant(FALSE);
37152         generation* gen = gc_heap::generation_of (max_generation+1);
37153         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
37154
37155         PREFIX_ASSUME(seg != NULL);
37156
37157         while(seg)
37158         {
37159             uint8_t* x = heap_segment_mem(seg);
37160             while (x < heap_segment_allocated (seg))
37161             {
37162                 size_t s = size (x);
37163                 testGCShadowHelper (x);
37164                 x = x + Align (s, alignment);
37165             }
37166             seg = heap_segment_next_rw (seg);
37167         }
37168     }
37169 }
37170 #endif //WRITE_BARRIER_CHECK && !SERVER_GC
37171
37172 #endif // !DACCESS_COMPILE
37173
37174 #ifdef FEATURE_BASICFREEZE
37175 void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef)
37176 {
37177 #ifdef DACCESS_COMPILE
37178     UNREFERENCED_PARAMETER(seg);
37179     UNREFERENCED_PARAMETER(pvContext);
37180     UNREFERENCED_PARAMETER(pfnMethodTable);
37181     UNREFERENCED_PARAMETER(pfnObjRef);
37182 #else
37183     uint8_t *o = heap_segment_mem(seg);
37184
37185     // small heap alignment constant
37186     int alignment = get_alignment_constant(TRUE);
37187
37188     while (o < heap_segment_allocated(seg))
37189     {
37190         pfnMethodTable(pvContext, o);
37191
37192         if (contain_pointers (o))
37193         {
37194             go_through_object_nostart (method_table (o), o, size(o), oo,
37195                    {
37196                        if (*oo)
37197                            pfnObjRef(pvContext, oo);
37198                    }
37199             );
37200         }
37201
37202         o += Align(size(o), alignment);
37203     }
37204 #endif //!DACCESS_COMPILE
37205 }
37206 #endif // FEATURE_BASICFREEZE
37207
37208 #ifndef DACCESS_COMPILE
37209 HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout)
37210 {
37211 #ifdef BACKGROUND_GC
37212     if (recursive_gc_sync::background_running_p())
37213     {
37214         uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout);
37215         if (dwRet == WAIT_OBJECT_0)
37216             return S_OK;
37217         else if (dwRet == WAIT_TIMEOUT)
37218             return HRESULT_FROM_WIN32(ERROR_TIMEOUT);
37219         else
37220             return E_FAIL;      // It is not clear what the last error would be if the wait failed,
37221                                 // as there are too many layers in between. The best we can do is to return E_FAIL.
37222     }
37223 #endif
37224
37225     return S_OK;
37226 }
37227 #endif // !DACCESS_COMPILE
37228
37229 void GCHeap::TemporaryEnableConcurrentGC()
37230 {
37231 #ifdef BACKGROUND_GC
37232     gc_heap::temp_disable_concurrent_p = false;
37233 #endif //BACKGROUND_GC
37234 }
37235
37236 void GCHeap::TemporaryDisableConcurrentGC()
37237 {
37238 #ifdef BACKGROUND_GC
37239     gc_heap::temp_disable_concurrent_p = true;
37240 #endif //BACKGROUND_GC
37241 }
37242
37243 bool GCHeap::IsConcurrentGCEnabled()
37244 {
37245 #ifdef BACKGROUND_GC
37246     return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p));
37247 #else
37248     return FALSE;
37249 #endif //BACKGROUND_GC
37250 }
37251
37252 void GCHeap::SetFinalizeRunOnShutdown(bool value)
37253 {
37254     g_fFinalizerRunOnShutDown = value;
37255 }
37256
37257 void PopulateDacVars(GcDacVars *gcDacVars)
37258 {
37259 #ifndef DACCESS_COMPILE
37260     assert(gcDacVars != nullptr);
37261     *gcDacVars = {};
37262     gcDacVars->major_version_number = 1;
37263     gcDacVars->minor_version_number = 0;
37264     gcDacVars->built_with_svr = &g_built_with_svr_gc;
37265     gcDacVars->build_variant = &g_build_variant;
37266     gcDacVars->gc_structures_invalid_cnt = const_cast<int32_t*>(&GCScan::m_GcStructuresInvalidCnt);
37267     gcDacVars->generation_size = sizeof(generation);
37268     gcDacVars->max_gen = &g_max_generation;
37269 #ifndef MULTIPLE_HEAPS
37270     gcDacVars->mark_array = &gc_heap::mark_array;
37271     gcDacVars->ephemeral_heap_segment = reinterpret_cast<dac_heap_segment**>(&gc_heap::ephemeral_heap_segment);
37272     gcDacVars->current_c_gc_state = const_cast<c_gc_state*>(&gc_heap::current_c_gc_state);
37273     gcDacVars->saved_sweep_ephemeral_seg = reinterpret_cast<dac_heap_segment**>(&gc_heap::saved_sweep_ephemeral_seg);
37274     gcDacVars->saved_sweep_ephemeral_start = &gc_heap::saved_sweep_ephemeral_start;
37275     gcDacVars->background_saved_lowest_address = &gc_heap::background_saved_lowest_address;
37276     gcDacVars->background_saved_highest_address = &gc_heap::background_saved_highest_address;
37277     gcDacVars->alloc_allocated = &gc_heap::alloc_allocated;
37278     gcDacVars->next_sweep_obj = &gc_heap::next_sweep_obj;
37279     gcDacVars->oom_info = &gc_heap::oom_info;
37280     gcDacVars->finalize_queue = reinterpret_cast<dac_finalize_queue**>(&gc_heap::finalize_queue);
37281     gcDacVars->generation_table = reinterpret_cast<dac_generation**>(&gc_heap::generation_table);
37282 #ifdef GC_CONFIG_DRIVEN
37283     gcDacVars->gc_global_mechanisms = reinterpret_cast<size_t**>(&gc_global_mechanisms);
37284     gcDacVars->interesting_data_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_data_per_heap);
37285     gcDacVars->compact_reasons_per_heap = reinterpret_cast<size_t**>(&gc_heap::compact_reasons_per_heap);
37286     gcDacVars->expand_mechanisms_per_heap = reinterpret_cast<size_t**>(&gc_heap::expand_mechanisms_per_heap);
37287     gcDacVars->interesting_mechanism_bits_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_mechanism_bits_per_heap);
37288 #endif // GC_CONFIG_DRIVEN
37289 #ifdef HEAP_ANALYZE
37290     gcDacVars->internal_root_array = &gc_heap::internal_root_array;
37291     gcDacVars->internal_root_array_index = &gc_heap::internal_root_array_index;
37292     gcDacVars->heap_analyze_success = &gc_heap::heap_analyze_success;
37293 #endif // HEAP_ANALYZE
37294 #else
37295     gcDacVars->n_heaps = &gc_heap::n_heaps;
37296     gcDacVars->g_heaps = reinterpret_cast<dac_gc_heap***>(&gc_heap::g_heaps);
37297 #endif // MULTIPLE_HEAPS
37298 #endif // DACCESS_COMPILE
37299 }